From 4ded9abfe4a46aff2787251c9a40f0505bcade0a Mon Sep 17 00:00:00 2001 From: RuoyuZhou Date: Sun, 15 Mar 2026 08:49:08 +0800 Subject: [PATCH 1/3] Refactor workspace crate layout and names --- .gitignore | 7 + Cargo.lock | 753 ++ Cargo.toml | 20 + README.md | 84 +- crates/camodel/Cargo.toml | 14 + crates/camodel/src/backend/execute/e1.rs | 1 + crates/camodel/src/backend/execute/e2.rs | 1 + crates/camodel/src/backend/execute/e3.rs | 1 + crates/camodel/src/backend/execute/e4.rs | 1 + crates/camodel/src/backend/execute/mod.rs | 6 + crates/camodel/src/backend/execute/w1.rs | 1 + crates/camodel/src/backend/execute/w2.rs | 1 + crates/camodel/src/backend/lsu/l1d.rs | 1 + crates/camodel/src/backend/lsu/lhq.rs | 1 + crates/camodel/src/backend/lsu/liq.rs | 1 + crates/camodel/src/backend/lsu/mdb.rs | 1 + crates/camodel/src/backend/lsu/mod.rs | 257 + crates/camodel/src/backend/lsu/scb.rs | 1 + crates/camodel/src/backend/lsu/stq.rs | 1 + crates/camodel/src/backend/mod.rs | 2 + crates/camodel/src/control/commit/cmt.rs | 1 + crates/camodel/src/control/commit/mod.rs | 96 + crates/camodel/src/control/commit/rob.rs | 1 + crates/camodel/src/control/mod.rs | 2 + crates/camodel/src/control/recovery/bru.rs | 1 + .../src/control/recovery/dynamic_target.rs | 1 + crates/camodel/src/control/recovery/fls.rs | 1 + crates/camodel/src/control/recovery/mod.rs | 602 ++ crates/camodel/src/core/config.rs | 52 + crates/camodel/src/core/engine.rs | 167 + crates/camodel/src/core/mod.rs | 6 + crates/camodel/src/core/model.rs | 409 + crates/camodel/src/core/uop.rs | 1 + crates/camodel/src/decode/builder.rs | 1 + crates/camodel/src/decode/classify.rs | 1 + crates/camodel/src/decode/mod.rs | 398 + crates/camodel/src/frontend/decode/d1.rs | 1 + crates/camodel/src/frontend/decode/d2.rs | 1 + crates/camodel/src/frontend/decode/d3.rs | 1 + crates/camodel/src/frontend/decode/mod.rs | 1 + .../src/frontend/dispatch/checkpoints.rs | 1 + crates/camodel/src/frontend/dispatch/mod.rs | 1 + 
.../camodel/src/frontend/dispatch/redirect.rs | 1 + crates/camodel/src/frontend/dispatch/s1.rs | 1 + crates/camodel/src/frontend/dispatch/s2.rs | 1 + crates/camodel/src/frontend/fetch/f0.rs | 1 + crates/camodel/src/frontend/fetch/f1.rs | 1 + crates/camodel/src/frontend/fetch/f2.rs | 1 + crates/camodel/src/frontend/fetch/f3.rs | 1 + crates/camodel/src/frontend/fetch/f4.rs | 1 + crates/camodel/src/frontend/fetch/ib.rs | 1 + crates/camodel/src/frontend/fetch/mod.rs | 1 + crates/camodel/src/frontend/mod.rs | 1009 ++ crates/camodel/src/issue/mod.rs | 2 + crates/camodel/src/issue/queues/iq.rs | 1 + crates/camodel/src/issue/queues/mod.rs | 539 + crates/camodel/src/issue/queues/qtag.rs | 1 + .../camodel/src/issue/queues/ready_tables.rs | 1 + crates/camodel/src/issue/select/i1.rs | 1 + crates/camodel/src/issue/select/i2.rs | 1 + crates/camodel/src/issue/select/mod.rs | 445 + crates/camodel/src/issue/select/p1.rs | 1 + crates/camodel/src/lib.rs | 23 + crates/camodel/src/tests.rs | 8811 +++++++++++++++++ crates/camodel/src/trace/emit.rs | 1 + crates/camodel/src/trace/labels.rs | 1 + crates/camodel/src/trace/mod.rs | 389 + crates/cosim/Cargo.toml | 12 + crates/cosim/src/compare/mod.rs | 146 + crates/cosim/src/lib.rs | 7 + crates/cosim/src/protocol/mod.rs | 1 + crates/cosim/src/qemu/mod.rs | 1 + crates/dse/Cargo.toml | 16 + crates/dse/src/lib.rs | 104 + crates/elf/Cargo.toml | 11 + crates/elf/src/lib.rs | 87 + crates/funcmodel/Cargo.toml | 17 + crates/funcmodel/src/core/mod.rs | 1 + crates/funcmodel/src/exec/mod.rs | 5139 ++++++++++ crates/funcmodel/src/lib.rs | 7 + crates/funcmodel/src/memory/mod.rs | 1 + crates/funcmodel/src/syscalls/mod.rs | 1 + crates/funcmodel/src/trace/mod.rs | 1 + crates/isa/Cargo.toml | 10 + crates/isa/src/lib.rs | 646 ++ crates/lx-tools/Cargo.toml | 19 + crates/lx-tools/src/bin/lx-cosim.rs | 39 + crates/lx-tools/src/bin/lx-run.rs | 47 + crates/lx-tools/src/bin/lx-sweep.rs | 27 + crates/lx-tools/src/bin/lx-trace.rs | 28 + crates/lx-tools/src/cli/mod.rs 
| 94 + crates/lx-tools/src/lib.rs | 3 + crates/runtime/Cargo.toml | 14 + crates/runtime/src/lib.rs | 961 ++ crates/trace/Cargo.toml | 13 + crates/trace/src/commit/mod.rs | 1 + crates/trace/src/lib.rs | 6 + crates/trace/src/linxtrace/mod.rs | 411 + crates/trace/src/schema/mod.rs | 1 + tests/fixtures/bootstrap_runtime.toml | 7 + tests/fixtures/bootstrap_sweep.toml | 11 + tests/fixtures/file_io_input.txt | 1 + .../linux_user_bootstrap_runtime.toml | 7 + tests/fixtures/linux_user_bootstrap_stack.c | 138 + tests/fixtures/linux_user_compiler_smoke.c | 42 + tests/fixtures/linux_user_epoll_eventfd.c | 117 + tests/fixtures/linux_user_fd_control.c | 93 + tests/fixtures/linux_user_file_io.c | 87 + .../fixtures/linux_user_file_io_runtime.toml | 6 + tests/fixtures/linux_user_futex_smoke.c | 67 + tests/fixtures/linux_user_heap_map.c | 116 + tests/fixtures/linux_user_identity_startup.c | 105 + tests/fixtures/linux_user_mprotect_signal.c | 121 + tests/fixtures/linux_user_path_random.c | 119 + .../linux_user_path_random_runtime.toml | 6 + tests/fixtures/linux_user_ppoll_sigaltstack.c | 128 + tests/fixtures/linux_user_pselect6.c | 136 + tests/fixtures/linux_user_setxid_identity.c | 70 + tests/fixtures/linux_user_stat_lseek.c | 136 + tests/fixtures/linux_user_sysinfo_prlimit.c | 114 + tests/fixtures/linux_user_thread_runtime.c | 94 + tests/fixtures/linux_user_tls_rseq.c | 82 + tests/fixtures/linux_user_wait4_nochild.c | 78 + tests/fixtures/linux_user_write_exit.s | 19 + 124 files changed, 23908 insertions(+), 1 deletion(-) create mode 100644 .gitignore create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 crates/camodel/Cargo.toml create mode 100644 crates/camodel/src/backend/execute/e1.rs create mode 100644 crates/camodel/src/backend/execute/e2.rs create mode 100644 crates/camodel/src/backend/execute/e3.rs create mode 100644 crates/camodel/src/backend/execute/e4.rs create mode 100644 crates/camodel/src/backend/execute/mod.rs create mode 100644 
crates/camodel/src/backend/execute/w1.rs create mode 100644 crates/camodel/src/backend/execute/w2.rs create mode 100644 crates/camodel/src/backend/lsu/l1d.rs create mode 100644 crates/camodel/src/backend/lsu/lhq.rs create mode 100644 crates/camodel/src/backend/lsu/liq.rs create mode 100644 crates/camodel/src/backend/lsu/mdb.rs create mode 100644 crates/camodel/src/backend/lsu/mod.rs create mode 100644 crates/camodel/src/backend/lsu/scb.rs create mode 100644 crates/camodel/src/backend/lsu/stq.rs create mode 100644 crates/camodel/src/backend/mod.rs create mode 100644 crates/camodel/src/control/commit/cmt.rs create mode 100644 crates/camodel/src/control/commit/mod.rs create mode 100644 crates/camodel/src/control/commit/rob.rs create mode 100644 crates/camodel/src/control/mod.rs create mode 100644 crates/camodel/src/control/recovery/bru.rs create mode 100644 crates/camodel/src/control/recovery/dynamic_target.rs create mode 100644 crates/camodel/src/control/recovery/fls.rs create mode 100644 crates/camodel/src/control/recovery/mod.rs create mode 100644 crates/camodel/src/core/config.rs create mode 100644 crates/camodel/src/core/engine.rs create mode 100644 crates/camodel/src/core/mod.rs create mode 100644 crates/camodel/src/core/model.rs create mode 100644 crates/camodel/src/core/uop.rs create mode 100644 crates/camodel/src/decode/builder.rs create mode 100644 crates/camodel/src/decode/classify.rs create mode 100644 crates/camodel/src/decode/mod.rs create mode 100644 crates/camodel/src/frontend/decode/d1.rs create mode 100644 crates/camodel/src/frontend/decode/d2.rs create mode 100644 crates/camodel/src/frontend/decode/d3.rs create mode 100644 crates/camodel/src/frontend/decode/mod.rs create mode 100644 crates/camodel/src/frontend/dispatch/checkpoints.rs create mode 100644 crates/camodel/src/frontend/dispatch/mod.rs create mode 100644 crates/camodel/src/frontend/dispatch/redirect.rs create mode 100644 crates/camodel/src/frontend/dispatch/s1.rs create mode 100644 
crates/camodel/src/frontend/dispatch/s2.rs create mode 100644 crates/camodel/src/frontend/fetch/f0.rs create mode 100644 crates/camodel/src/frontend/fetch/f1.rs create mode 100644 crates/camodel/src/frontend/fetch/f2.rs create mode 100644 crates/camodel/src/frontend/fetch/f3.rs create mode 100644 crates/camodel/src/frontend/fetch/f4.rs create mode 100644 crates/camodel/src/frontend/fetch/ib.rs create mode 100644 crates/camodel/src/frontend/fetch/mod.rs create mode 100644 crates/camodel/src/frontend/mod.rs create mode 100644 crates/camodel/src/issue/mod.rs create mode 100644 crates/camodel/src/issue/queues/iq.rs create mode 100644 crates/camodel/src/issue/queues/mod.rs create mode 100644 crates/camodel/src/issue/queues/qtag.rs create mode 100644 crates/camodel/src/issue/queues/ready_tables.rs create mode 100644 crates/camodel/src/issue/select/i1.rs create mode 100644 crates/camodel/src/issue/select/i2.rs create mode 100644 crates/camodel/src/issue/select/mod.rs create mode 100644 crates/camodel/src/issue/select/p1.rs create mode 100644 crates/camodel/src/lib.rs create mode 100644 crates/camodel/src/tests.rs create mode 100644 crates/camodel/src/trace/emit.rs create mode 100644 crates/camodel/src/trace/labels.rs create mode 100644 crates/camodel/src/trace/mod.rs create mode 100644 crates/cosim/Cargo.toml create mode 100644 crates/cosim/src/compare/mod.rs create mode 100644 crates/cosim/src/lib.rs create mode 100644 crates/cosim/src/protocol/mod.rs create mode 100644 crates/cosim/src/qemu/mod.rs create mode 100644 crates/dse/Cargo.toml create mode 100644 crates/dse/src/lib.rs create mode 100644 crates/elf/Cargo.toml create mode 100644 crates/elf/src/lib.rs create mode 100644 crates/funcmodel/Cargo.toml create mode 100644 crates/funcmodel/src/core/mod.rs create mode 100644 crates/funcmodel/src/exec/mod.rs create mode 100644 crates/funcmodel/src/lib.rs create mode 100644 crates/funcmodel/src/memory/mod.rs create mode 100644 crates/funcmodel/src/syscalls/mod.rs create 
mode 100644 crates/funcmodel/src/trace/mod.rs create mode 100644 crates/isa/Cargo.toml create mode 100644 crates/isa/src/lib.rs create mode 100644 crates/lx-tools/Cargo.toml create mode 100644 crates/lx-tools/src/bin/lx-cosim.rs create mode 100644 crates/lx-tools/src/bin/lx-run.rs create mode 100644 crates/lx-tools/src/bin/lx-sweep.rs create mode 100644 crates/lx-tools/src/bin/lx-trace.rs create mode 100644 crates/lx-tools/src/cli/mod.rs create mode 100644 crates/lx-tools/src/lib.rs create mode 100644 crates/runtime/Cargo.toml create mode 100644 crates/runtime/src/lib.rs create mode 100644 crates/trace/Cargo.toml create mode 100644 crates/trace/src/commit/mod.rs create mode 100644 crates/trace/src/lib.rs create mode 100644 crates/trace/src/linxtrace/mod.rs create mode 100644 crates/trace/src/schema/mod.rs create mode 100644 tests/fixtures/bootstrap_runtime.toml create mode 100644 tests/fixtures/bootstrap_sweep.toml create mode 100644 tests/fixtures/file_io_input.txt create mode 100644 tests/fixtures/linux_user_bootstrap_runtime.toml create mode 100644 tests/fixtures/linux_user_bootstrap_stack.c create mode 100644 tests/fixtures/linux_user_compiler_smoke.c create mode 100644 tests/fixtures/linux_user_epoll_eventfd.c create mode 100644 tests/fixtures/linux_user_fd_control.c create mode 100644 tests/fixtures/linux_user_file_io.c create mode 100644 tests/fixtures/linux_user_file_io_runtime.toml create mode 100644 tests/fixtures/linux_user_futex_smoke.c create mode 100644 tests/fixtures/linux_user_heap_map.c create mode 100644 tests/fixtures/linux_user_identity_startup.c create mode 100644 tests/fixtures/linux_user_mprotect_signal.c create mode 100644 tests/fixtures/linux_user_path_random.c create mode 100644 tests/fixtures/linux_user_path_random_runtime.toml create mode 100644 tests/fixtures/linux_user_ppoll_sigaltstack.c create mode 100644 tests/fixtures/linux_user_pselect6.c create mode 100644 tests/fixtures/linux_user_setxid_identity.c create mode 100644 
tests/fixtures/linux_user_stat_lseek.c create mode 100644 tests/fixtures/linux_user_sysinfo_prlimit.c create mode 100644 tests/fixtures/linux_user_thread_runtime.c create mode 100644 tests/fixtures/linux_user_tls_rseq.c create mode 100644 tests/fixtures/linux_user_wait4_nochild.c create mode 100644 tests/fixtures/linux_user_write_exit.s diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ee920b2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +target/ +out/ +*.log +*.json +*.jsonl +*.linxtrace +*.md.tmp diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..d002c33 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,753 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "anstream" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" + +[[package]] +name = "anstyle-parse" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" 
+dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys", +] + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "bitflags" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" + +[[package]] +name = "camodel" +version = "0.1.0" +dependencies = [ + "anyhow", + "elf", + "funcmodel", + "isa", + "runtime", + "serde", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "clap" +version = "4.5.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" + +[[package]] +name = "colorchoice" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" + +[[package]] +name = "cosim" +version = "0.1.0" +dependencies = [ + "anyhow", + "isa", + "serde", + "serde_json", +] + +[[package]] +name = "dse" +version = "0.1.0" +dependencies = [ + "anyhow", + "camodel", + "elf", + "funcmodel", + "isa", + "runtime", + "serde", + "toml", +] + +[[package]] +name = "elf" +version = "0.1.0" +dependencies = [ + "anyhow", + "goblin", + "serde", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "funcmodel" +version = "0.1.0" +dependencies = [ + "anyhow", + "elf", + "isa", + "libc", + "runtime", + "serde", + "tempfile", +] + +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", + "wasip3", +] + +[[package]] +name = "goblin" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "983a6aafb3b12d4c41ea78d39e189af4298ce747353945ff5105b54a056e5cd9" +dependencies = [ + "log", + "plain", + "scroll", +] + +[[package]] +name = 
"hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" + +[[package]] +name = "isa" +version = "0.1.0" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "libc" +version = "0.2.183" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d" + 
+[[package]] +name = "linux-raw-sys" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lx-tools" +version = "0.1.0" +dependencies = [ + "anyhow", + "camodel", + "clap", + "cosim", + "dse", + "elf", + "funcmodel", + "isa", + "runtime", + "serde_json", + "trace", +] + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + +[[package]] +name = "runtime" +version = "0.1.0" +dependencies = [ + "anyhow", + "elf", + "isa", + "libc", + "serde", + "toml", +] + +[[package]] +name = "rustix" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "scroll" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1257cd4248b4132760d6524d6dda4e053bc648c9070b960929bf50cfb1e7add" +dependencies = [ + "scroll_derive", +] + +[[package]] +name = "scroll_derive" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed76efe62313ab6610570951494bdaa81568026e0318eaa55f167de70eeea67d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name 
= "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_spanned" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +dependencies = [ + "serde_core", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" +dependencies = [ + "fastrand", + "getrandom", + "once_cell", + "rustix", + "windows-sys", +] + +[[package]] +name = "toml" +version = "0.9.12+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" +dependencies = [ + "indexmap", + "serde_core", + "serde_spanned", + "toml_datetime", + "toml_parser", + "toml_writer", + "winnow", +] + +[[package]] +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_parser" +version = "1.0.9+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_writer" +version = "1.0.6+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" + +[[package]] +name = "trace" +version = "0.1.0" +dependencies = [ + "anyhow", + "isa", + "serde", + "serde_json", + "tempfile", +] + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "wasip2" +version = "1.0.2+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "winnow" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945" + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..302c85f --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,20 @@ +[workspace] +members = ["crates/*"] +resolver = "2" + +[workspace.package] +version = "0.1.0" +edition = "2024" +license = "Apache-2.0" +authors = ["LinxISA"] + +[workspace.dependencies] +anyhow = "1.0" +clap = { version = "4.5", features = ["derive"] } +goblin = "0.10" +libc = "0.2" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +tempfile 
= "3.15" +toml = "0.9" + diff --git a/README.md b/README.md index d48e131..4c4db4c 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,84 @@ # LinxCoreModel -LinxCore architectural model repository + +LinxCoreModel is the Rust modeling workspace for LinxCore. + +Current workspace contents: + +- `isa`: shared architectural types and trace contracts +- `elf`: static ELF loading for LinxISA user-mode programs +- `runtime`: guest runtime state and syscall shim scaffolding +- `funcmodel`: first functional engine path +- `camodel`: first stage-structured cycle engine path +- `trace`: commit JSONL and `linxtrace.v1` emission +- `cosim`: M1 JSONL lockstep protocol helpers +- `dse`: sweep and report generation +- `lx-tools`: `lx-run`, `lx-cosim`, `lx-trace`, `lx-sweep` + +Current crate layout: + +- `camodel`: `core/`, `frontend/`, `issue/`, `backend/`, `control/`, `decode/`, `trace/` +- `funcmodel`: `core/`, `exec/`, `memory/`, `syscalls/`, `trace/` +- `trace`: `linxtrace/`, `commit/`, `schema/` +- `cosim`: `protocol/`, `compare/`, `qemu/` +- `lx-tools`: shared helpers in `cli/`, thin binaries in `bin/` + +The current implementation lands the full workspace shape, executable `lx-*` +surface, static ELF loading, runtime bootstrap, syscall allowlist scaffolding, +commit/pipeview emission, lockstep compare helpers, and a full table-driven +LinxISA `v0.4` decoder sourced from the canonical ISA JSON. 
+ +Current functional-engine status: + +- decodes the full canonical `v0.4` instruction corpus, including 16/32/48/64-bit forms +- bootstraps a Linux-style user stack and places the initial stack pointer in `sp` +- executes a substantial user-mode scalar subset in Rust: + ALU/immediate ops, `W` variants, compares, shifts, loads/stores, `SETC`, `SETRET`, + `J/JR`, `SSRGET/SSRSET`, compressed scalar forms, and basic block/macro control flow +- forwards the current user-mode syscall subset through a host shim: + `read`, `write`, `eventfd2`, `epoll_create1`, `epoll_ctl`, `epoll_pwait`, `openat`, `close`, `lseek`, `fstat`, `futex`, + `getcwd`, `dup3`, `fcntl`, `ioctl`, `pipe2`, `pselect6`, `ppoll`, `newfstatat`, `readlinkat`, `getrandom`, + `prctl`, `madvise`, `membarrier`, `rseq`, + `sigaltstack`, `set_tid_address`, `set_robust_list`, `setuid`, `setgid`, `setresuid`, `getresuid`, + `setresgid`, `getresgid`, `uname`, `getppid`, `wait4`, `sysinfo`, `prlimit64`, + `brk`, `mmap`, `munmap`, `mprotect`, + `rt_sigaction`, `rt_sigprocmask`, `clock_gettime`, `getpid`, `getuid`, + `geteuid`, `getgid`, `getegid`, `gettid`, `exit`, `exit_group` +- enforces guest page permissions for architectural loads/stores and syscall + copy-in/copy-out, including `mprotect`-driven access changes +- normalizes host errno values to Linux guest errno numbers before returning + through the Linx Linux-user syscall ABI +- emits commit JSONL and `linxtrace.v1` traces from the executing functional path + +Still incomplete in this phase: + +- many ISA classes remain unsupported in the functional executor, especially FP, + atomics, wide HL forms, and full privileged behavior +- syscall ABI coverage is pragmatic bring-up coverage, not a complete Linux user ABI +- `futex` currently implements the single-process `WAIT/WAKE` subset only; there is + no real waiter queue or multi-thread scheduler yet +- `sysinfo` and `prlimit64` currently expose deterministic single-process model + values suitable for libc 
bring-up, not host-kernel passthrough +- `setxid` coverage is currently single-process identity-state emulation for + libc startup and credential probes, not full Linux credential or threading semantics +- `getrandom` currently returns deterministic model-generated bytes so user-mode + bring-up and regression traces stay reproducible across hosts +- `prctl`, `madvise`, and `membarrier` currently implement the small single-process + subset needed for libc/thread-runtime bring-up, not the full Linux kernel surface +- `rseq` currently supports deterministic single-thread register/unregister bring-up + semantics and initializes the guest ABI fields the runtime is expected to read +- `ioctl` currently implements deterministic tty-style `TIOCGWINSZ`, + `TIOCGPGRP`, and `TIOCSPGRP` behavior for guest stdio fds only +- `ppoll` currently marshals guest `pollfd` arrays onto host `poll(2)` and ignores + signal-mask effects beyond guest-memory validation +- `pselect6` currently marshals guest `fd_set` bitmaps onto host `poll(2)` and + validates, but does not apply, guest signal masks +- `eventfd2` and `epoll_*` currently run through a deterministic user-mode shim: + eventfd readiness is backed by internal counter state plus a pollable host pipe, + and epoll wait reuses host `poll(2)` across registered guest fds +- `wait4` currently reports deterministic single-process `-ECHILD` semantics after + validating guest status/rusage pointers; there is not yet a modeled child-task table +- `sigaltstack` currently tracks deterministic single-thread alternate-stack state + for libc/runtime queries; it does not imply modeled signal delivery yet +- bootstrap auxv now includes deterministic `AT_MINSIGSTKSZ`, `AT_PLATFORM`, + `AT_HWCAP*`, `AT_CLKTCK`, and `AT_SYSINFO_EHDR` entries for libc startup paths +- cycle-accurate execution and QEMU lockstep remain separate follow-on work diff --git a/crates/camodel/Cargo.toml b/crates/camodel/Cargo.toml new file mode 100644 index 0000000..dd9d90b 
--- /dev/null +++ b/crates/camodel/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "camodel" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow.workspace = true +serde.workspace = true +elf = { path = "../elf" } +funcmodel = { path = "../funcmodel" } +isa = { path = "../isa" } +runtime = { path = "../runtime" } diff --git a/crates/camodel/src/backend/execute/e1.rs b/crates/camodel/src/backend/execute/e1.rs new file mode 100644 index 0000000..32f08e5 --- /dev/null +++ b/crates/camodel/src/backend/execute/e1.rs @@ -0,0 +1 @@ +// E1 stage placeholder. diff --git a/crates/camodel/src/backend/execute/e2.rs b/crates/camodel/src/backend/execute/e2.rs new file mode 100644 index 0000000..8627d5f --- /dev/null +++ b/crates/camodel/src/backend/execute/e2.rs @@ -0,0 +1 @@ +// E2 stage placeholder. diff --git a/crates/camodel/src/backend/execute/e3.rs b/crates/camodel/src/backend/execute/e3.rs new file mode 100644 index 0000000..d7a972c --- /dev/null +++ b/crates/camodel/src/backend/execute/e3.rs @@ -0,0 +1 @@ +// E3 stage placeholder. diff --git a/crates/camodel/src/backend/execute/e4.rs b/crates/camodel/src/backend/execute/e4.rs new file mode 100644 index 0000000..4fdc21e --- /dev/null +++ b/crates/camodel/src/backend/execute/e4.rs @@ -0,0 +1 @@ +// E4 stage placeholder. diff --git a/crates/camodel/src/backend/execute/mod.rs b/crates/camodel/src/backend/execute/mod.rs new file mode 100644 index 0000000..f3fe89a --- /dev/null +++ b/crates/camodel/src/backend/execute/mod.rs @@ -0,0 +1,6 @@ +pub mod e1; +pub mod e2; +pub mod e3; +pub mod e4; +pub mod w1; +pub mod w2; diff --git a/crates/camodel/src/backend/execute/w1.rs b/crates/camodel/src/backend/execute/w1.rs new file mode 100644 index 0000000..3e19f82 --- /dev/null +++ b/crates/camodel/src/backend/execute/w1.rs @@ -0,0 +1 @@ +// W1 stage placeholder. 
diff --git a/crates/camodel/src/backend/execute/w2.rs b/crates/camodel/src/backend/execute/w2.rs new file mode 100644 index 0000000..e5c0166 --- /dev/null +++ b/crates/camodel/src/backend/execute/w2.rs @@ -0,0 +1 @@ +// W2 stage placeholder. diff --git a/crates/camodel/src/backend/lsu/l1d.rs b/crates/camodel/src/backend/lsu/l1d.rs new file mode 100644 index 0000000..455def7 --- /dev/null +++ b/crates/camodel/src/backend/lsu/l1d.rs @@ -0,0 +1 @@ +// L1D namespace placeholder. diff --git a/crates/camodel/src/backend/lsu/lhq.rs b/crates/camodel/src/backend/lsu/lhq.rs new file mode 100644 index 0000000..e23503c --- /dev/null +++ b/crates/camodel/src/backend/lsu/lhq.rs @@ -0,0 +1 @@ +// LHQ namespace placeholder. diff --git a/crates/camodel/src/backend/lsu/liq.rs b/crates/camodel/src/backend/lsu/liq.rs new file mode 100644 index 0000000..17bf0b6 --- /dev/null +++ b/crates/camodel/src/backend/lsu/liq.rs @@ -0,0 +1 @@ +// LIQ namespace placeholder. diff --git a/crates/camodel/src/backend/lsu/mdb.rs b/crates/camodel/src/backend/lsu/mdb.rs new file mode 100644 index 0000000..e06544b --- /dev/null +++ b/crates/camodel/src/backend/lsu/mdb.rs @@ -0,0 +1 @@ +// MDB namespace placeholder. 
diff --git a/crates/camodel/src/backend/lsu/mod.rs b/crates/camodel/src/backend/lsu/mod.rs new file mode 100644 index 0000000..7b2df37 --- /dev/null +++ b/crates/camodel/src/backend/lsu/mod.rs @@ -0,0 +1,257 @@ +pub mod l1d; +pub mod lhq; +pub mod liq; +pub mod mdb; +pub mod scb; +pub mod stq; + +use std::collections::VecDeque; + +use crate::{ + CycleRunOptions, CycleUop, ISSUE_WIDTH, L1D_WIDTH, L1dEntry, L1dTxnKind, LSU_WIDTH, MdbEntry, + ScbEntry, StageQueues, rob_age_rank, should_inject_load_miss, +}; + +pub(crate) fn advance_execute( + cycle: u64, + pipeline: &mut StageQueues, + uops: &mut [CycleUop], + options: &CycleRunOptions, +) { + pipeline.w2.clear(); + let mut prev_w1 = std::mem::take(&mut pipeline.w1); + advance_simple(&mut pipeline.w2, &mut prev_w1, ISSUE_WIDTH); + pipeline.w1 = prev_w1; + + let mut prev_e4 = std::mem::take(&mut pipeline.e4); + let mut stay_e4 = VecDeque::new(); + while let Some(seq) = prev_e4.pop_front() { + if should_inject_load_miss(seq, options, uops) { + let restart_cycle = cycle.saturating_add(options.load_miss_penalty.max(1)); + let uop = &mut uops[seq]; + uop.miss_injected = true; + uop.miss_pending_until = Some(restart_cycle); + uop.pick_wakeup_visible = None; + uop.data_ready_visible = None; + uop.e1_cycle = None; + uop.e4_cycle = None; + uop.w1_cycle = None; + uop.done_cycle = None; + pipeline.liq.push_back(crate::LiqEntry { + seq, + refill_ready_cycle: restart_cycle, + }); + pipeline.mdb.push_back(MdbEntry { + seq, + refill_ready_cycle: restart_cycle, + }); + remove_queue_entry(&mut pipeline.lhq, seq); + } else if lsid_cache_ready(seq, pipeline.lsid_cache_ptr, uops) + && can_accept_l1d(&pipeline.l1d) + { + uops[seq].miss_pending_until = None; + pipeline.l1d.push_back(L1dEntry { + seq, + kind: L1dTxnKind::LoadHit, + ready_cycle: cycle.saturating_add(1), + }); + } else { + stay_e4.push_back(seq); + } + } + pipeline.e4 = stay_e4; + + let mut prev_e3 = std::mem::take(&mut pipeline.e3); + advance_simple(&mut pipeline.e4, &mut 
prev_e3, LSU_WIDTH); + pipeline.e3 = prev_e3; + + let mut prev_e2 = std::mem::take(&mut pipeline.e2); + advance_simple(&mut pipeline.e3, &mut prev_e2, LSU_WIDTH); + pipeline.e2 = prev_e2; + + let mut stay_e1 = VecDeque::new(); + let prev_e1 = std::mem::take(&mut pipeline.e1); + for seq in prev_e1 { + if uops[seq].is_load { + if pipeline.e2.len() < LSU_WIDTH { + pipeline.e2.push_back(seq); + } else { + stay_e1.push_back(seq); + } + } else if pipeline.w1.len() < ISSUE_WIDTH { + pipeline.w1.push_back(seq); + } else { + stay_e1.push_back(seq); + } + } + pipeline.e1 = stay_e1; + + for entry in &mut pipeline.w2 { + if uops[*entry].done_cycle.is_none() && uops[*entry].w1_cycle == Some(cycle) { + // keep order stable; completion is tagged in `tag_stage_cycles`. + } + } +} + +pub(crate) fn advance_liq( + cycle: u64, + pipeline: &mut StageQueues, + uops: &mut [CycleUop], + rob: &VecDeque, +) { + if !load_slot_available(&pipeline.e1, uops) { + return; + } + + let mut entries = std::mem::take(&mut pipeline.liq) + .into_iter() + .collect::>(); + let Some((selected_idx, _)) = entries + .iter() + .enumerate() + .filter(|(_, entry)| { + entry.refill_ready_cycle <= cycle + && lsid_cache_ready(entry.seq, pipeline.lsid_cache_ptr, uops) + }) + .min_by_key(|(_, entry)| rob_age_rank(entry.seq, rob)) + else { + pipeline.liq = entries.into_iter().collect(); + return; + }; + + let selected = entries.remove(selected_idx); + uops[selected.seq].miss_pending_until = None; + lhq_insert(&mut pipeline.lhq, selected.seq); + remove_mdb_entry(&mut pipeline.mdb, selected.seq); + pipeline.e1.push_back(selected.seq); + pipeline.liq = entries.into_iter().collect(); +} + +pub(crate) fn advance_l1d(cycle: u64, pipeline: &mut StageQueues) { + let mut remaining = std::mem::take(&mut pipeline.l1d); + while let Some(entry) = remaining.pop_front() { + if entry.ready_cycle > cycle { + pipeline.l1d.push_back(entry); + continue; + } + + match entry.kind { + L1dTxnKind::LoadHit => { + if pipeline.w1.len() < 
ISSUE_WIDTH { + remove_queue_entry(&mut pipeline.lhq, entry.seq); + pipeline.w1.push_back(entry.seq); + pipeline.lsid_cache_ptr = pipeline.lsid_cache_ptr.saturating_add(1); + } else { + pipeline.l1d.push_back(entry); + } + } + L1dTxnKind::StoreDrain => { + pipeline.lsid_cache_ptr = pipeline.lsid_cache_ptr.saturating_add(1); + } + } + } + for entry in remaining { + pipeline.l1d.push_back(entry); + } +} + +pub(crate) fn lhq_insert(lhq: &mut VecDeque, seq: usize) { + if !lhq.contains(&seq) { + lhq.push_back(seq); + } +} + +pub(crate) fn stq_insert(stq: &mut VecDeque, seq: usize) { + if !stq.contains(&seq) { + stq.push_back(seq); + } +} + +pub(crate) fn remove_queue_entry(queue: &mut VecDeque, seq: usize) { + queue.retain(|&entry_seq| entry_seq != seq); +} + +pub(crate) fn remove_mdb_entry(mdb: &mut VecDeque, seq: usize) { + mdb.retain(|entry| entry.seq != seq); +} + +pub(crate) fn advance_scb(cycle: u64, pipeline: &mut StageQueues, uops: &[CycleUop]) { + while can_accept_l1d(&pipeline.l1d) { + let Some(idx) = pipeline.scb.iter().position(|entry| { + scb_entry_drain_ready(entry, cycle) + && lsid_cache_ready(entry.seq, pipeline.lsid_cache_ptr, uops) + }) else { + break; + }; + let entry = pipeline + .scb + .remove(idx) + .expect("ready SCB entry should exist"); + pipeline.l1d.push_back(L1dEntry { + seq: entry.seq, + kind: L1dTxnKind::StoreDrain, + ready_cycle: cycle.saturating_add(1), + }); + } +} + +fn scb_entry_drain_ready(entry: &ScbEntry, cycle: u64) -> bool { + entry.enqueue_cycle < cycle +} + +pub(crate) fn load_forward_visible(seq: usize, pipeline: &StageQueues, uops: &[CycleUop]) -> bool { + if !uops[seq].is_load { + return false; + } + pipeline + .stq + .iter() + .copied() + .any(|store_seq| store_seq < seq && store_matches_load(store_seq, seq, uops)) + || pipeline + .scb + .iter() + .any(|entry| entry.seq < seq && store_matches_load(entry.seq, seq, uops)) + || pipeline.l1d.iter().any(|entry| { + entry.kind == L1dTxnKind::StoreDrain + && entry.seq < seq + && 
store_matches_load(entry.seq, seq, uops) + }) +} + +fn store_matches_load(store_seq: usize, load_seq: usize, uops: &[CycleUop]) -> bool { + let store = &uops[store_seq].commit; + let load = &uops[load_seq].commit; + uops[store_seq].is_store + && uops[load_seq].is_load + && store.mem_addr == load.mem_addr + && store.mem_size == load.mem_size +} + +pub(crate) fn e1_can_accept(seq: usize, e1: &VecDeque, uops: &[CycleUop]) -> bool { + e1.len() < ISSUE_WIDTH && (!uops[seq].is_load || load_slot_available(e1, uops)) +} + +pub(crate) fn load_slot_available(queue: &VecDeque, uops: &[CycleUop]) -> bool { + queue.iter().filter(|&&seq| uops[seq].is_load).count() < LSU_WIDTH +} + +pub(crate) fn can_accept_l1d(l1d: &VecDeque) -> bool { + l1d.len() < L1D_WIDTH +} + +pub(crate) fn lsid_cache_ready(seq: usize, lsid_cache_ptr: usize, uops: &[CycleUop]) -> bool { + uops[seq] + .load_store_id + .map(|load_store_id| load_store_id == lsid_cache_ptr) + .unwrap_or(true) +} + +fn advance_simple(dst: &mut VecDeque, src: &mut VecDeque, capacity: usize) { + while dst.len() < capacity { + let Some(seq) = src.pop_front() else { + break; + }; + dst.push_back(seq); + } +} diff --git a/crates/camodel/src/backend/lsu/scb.rs b/crates/camodel/src/backend/lsu/scb.rs new file mode 100644 index 0000000..cb7a073 --- /dev/null +++ b/crates/camodel/src/backend/lsu/scb.rs @@ -0,0 +1 @@ +// SCB namespace placeholder. diff --git a/crates/camodel/src/backend/lsu/stq.rs b/crates/camodel/src/backend/lsu/stq.rs new file mode 100644 index 0000000..656c7fd --- /dev/null +++ b/crates/camodel/src/backend/lsu/stq.rs @@ -0,0 +1 @@ +// STQ namespace placeholder. 
diff --git a/crates/camodel/src/backend/mod.rs b/crates/camodel/src/backend/mod.rs new file mode 100644 index 0000000..654a9e0 --- /dev/null +++ b/crates/camodel/src/backend/mod.rs @@ -0,0 +1,2 @@ +pub mod execute; +pub mod lsu; diff --git a/crates/camodel/src/control/commit/cmt.rs b/crates/camodel/src/control/commit/cmt.rs new file mode 100644 index 0000000..564537b --- /dev/null +++ b/crates/camodel/src/control/commit/cmt.rs @@ -0,0 +1 @@ +// CMT namespace placeholder. diff --git a/crates/camodel/src/control/commit/mod.rs b/crates/camodel/src/control/commit/mod.rs new file mode 100644 index 0000000..f010c7e --- /dev/null +++ b/crates/camodel/src/control/commit/mod.rs @@ -0,0 +1,96 @@ +pub mod cmt; +pub mod rob; + +use std::collections::VecDeque; + +use isa::{CommitRecord, StageTraceEvent}; +use runtime::GuestRuntime; + +use crate::{ + COMMIT_WIDTH, CycleUop, ScbEntry, StageQueues, branch_kind_label, + call_materialization_kind_label, dynamic_target_source_kind_label, insert_ready_table_tag, + live_boundary_epoch_for_seq, live_branch_kind_for_seq, live_call_materialization_kind_for_seq, + live_control_target_owner_row_id_for_seq, live_dynamic_target_producer_kind_for_seq, + live_dynamic_target_setup_epoch_for_seq, live_dynamic_target_source_epoch_for_seq, + live_dynamic_target_source_kind_for_seq, live_dynamic_target_source_owner_row_id_for_seq, + live_return_consumer_kind_for_seq, live_rob_checkpoint_id_for_seq, remove_queue_entry, + return_consumer_kind_label, stage_event_with_meta, +}; + +pub(crate) fn retire_ready( + cycle: u64, + runtime: &GuestRuntime, + rob: &mut VecDeque, + committed: &mut Vec, + retired_seqs: &mut Vec, + pipeline: &mut StageQueues, + uops: &mut [CycleUop], + stage_events: &mut Vec, +) -> Option { + let mut retired_this_cycle = 0usize; + let mut trap_retired = None; + while retired_this_cycle < COMMIT_WIDTH { + let Some(&seq) = rob.front() else { + break; + }; + if uops[seq].done_cycle.is_none() { + break; + } + let mut commit = 
uops[seq].commit.clone(); + if let Some(pending_trap) = pipeline.pending_trap.filter(|pending| pending.seq == seq) { + commit.trap_valid = 1; + commit.trap_cause = pending_trap.cause; + commit.traparg0 = pending_trap.traparg0; + trap_retired = Some(pending_trap.cause); + pipeline.pending_trap = None; + } + let trap_cause = (commit.trap_valid != 0).then_some(commit.trap_cause); + let traparg0 = (commit.trap_valid != 0).then_some(commit.traparg0); + commit.cycle = cycle; + committed.push(commit); + retired_seqs.push(seq); + stage_events.push(stage_event_with_meta( + cycle, + runtime, + uops, + seq, + "CMT", + "retire", + Some(live_rob_checkpoint_id_for_seq(seq, pipeline, uops)), + trap_cause, + traparg0, + live_dynamic_target_setup_epoch_for_seq(seq, pipeline, uops), + live_boundary_epoch_for_seq(seq, pipeline, uops), + live_dynamic_target_source_owner_row_id_for_seq(seq, pipeline, uops).as_deref(), + live_dynamic_target_source_epoch_for_seq(seq, pipeline, uops), + live_control_target_owner_row_id_for_seq(seq, pipeline, uops).as_deref(), + live_dynamic_target_producer_kind_for_seq(seq, pipeline, uops) + .map(return_consumer_kind_label), + live_branch_kind_for_seq(seq, pipeline, uops).and_then(branch_kind_label), + live_return_consumer_kind_for_seq(seq, pipeline, uops).map(return_consumer_kind_label), + live_call_materialization_kind_for_seq(seq, pipeline, uops) + .map(call_materialization_kind_label), + live_dynamic_target_source_kind_for_seq(seq, pipeline, uops) + .map(dynamic_target_source_kind_label), + )); + rob.pop_front(); + if let Some(tag) = uops[seq].dst_logical_tag { + insert_ready_table_tag(pipeline, tag); + } + if uops[seq].is_store { + remove_queue_entry(&mut pipeline.stq, seq); + pipeline.scb.push_back(ScbEntry { + seq, + enqueue_cycle: cycle, + }); + } + retired_this_cycle += 1; + } + trap_retired +} + +pub(crate) fn rob_age_rank(seq: usize, rob: &VecDeque) -> usize { + rob.iter() + .position(|&rob_seq| rob_seq == seq) + .unwrap_or(usize::MAX) +} diff 
--git a/crates/camodel/src/control/commit/rob.rs b/crates/camodel/src/control/commit/rob.rs new file mode 100644 index 0000000..3b99071 --- /dev/null +++ b/crates/camodel/src/control/commit/rob.rs @@ -0,0 +1 @@ +// ROB namespace placeholder. diff --git a/crates/camodel/src/control/mod.rs b/crates/camodel/src/control/mod.rs new file mode 100644 index 0000000..3e31238 --- /dev/null +++ b/crates/camodel/src/control/mod.rs @@ -0,0 +1,2 @@ +pub mod commit; +pub mod recovery; diff --git a/crates/camodel/src/control/recovery/bru.rs b/crates/camodel/src/control/recovery/bru.rs new file mode 100644 index 0000000..9007646 --- /dev/null +++ b/crates/camodel/src/control/recovery/bru.rs @@ -0,0 +1 @@ +// BRU recovery namespace placeholder. diff --git a/crates/camodel/src/control/recovery/dynamic_target.rs b/crates/camodel/src/control/recovery/dynamic_target.rs new file mode 100644 index 0000000..a4ff03e --- /dev/null +++ b/crates/camodel/src/control/recovery/dynamic_target.rs @@ -0,0 +1 @@ +// Dynamic-target recovery namespace placeholder. diff --git a/crates/camodel/src/control/recovery/fls.rs b/crates/camodel/src/control/recovery/fls.rs new file mode 100644 index 0000000..6a2ea16 --- /dev/null +++ b/crates/camodel/src/control/recovery/fls.rs @@ -0,0 +1 @@ +// FLS recovery namespace placeholder. 
diff --git a/crates/camodel/src/control/recovery/mod.rs b/crates/camodel/src/control/recovery/mod.rs new file mode 100644 index 0000000..fe27d71 --- /dev/null +++ b/crates/camodel/src/control/recovery/mod.rs @@ -0,0 +1,602 @@ +pub mod bru; +pub mod dynamic_target; +pub mod fls; + +use std::collections::{BTreeSet, VecDeque}; + +use isa::{ + TRAP_BRU_RECOVERY_NOT_BSTART, TRAP_DYNAMIC_TARGET_MISSING, TRAP_DYNAMIC_TARGET_NOT_BSTART, + TRAP_DYNAMIC_TARGET_STALE, +}; + +use crate::{ + BranchOwnerKind, BruCorrectionState, CycleUop, FrontendRedirectState, IqEntry, + PendingFlushState, PendingTrapState, StageQueues, branch_context_for_seq, + deferred_bru_correction_target, is_boundary_redirect_owner, legal_redirect_restart_seq, + live_boundary_target_for_seq, live_branch_kind_for_seq, live_rob_checkpoint_id_for_seq, + prune_iq_wait_crossbar_on_redirect, rebuild_iq_owner_table, recovery_checkpoint_id_for_seq, + recovery_epoch_for_seq, restore_ready_tables_for_checkpoint, +}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct ResolvedFrontendRedirect { + pub(crate) source_seq: usize, + pub(crate) target_pc: u64, + pub(crate) checkpoint_id: u8, + pub(crate) from_correction: bool, +} + +pub(crate) fn redirect_resolve_cycle(uop: &CycleUop) -> Option { + if uop.redirect_target.is_some() { + uop.w1_cycle.or(uop.done_cycle) + } else { + None + } +} + +pub(crate) fn unresolved_redirect_barrier( + next_fetch_seq: usize, + uops: &[CycleUop], +) -> Option { + (0..next_fetch_seq).find(|&seq| { + uops[seq].redirect_target.is_some() && redirect_resolve_cycle(&uops[seq]).is_none() + }) +} + +pub(crate) fn publish_bru_correction_state( + cycle: u64, + pipeline: &mut StageQueues, + uops: &[CycleUop], +) { + let fault = uops + .iter() + .enumerate() + .filter_map(|(seq, _uop)| { + let candidate = bru_correction_candidate(seq, cycle, pipeline, uops)?; + if !candidate.actual_take { + return None; + } + legal_redirect_restart_seq(seq, candidate.target_pc, uops) + .is_none() + 
.then_some(PendingTrapState { + seq, + cause: TRAP_BRU_RECOVERY_NOT_BSTART, + traparg0: uops[seq].commit.pc, + checkpoint_id: candidate.checkpoint_id, + visible_cycle: cycle, + }) + }) + .min_by_key(|trap| trap.seq); + + if let Some(fault) = fault { + pipeline.pending_trap = Some(match pipeline.pending_trap { + Some(active) if active.seq < fault.seq => active, + _ => fault, + }); + return; + } + + let next = uops + .iter() + .enumerate() + .filter_map(|(seq, _uop)| bru_correction_candidate(seq, cycle, pipeline, uops)) + .map(|candidate| BruCorrectionState { + source_seq: candidate.source_seq, + epoch: candidate.epoch, + actual_take: candidate.actual_take, + target_pc: candidate.target_pc, + checkpoint_id: candidate.checkpoint_id, + visible_cycle: candidate.visible_cycle, + }) + .max_by_key(|state| (state.epoch, state.source_seq)); + + if let Some(next) = next { + pipeline.pending_bru_correction = Some(match pipeline.pending_bru_correction { + Some(active) if active.source_seq > next.source_seq => active, + _ => next, + }); + } +} + +pub(crate) fn publish_dynamic_boundary_target_fault_state( + cycle: u64, + pipeline: &mut StageQueues, + uops: &[CycleUop], +) { + let fault = uops + .iter() + .enumerate() + .filter_map(|(seq, _uop)| dynamic_boundary_target_fault(seq, cycle, pipeline, uops)) + .min_by_key(|trap| trap.seq); + + if let Some(fault) = fault { + pipeline.pending_trap = Some(match pipeline.pending_trap { + Some(active) if active.seq < fault.seq => active, + _ => fault, + }); + } +} + +pub(crate) fn publish_call_header_fault_state( + cycle: u64, + pipeline: &mut StageQueues, + uops: &[CycleUop], +) { + let fault = pipeline + .seq_call_header_faults + .iter() + .filter_map(|(&seq, &cause)| { + let uop = uops.get(seq)?; + let visible_cycle = uop.w1_cycle.or(uop.done_cycle)?; + (visible_cycle == cycle).then_some(PendingTrapState { + seq, + cause, + traparg0: uop.commit.pc, + checkpoint_id: recovery_checkpoint_id_for_seq(seq, pipeline, uops), + visible_cycle: 
cycle, + }) + }) + .min_by_key(|trap| trap.seq); + + if let Some(fault) = fault { + pipeline.pending_trap = Some(match pipeline.pending_trap { + Some(active) if active.seq < fault.seq => active, + _ => fault, + }); + } +} + +pub(crate) fn prune_speculative_state_on_redirect( + cycle: u64, + pipeline: &mut StageQueues, + iq: &mut Vec, + rob: &mut VecDeque, + uops: &[CycleUop], +) { + let Some(flush_seq) = active_flush(cycle, pipeline, uops).map(|flush| flush.flush_seq) else { + return; + }; + + for queue in &mut pipeline.frontend { + queue.retain(|&seq| seq <= flush_seq); + } + pipeline.p1.retain(|&seq| seq <= flush_seq); + pipeline.i1.retain(|&seq| seq <= flush_seq); + pipeline.i2.retain(|&seq| seq <= flush_seq); + pipeline.e1.retain(|&seq| seq <= flush_seq); + pipeline.e2.retain(|&seq| seq <= flush_seq); + pipeline.e3.retain(|&seq| seq <= flush_seq); + pipeline.e4.retain(|&seq| seq <= flush_seq); + pipeline.w1.retain(|&seq| seq <= flush_seq); + pipeline.w2.retain(|&seq| seq <= flush_seq); + pipeline.iq_tags.retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_checkpoint_ids + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_rob_checkpoint_ids + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_recovery_checkpoint_ids + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_recovery_epochs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_dynamic_target_pcs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_boundary_target_pcs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_boundary_target_owner_seqs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_boundary_target_producer_kinds + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_boundary_target_setup_epochs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_boundary_target_source_owner_seqs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_boundary_target_source_epochs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_boundary_target_source_kinds + 
.retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_return_consumer_kinds + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_call_return_target_pcs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_call_return_target_owner_seqs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_call_return_target_epochs + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_call_materialization_kinds + .retain(|&seq, _| seq <= flush_seq); + pipeline + .seq_call_header_faults + .retain(|&seq, _| seq <= flush_seq); + prune_iq_wait_crossbar_on_redirect(&mut pipeline.qtag_wait_crossbar, flush_seq); + iq.retain(|entry| entry.seq <= flush_seq); + rebuild_iq_owner_table(&mut pipeline.iq_owner_table, iq, &pipeline.iq_tags); + rob.retain(|&seq| seq <= flush_seq); +} + +pub(crate) fn schedule_frontend_redirect_recovery( + cycle: u64, + pipeline: &mut StageQueues, + uops: &[CycleUop], +) { + let Some(redirect) = resolved_frontend_redirect(cycle, pipeline, uops) else { + return; + }; + let restart_seq = legal_redirect_restart_seq(redirect.source_seq, redirect.target_pc, uops) + .unwrap_or_else(|| redirect.source_seq.saturating_add(1)); + + let next = FrontendRedirectState { + source_seq: redirect.source_seq, + target_pc: redirect.target_pc, + restart_seq, + checkpoint_id: redirect.checkpoint_id, + from_correction: redirect.from_correction, + resume_cycle: cycle.saturating_add(crate::FRONTEND_REDIRECT_RESTART_DELAY), + }; + pipeline.frontend_redirect = Some(match pipeline.frontend_redirect { + Some(active) if active.resume_cycle >= next.resume_cycle => active, + _ => next, + }); + pipeline.flush_checkpoint_id = Some(redirect.checkpoint_id); + pipeline.pending_flush = Some(match pipeline.pending_flush { + Some(active) + if active.apply_cycle + <= cycle.saturating_add(crate::FRONTEND_REDIRECT_RESTART_DELAY) => + { + active + } + _ => PendingFlushState { + flush_seq: redirect.source_seq, + checkpoint_id: redirect.checkpoint_id, + apply_cycle: 
cycle.saturating_add(crate::FRONTEND_REDIRECT_RESTART_DELAY), + }, + }); + if redirect.from_correction { + pipeline.pending_bru_correction = None; + } else if pipeline.pending_bru_correction.is_some_and(|pending| { + pending.epoch < recovery_epoch_for_seq(redirect.source_seq, pipeline, uops) + }) { + pipeline.pending_bru_correction = None; + } +} + +pub(crate) fn prune_memory_owner_state_on_redirect( + cycle: u64, + pipeline: &mut StageQueues, + uops: &[CycleUop], +) { + let Some(flush_seq) = active_flush(cycle, pipeline, uops).map(|flush| flush.flush_seq) else { + return; + }; + + pipeline.stq.retain(|&seq| seq <= flush_seq); + pipeline.lhq.retain(|&seq| seq <= flush_seq); + pipeline.liq.retain(|entry| entry.seq <= flush_seq); + pipeline.mdb.retain(|entry| entry.seq <= flush_seq); + pipeline.scb.retain(|entry| entry.seq <= flush_seq); + pipeline.l1d.retain(|entry| entry.seq <= flush_seq); +} + +pub(crate) fn rebase_lsid_on_redirect( + cycle: u64, + pipeline: &mut StageQueues, + iq: &[IqEntry], + rob: &VecDeque, + uops: &[CycleUop], +) { + if active_flush(cycle, pipeline, uops).is_none() { + return; + } + + if let Some(head) = surviving_unissued_lsid_head(pipeline, iq, rob, uops) { + pipeline.lsid_issue_ptr = head; + pipeline.lsid_complete_ptr = head; + } + if let Some(head) = surviving_active_lsid_head(pipeline, iq, rob, uops) { + pipeline.lsid_cache_ptr = head; + } +} + +pub(crate) fn apply_pending_flush( + cycle: u64, + pipeline: &mut StageQueues, + iq: &mut Vec, + rob: &mut VecDeque, + uops: &[CycleUop], +) { + let Some(pending_flush) = pipeline.pending_flush else { + return; + }; + if pending_flush.apply_cycle > cycle { + return; + } + restore_ready_tables_for_checkpoint(pipeline, pending_flush.checkpoint_id); + pipeline.active_recovery_checkpoint_id = pending_flush.checkpoint_id; + prune_speculative_state_on_redirect(cycle, pipeline, iq, rob, uops); + prune_memory_owner_state_on_redirect(cycle, pipeline, uops); + rebase_lsid_on_redirect(cycle, pipeline, iq, 
rob, uops); + pipeline.pending_flush = None; +} + +pub(crate) fn resolved_frontend_redirect( + cycle: u64, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + let source_seq = resolved_boundary_seq(cycle, uops)?; + if pipeline + .pending_trap + .is_some_and(|pending| pending.seq == source_seq && pending.visible_cycle <= cycle) + { + return None; + } + let boundary_epoch = recovery_epoch_for_seq(source_seq, pipeline, uops); + if let Some(pending) = pipeline + .pending_bru_correction + .filter(|pending| pending.source_seq < source_seq && pending.epoch == boundary_epoch) + { + return Some(ResolvedFrontendRedirect { + source_seq, + target_pc: if pending.actual_take { + pending.target_pc + } else { + fallthrough_pc(&uops[source_seq].commit) + }, + checkpoint_id: pending.checkpoint_id, + from_correction: true, + }); + } + crate::live_boundary_target_for_seq(source_seq, pipeline, uops).map(|target_pc| { + ResolvedFrontendRedirect { + source_seq, + target_pc, + checkpoint_id: live_rob_checkpoint_id_for_seq(source_seq, pipeline, uops), + from_correction: false, + } + }) +} + +fn resolved_boundary_seq(cycle: u64, uops: &[CycleUop]) -> Option { + uops.iter() + .enumerate() + .filter_map(|(seq, uop)| (boundary_resolve_cycle(uop) == Some(cycle)).then_some(seq)) + .min() +} + +fn active_flush( + cycle: u64, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + pipeline + .pending_flush + .filter(|pending| pending.apply_cycle <= cycle) + .or_else(|| { + resolved_frontend_redirect(cycle, pipeline, uops).map(|redirect| PendingFlushState { + flush_seq: redirect.source_seq, + checkpoint_id: redirect.checkpoint_id, + apply_cycle: cycle, + }) + }) +} + +fn boundary_resolve_cycle(uop: &CycleUop) -> Option { + is_boundary_redirect_owner(&uop.decoded) + .then_some(uop.w1_cycle.or(uop.done_cycle)) + .flatten() +} + +fn dynamic_boundary_target_fault( + seq: usize, + cycle: u64, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + let uop = uops.get(seq)?; + 
if boundary_resolve_cycle(uop)? != cycle { + return None; + } + let kind = live_branch_kind_for_seq(seq, pipeline, uops)?; + if !matches!( + kind, + BranchOwnerKind::Ret | BranchOwnerKind::Ind | BranchOwnerKind::ICall + ) { + return None; + } + + let target_pc = live_boundary_target_for_seq(seq, pipeline, uops); + let setup_epoch = pipeline.seq_boundary_target_setup_epochs.get(&seq).copied(); + let boundary_epoch = recovery_epoch_for_seq(seq, pipeline, uops); + let cause = match target_pc { + None => TRAP_DYNAMIC_TARGET_MISSING, + Some(_) if setup_epoch.is_some_and(|setup_epoch| setup_epoch != boundary_epoch) => { + TRAP_DYNAMIC_TARGET_STALE + } + Some(target_pc) if legal_redirect_restart_seq(seq, target_pc, uops).is_none() => { + TRAP_DYNAMIC_TARGET_NOT_BSTART + } + Some(_) => return None, + }; + Some(PendingTrapState { + seq, + cause, + traparg0: uop.commit.pc, + checkpoint_id: live_rob_checkpoint_id_for_seq(seq, pipeline, uops), + visible_cycle: cycle, + }) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +struct BruCorrectionCandidate { + source_seq: usize, + epoch: u16, + actual_take: bool, + target_pc: u64, + checkpoint_id: u8, + visible_cycle: u64, +} + +fn bru_correction_candidate( + seq: usize, + cycle: u64, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + let uop = uops.get(seq)?; + let visible_cycle = uop.w1_cycle.or(uop.done_cycle)?; + if visible_cycle != cycle { + return None; + } + let has_live_branch_context = pipeline.seq_branch_contexts.contains_key(&seq); + let branch_context = branch_context_for_seq(seq, pipeline, uops); + let actual_take = bru_actual_take(uop)?; + let target_pc = bru_target_pc(uop, branch_context, actual_take, has_live_branch_context)?; + let mismatch = if matches!( + branch_context.kind, + BranchOwnerKind::Cond | BranchOwnerKind::Ret + ) { + actual_take != branch_context.pred_take + } else if has_live_branch_context { + false + } else { + actual_take + }; + mismatch.then_some(BruCorrectionCandidate { + 
source_seq: seq, + epoch: recovery_epoch_for_seq(seq, pipeline, uops), + actual_take, + target_pc, + checkpoint_id: recovery_checkpoint_id_for_seq(seq, pipeline, uops), + visible_cycle, + }) +} + +fn bru_actual_take(uop: &CycleUop) -> Option { + (uop.commit.trap_valid == 0 + && uop.decoded.uop_group == "BRU" + && !is_boundary_redirect_owner(&uop.decoded)) + .then_some(uop.commit.next_pc != fallthrough_pc(&uop.commit)) +} + +fn bru_target_pc( + uop: &CycleUop, + branch_context: crate::BranchOwnerContext, + actual_take: bool, + has_live_branch_context: bool, +) -> Option { + if branch_context.kind != BranchOwnerKind::None { + return Some(branch_context.target_pc); + } + if has_live_branch_context { + return None; + } + actual_take + .then(|| deferred_bru_correction_target(&uop.commit, &uop.decoded)) + .flatten() +} + +fn fallthrough_pc(commit: &isa::CommitRecord) -> u64 { + commit.pc.saturating_add(commit.len as u64) +} + +fn surviving_unissued_lsid_head( + pipeline: &StageQueues, + iq: &[IqEntry], + rob: &VecDeque, + uops: &[CycleUop], +) -> Option { + active_unissued_memory_seqs(pipeline, iq, rob) + .into_iter() + .filter_map(|seq| uops.get(seq).and_then(|uop| uop.load_store_id)) + .min() +} + +fn surviving_active_lsid_head( + pipeline: &StageQueues, + iq: &[IqEntry], + rob: &VecDeque, + uops: &[CycleUop], +) -> Option { + active_memory_seqs(pipeline, iq, rob) + .into_iter() + .filter_map(|seq| uops.get(seq).and_then(|uop| uop.load_store_id)) + .min() +} + +fn active_unissued_memory_seqs( + pipeline: &StageQueues, + iq: &[IqEntry], + rob: &VecDeque, +) -> BTreeSet { + let mut out = BTreeSet::new(); + out.extend(rob.iter().copied()); + out.extend(iq.iter().map(|entry| entry.seq)); + out.extend( + pipeline + .frontend + .iter() + .flat_map(|queue| queue.iter().copied()), + ); + out.extend(pipeline.p1.iter().copied()); + out.extend(pipeline.i1.iter().copied()); + out.extend(pipeline.i2.iter().copied()); + + for seq in issued_memory_seqs(pipeline) { + out.remove(&seq); 
+ } + out +} + +fn active_memory_seqs( + pipeline: &StageQueues, + iq: &[IqEntry], + rob: &VecDeque, +) -> BTreeSet { + let mut out = BTreeSet::new(); + out.extend(rob.iter().copied()); + out.extend(iq.iter().map(|entry| entry.seq)); + out.extend( + pipeline + .frontend + .iter() + .flat_map(|queue| queue.iter().copied()), + ); + out.extend(pipeline.p1.iter().copied()); + out.extend(pipeline.i1.iter().copied()); + out.extend(pipeline.i2.iter().copied()); + out.extend(issued_memory_seqs(pipeline)); + out +} + +fn issued_memory_seqs(pipeline: &StageQueues) -> BTreeSet { + let mut out = BTreeSet::new(); + out.extend(pipeline.e1.iter().copied()); + out.extend(pipeline.e2.iter().copied()); + out.extend(pipeline.e3.iter().copied()); + out.extend(pipeline.e4.iter().copied()); + out.extend(pipeline.w1.iter().copied()); + out.extend(pipeline.w2.iter().copied()); + out.extend(pipeline.liq.iter().map(|entry| entry.seq)); + out.extend(pipeline.lhq.iter().copied()); + out.extend(pipeline.mdb.iter().map(|entry| entry.seq)); + out.extend(pipeline.stq.iter().copied()); + out.extend(pipeline.scb.iter().map(|entry| entry.seq)); + out.extend(pipeline.l1d.iter().map(|entry| entry.seq)); + out +} diff --git a/crates/camodel/src/core/config.rs b/crates/camodel/src/core/config.rs new file mode 100644 index 0000000..e19eafb --- /dev/null +++ b/crates/camodel/src/core/config.rs @@ -0,0 +1,52 @@ +use serde::{Deserialize, Serialize}; + +use isa::{RunResult, StageTraceEvent}; + +pub(crate) const FETCH_WIDTH: usize = 4; +pub(crate) const DISPATCH_WIDTH: usize = 4; +pub(crate) const ISSUE_WIDTH: usize = 4; +pub(crate) const COMMIT_WIDTH: usize = 4; +pub(crate) const READ_PORTS: usize = 3; +pub(crate) const ROB_CAPACITY: usize = 128; +pub(crate) const PHYS_IQ_COUNT: usize = 8; +pub(crate) const IQ_CAPACITY: usize = 32; +pub(crate) const IQ_ENQUEUE_PORTS: usize = 2; +pub(crate) const LSU_WIDTH: usize = 1; +pub(crate) const L1D_WIDTH: usize = 1; +pub(crate) const FRONTEND_REDIRECT_RESTART_DELAY: 
u64 = 1; +pub(crate) const REG_T1: u8 = 24; +pub(crate) const REG_U1: u8 = 28; +pub(crate) const LD_GEN_E1: u8 = 1 << 0; +pub(crate) const LD_GEN_E2: u8 = 1 << 1; +pub(crate) const LD_GEN_E3: u8 = 1 << 2; +pub(crate) const LD_GEN_E4: u8 = 1 << 3; + +pub(crate) const FRONTEND_STAGE_NAMES: [&str; 11] = [ + "F0", "F1", "F2", "F3", "IB", "F4", "D1", "D2", "D3", "S1", "S2", +]; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CycleRunOptions { + pub max_cycles: u64, + pub load_miss_every: Option, + pub load_miss_penalty: u64, +} + +impl Default for CycleRunOptions { + fn default() -> Self { + Self { + max_cycles: 256, + load_miss_every: None, + load_miss_penalty: 8, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CycleRunBundle { + pub result: RunResult, + pub stage_events: Vec, +} + +#[derive(Debug, Default)] +pub struct CycleEngine; diff --git a/crates/camodel/src/core/engine.rs b/crates/camodel/src/core/engine.rs new file mode 100644 index 0000000..af3203b --- /dev/null +++ b/crates/camodel/src/core/engine.rs @@ -0,0 +1,167 @@ +use anyhow::Result; +use funcmodel::{FuncEngine, FuncRunOptions}; +use isa::{CommitRecord, EngineKind, RunMetrics, RunResult}; +use runtime::GuestRuntime; +use std::collections::VecDeque; + +use crate::*; + +impl CycleEngine { + pub fn run(&self, runtime: &GuestRuntime, options: &CycleRunOptions) -> Result { + let func_limit = options + .max_cycles + .saturating_mul(COMMIT_WIDTH as u64) + .max(1); + let func_bundle = FuncEngine.run( + runtime, + &FuncRunOptions { + max_steps: func_limit, + }, + )?; + + let mut uops = build_uops(&func_bundle.result.commits, &func_bundle.result.decoded); + if uops.is_empty() { + return Ok(CycleRunBundle { + result: RunResult { + image_name: runtime.image.image_name(), + entry_pc: runtime.state.pc, + metrics: RunMetrics { + engine: EngineKind::Cycle, + cycles: 0, + commits: 0, + exit_reason: func_bundle.result.metrics.exit_reason, + }, + commits: Vec::new(), + decoded: 
Vec::new(), + }, + stage_events: Vec::new(), + }); + } + + let mut stage_events = Vec::new(); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::::new(); + let mut rob = VecDeque::::new(); + let mut committed = Vec::::new(); + let mut retired_seqs = Vec::::new(); + let mut next_fetch_seq = 0usize; + let mut exit_reason = "cycle_limit".to_string(); + let target_commit_count = func_bundle.result.commits.len(); + + for cycle in 0..options.max_cycles { + apply_pending_flush(cycle, &mut pipeline, &mut iq, &mut rob, &uops); + fill_fetch(cycle, &mut pipeline, &mut next_fetch_seq, &uops); + + tag_stage_cycles(cycle, &pipeline, &mut uops); + publish_bru_correction_state(cycle, &mut pipeline, &uops); + publish_dynamic_boundary_target_fault_state(cycle, &mut pipeline, &uops); + publish_call_header_fault_state(cycle, &mut pipeline, &uops); + update_iq_entries_for_cycle( + cycle, + &mut iq, + &pipeline.ready_table_t, + &pipeline.ready_table_u, + &pipeline.iq_owner_table, + &pipeline.iq_tags, + &pipeline.qtag_wait_crossbar, + &uops, + ); + emit_stage_events( + cycle, + runtime, + &pipeline, + &iq, + &rob, + &uops, + &mut stage_events, + ); + advance_scb(cycle, &mut pipeline, &uops); + + let trap_retired = retire_ready( + cycle, + runtime, + &mut rob, + &mut committed, + &mut retired_seqs, + &mut pipeline, + &mut uops, + &mut stage_events, + ); + if let Some(cause) = trap_retired { + exit_reason = format!("trap(0x{cause:08x})"); + break; + } + schedule_frontend_redirect_recovery(cycle, &mut pipeline, &uops); + + if committed.len() == target_commit_count { + exit_reason = func_bundle.result.metrics.exit_reason.clone(); + break; + } + + advance_execute(cycle, &mut pipeline, &mut uops, options); + advance_l1d(cycle, &mut pipeline); + advance_liq(cycle, &mut pipeline, &mut uops, &rob); + advance_i1_to_i2(&mut pipeline, &mut iq); + let mut admitted_i1 = arbitrate_i1(cycle, &mut pipeline.p1, &mut iq, &uops, &rob); + advance_p1_to_i1(&mut pipeline.i1, &mut admitted_i1, 
&mut pipeline.p1); + advance_i2( + cycle, + &mut pipeline.i2, + &mut pipeline.e1, + &mut pipeline.lhq, + &mut pipeline.stq, + &mut pipeline.lsid_issue_ptr, + &mut pipeline.lsid_complete_ptr, + &uops, + ); + pick_from_iq( + cycle, + pipeline.lsid_issue_ptr, + &mut iq, + &uops, + &mut pipeline.p1, + &rob, + ); + dispatch_to_iq_and_bypass(cycle, &mut pipeline, &mut iq, &mut rob, &mut uops); + advance_frontend(&mut pipeline, &mut rob); + + if committed.len() == target_commit_count { + exit_reason = func_bundle.result.metrics.exit_reason.clone(); + break; + } + } + + if committed.len() == target_commit_count { + exit_reason = func_bundle.result.metrics.exit_reason; + } + + let decoded = retired_seqs + .into_iter() + .filter_map(|seq| uops.get(seq).map(|uop| uop.decoded.clone())) + .collect::>(); + + let result = RunResult { + image_name: runtime.image.image_name(), + entry_pc: runtime.state.pc, + metrics: RunMetrics { + engine: EngineKind::Cycle, + cycles: if exit_reason == "cycle_limit" { + options.max_cycles + } else { + committed + .last() + .map(|commit| commit.cycle.saturating_add(1)) + .unwrap_or(options.max_cycles) + }, + commits: committed.len() as u64, + exit_reason, + }, + commits: committed, + decoded, + }; + Ok(CycleRunBundle { + result, + stage_events, + }) + } +} diff --git a/crates/camodel/src/core/mod.rs b/crates/camodel/src/core/mod.rs new file mode 100644 index 0000000..67b4c3a --- /dev/null +++ b/crates/camodel/src/core/mod.rs @@ -0,0 +1,6 @@ +pub mod config; +pub mod engine; +pub mod model; +pub mod uop; + +pub use config::{CycleEngine, CycleRunBundle, CycleRunOptions}; diff --git a/crates/camodel/src/core/model.rs b/crates/camodel/src/core/model.rs new file mode 100644 index 0000000..9859054 --- /dev/null +++ b/crates/camodel/src/core/model.rs @@ -0,0 +1,409 @@ +use isa::{CommitRecord, DecodedInstruction}; +use std::collections::{BTreeMap, BTreeSet, VecDeque}; + +use crate::{FRONTEND_STAGE_NAMES, IQ_CAPACITY, PHYS_IQ_COUNT}; + +#[derive(Debug, 
Clone, Copy, PartialEq, Eq)] +pub(crate) enum QueueWakeKind { + T, + U, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum IqWakeKind { + Spec, + Nonspec, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum PhysIq { + AluIq0, + SharedIq1, + BruIq, + AguIq0, + AguIq1, + StdIq0, + StdIq1, + CmdIq, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct QTag { + pub(crate) phys_iq: PhysIq, + pub(crate) entry_id: usize, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct LogicalQueueTag { + pub(crate) kind: QueueWakeKind, + pub(crate) tag: usize, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum BranchOwnerKind { + None, + Fall, + Cond, + Call, + Ret, + Direct, + Ind, + ICall, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum ReturnConsumerKind { + SetcTgt, + FretRa, + FretStk, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum CallMaterializationKind { + FusedCall, + AdjacentSetret, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum DynamicTargetSourceKind { + ArchTargetSetup, + CallReturnFused, + CallReturnAdjacentSetret, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct BranchOwnerContext { + pub(crate) kind: BranchOwnerKind, + pub(crate) base_pc: u64, + pub(crate) target_pc: u64, + pub(crate) off: u64, + pub(crate) pred_take: bool, + pub(crate) epoch: u16, +} + +impl Default for BranchOwnerContext { + fn default() -> Self { + Self { + kind: BranchOwnerKind::None, + base_pc: 0, + target_pc: 0, + off: 0, + pred_take: false, + epoch: 0, + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct IqWakeEvent { + pub(crate) producer: usize, + pub(crate) wake_kind: IqWakeKind, + pub(crate) queue_kind: Option, + pub(crate) logical_tag: Option, + pub(crate) qtag: Option, +} + +#[derive(Debug, Clone)] +pub(crate) struct CycleUop { + pub(crate) decoded: DecodedInstruction, + pub(crate) commit: 
CommitRecord, + pub(crate) deps: [Option; 2], + pub(crate) src_queue_kinds: [Option; 2], + pub(crate) src_logical_tags: [Option; 2], + pub(crate) src_qtags: [Option; 2], + pub(crate) dst_queue_kind: Option, + pub(crate) dst_logical_tag: Option, + pub(crate) dst_qtag: Option, + pub(crate) bypass_d2: bool, + pub(crate) is_load: bool, + pub(crate) is_store: bool, + pub(crate) load_ordinal: Option, + pub(crate) load_store_id: Option, + pub(crate) miss_injected: bool, + pub(crate) redirect_target: Option, + pub(crate) phys_iq: Option, + pub(crate) pick_wakeup_visible: Option, + pub(crate) data_ready_visible: Option, + pub(crate) miss_pending_until: Option, + pub(crate) e1_cycle: Option, + pub(crate) e4_cycle: Option, + pub(crate) w1_cycle: Option, + pub(crate) done_cycle: Option, +} + +#[derive(Debug, Clone)] +pub(crate) struct IqEntry { + pub(crate) seq: usize, + pub(crate) phys_iq: PhysIq, + pub(crate) inflight: bool, + pub(crate) src_valid: [bool; 2], + pub(crate) src_ready_nonspec: [bool; 2], + pub(crate) src_ready_spec: [bool; 2], + pub(crate) src_wait_qtag: [bool; 2], +} + +impl PhysIq { + pub(crate) fn index(self) -> usize { + self as usize + } + + pub(crate) fn lane_id(self) -> &'static str { + match self { + Self::AluIq0 => "alu_iq0", + Self::SharedIq1 => "shared_iq1", + Self::BruIq => "bru_iq", + Self::AguIq0 => "agu_iq0", + Self::AguIq1 => "agu_iq1", + Self::StdIq0 => "std_iq0", + Self::StdIq1 => "std_iq1", + Self::CmdIq => "cmd_iq", + } + } + + pub(crate) fn capacity(self) -> usize { + IQ_CAPACITY + } +} + +#[derive(Debug, Clone)] +pub(crate) struct StageQueues { + pub(crate) frontend: [VecDeque; FRONTEND_STAGE_NAMES.len()], + pub(crate) frontend_redirect: Option, + pub(crate) pending_flush: Option, + pub(crate) flush_checkpoint_id: Option, + pub(crate) seq_checkpoint_ids: BTreeMap, + pub(crate) seq_rob_checkpoint_ids: BTreeMap, + pub(crate) seq_recovery_checkpoint_ids: BTreeMap, + pub(crate) seq_recovery_epochs: BTreeMap, + pub(crate) seq_branch_contexts: 
BTreeMap, + pub(crate) seq_dynamic_target_pcs: BTreeMap, + pub(crate) seq_boundary_target_pcs: BTreeMap, + pub(crate) seq_boundary_target_owner_seqs: BTreeMap, + pub(crate) seq_boundary_target_producer_kinds: BTreeMap, + pub(crate) seq_boundary_target_setup_epochs: BTreeMap, + pub(crate) seq_boundary_target_source_owner_seqs: BTreeMap, + pub(crate) seq_boundary_target_source_epochs: BTreeMap, + pub(crate) seq_boundary_target_source_kinds: BTreeMap, + pub(crate) seq_return_consumer_kinds: BTreeMap, + pub(crate) seq_call_return_target_pcs: BTreeMap, + pub(crate) seq_call_return_target_owner_seqs: BTreeMap, + pub(crate) seq_call_return_target_epochs: BTreeMap, + pub(crate) seq_call_materialization_kinds: BTreeMap, + pub(crate) seq_call_header_faults: BTreeMap, + pub(crate) active_recovery_checkpoint_id: u8, + pub(crate) active_recovery_epoch: u16, + pub(crate) active_block_head: bool, + pub(crate) active_branch_context: BranchOwnerContext, + pub(crate) active_dynamic_target_pc: Option, + pub(crate) active_dynamic_target_owner_seq: Option, + pub(crate) active_dynamic_target_producer_kind: Option, + pub(crate) active_dynamic_target_setup_epoch: Option, + pub(crate) active_dynamic_target_owner_kind: Option, + pub(crate) active_dynamic_target_source_owner_seq: Option, + pub(crate) active_dynamic_target_source_epoch: Option, + pub(crate) active_dynamic_target_source_kind: Option, + pub(crate) active_dynamic_target_call_materialization_kind: Option, + pub(crate) active_call_header_seq: Option, + pub(crate) active_call_return_target_pc: Option, + pub(crate) active_call_return_target_owner_seq: Option, + pub(crate) active_call_return_target_epoch: Option, + pub(crate) active_call_return_materialization_kind: Option, + pub(crate) ready_table_checkpoints: BTreeMap, + pub(crate) pending_bru_correction: Option, + pub(crate) pending_trap: Option, + pub(crate) iq_tags: BTreeMap, + pub(crate) iq_owner_table: Vec>>, + pub(crate) qtag_wait_crossbar: Vec>>, + pub(crate) ready_table_t: 
BTreeSet, + pub(crate) ready_table_u: BTreeSet, + pub(crate) liq: VecDeque, + pub(crate) lhq: VecDeque, + pub(crate) mdb: VecDeque, + pub(crate) stq: VecDeque, + pub(crate) scb: VecDeque, + pub(crate) l1d: VecDeque, + pub(crate) p1: VecDeque, + pub(crate) i1: VecDeque, + pub(crate) i2: VecDeque, + pub(crate) e1: VecDeque, + pub(crate) e2: VecDeque, + pub(crate) e3: VecDeque, + pub(crate) e4: VecDeque, + pub(crate) w1: VecDeque, + pub(crate) w2: VecDeque, + pub(crate) lsid_issue_ptr: usize, + pub(crate) lsid_complete_ptr: usize, + pub(crate) lsid_cache_ptr: usize, +} + +impl Default for StageQueues { + fn default() -> Self { + Self { + frontend: Default::default(), + frontend_redirect: None, + pending_flush: None, + flush_checkpoint_id: None, + seq_checkpoint_ids: Default::default(), + seq_rob_checkpoint_ids: Default::default(), + seq_recovery_checkpoint_ids: Default::default(), + seq_recovery_epochs: Default::default(), + seq_branch_contexts: Default::default(), + seq_dynamic_target_pcs: Default::default(), + seq_boundary_target_pcs: Default::default(), + seq_boundary_target_owner_seqs: Default::default(), + seq_boundary_target_producer_kinds: Default::default(), + seq_boundary_target_setup_epochs: Default::default(), + seq_boundary_target_source_owner_seqs: Default::default(), + seq_boundary_target_source_epochs: Default::default(), + seq_boundary_target_source_kinds: Default::default(), + seq_return_consumer_kinds: Default::default(), + seq_call_return_target_pcs: Default::default(), + seq_call_return_target_owner_seqs: Default::default(), + seq_call_return_target_epochs: Default::default(), + seq_call_materialization_kinds: Default::default(), + seq_call_header_faults: Default::default(), + active_recovery_checkpoint_id: 0, + active_recovery_epoch: 0, + active_block_head: true, + active_branch_context: BranchOwnerContext::default(), + active_dynamic_target_pc: None, + active_dynamic_target_owner_seq: None, + active_dynamic_target_producer_kind: None, + 
active_dynamic_target_setup_epoch: None, + active_dynamic_target_owner_kind: None, + active_dynamic_target_source_owner_seq: None, + active_dynamic_target_source_epoch: None, + active_dynamic_target_source_kind: None, + active_dynamic_target_call_materialization_kind: None, + active_call_header_seq: None, + active_call_return_target_pc: None, + active_call_return_target_owner_seq: None, + active_call_return_target_epoch: None, + active_call_return_materialization_kind: None, + ready_table_checkpoints: Default::default(), + pending_bru_correction: None, + pending_trap: None, + iq_tags: Default::default(), + iq_owner_table: empty_iq_owner_table(), + qtag_wait_crossbar: empty_qtag_wait_crossbar(), + ready_table_t: Default::default(), + ready_table_u: Default::default(), + liq: Default::default(), + lhq: Default::default(), + mdb: Default::default(), + stq: Default::default(), + scb: Default::default(), + l1d: Default::default(), + p1: Default::default(), + i1: Default::default(), + i2: Default::default(), + e1: Default::default(), + e2: Default::default(), + e3: Default::default(), + e4: Default::default(), + w1: Default::default(), + w2: Default::default(), + lsid_issue_ptr: 0, + lsid_complete_ptr: 0, + lsid_cache_ptr: 0, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct ReadyTableCheckpoint { + pub(crate) ready_table_t: BTreeSet, + pub(crate) ready_table_u: BTreeSet, + pub(crate) recovery_epoch: u16, + pub(crate) block_head: bool, + pub(crate) branch_context: BranchOwnerContext, + pub(crate) dynamic_target_pc: Option, + pub(crate) dynamic_target_owner_seq: Option, + pub(crate) dynamic_target_producer_kind: Option, + pub(crate) dynamic_target_setup_epoch: Option, + pub(crate) dynamic_target_owner_kind: Option, + pub(crate) dynamic_target_source_owner_seq: Option, + pub(crate) dynamic_target_source_epoch: Option, + pub(crate) dynamic_target_source_kind: Option, + pub(crate) dynamic_target_call_materialization_kind: Option, + pub(crate) 
call_header_seq: Option, + pub(crate) call_return_target_pc: Option, + pub(crate) call_return_target_owner_seq: Option, + pub(crate) call_return_target_epoch: Option, + pub(crate) call_return_materialization_kind: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct FrontendRedirectState { + pub(crate) source_seq: usize, + pub(crate) target_pc: u64, + pub(crate) restart_seq: usize, + pub(crate) checkpoint_id: u8, + pub(crate) from_correction: bool, + pub(crate) resume_cycle: u64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct PendingFlushState { + pub(crate) flush_seq: usize, + pub(crate) checkpoint_id: u8, + pub(crate) apply_cycle: u64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct BruCorrectionState { + pub(crate) source_seq: usize, + pub(crate) epoch: u16, + pub(crate) actual_take: bool, + pub(crate) target_pc: u64, + pub(crate) checkpoint_id: u8, + pub(crate) visible_cycle: u64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) struct PendingTrapState { + pub(crate) seq: usize, + pub(crate) cause: u64, + pub(crate) traparg0: u64, + pub(crate) checkpoint_id: u8, + pub(crate) visible_cycle: u64, +} + +#[derive(Debug, Clone)] +pub(crate) struct LiqEntry { + pub(crate) seq: usize, + pub(crate) refill_ready_cycle: u64, +} + +#[derive(Debug, Clone)] +pub(crate) struct MdbEntry { + pub(crate) seq: usize, + pub(crate) refill_ready_cycle: u64, +} + +#[derive(Debug, Clone)] +pub(crate) struct ScbEntry { + pub(crate) seq: usize, + pub(crate) enqueue_cycle: u64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum L1dTxnKind { + LoadHit, + StoreDrain, +} + +#[derive(Debug, Clone)] +pub(crate) struct L1dEntry { + pub(crate) seq: usize, + pub(crate) kind: L1dTxnKind, + pub(crate) ready_cycle: u64, +} + +pub(crate) fn empty_qtag_wait_crossbar() -> Vec>> { + vec![vec![Vec::new(); IQ_CAPACITY]; PHYS_IQ_COUNT] +} + +pub(crate) fn empty_iq_owner_table() -> Vec>> { + vec![vec![None; 
IQ_CAPACITY]; PHYS_IQ_COUNT] +} diff --git a/crates/camodel/src/core/uop.rs b/crates/camodel/src/core/uop.rs new file mode 100644 index 0000000..f8c1010 --- /dev/null +++ b/crates/camodel/src/core/uop.rs @@ -0,0 +1 @@ +// Uop-domain placeholder for future model/uop extraction. diff --git a/crates/camodel/src/decode/builder.rs b/crates/camodel/src/decode/builder.rs new file mode 100644 index 0000000..6b75b05 --- /dev/null +++ b/crates/camodel/src/decode/builder.rs @@ -0,0 +1 @@ +// Decode-builder placeholder; committed-stream to uop build stays in decode owner module. diff --git a/crates/camodel/src/decode/classify.rs b/crates/camodel/src/decode/classify.rs new file mode 100644 index 0000000..3138665 --- /dev/null +++ b/crates/camodel/src/decode/classify.rs @@ -0,0 +1 @@ +// Decode-classify placeholder; instruction classification stays in decode owner module. diff --git a/crates/camodel/src/decode/mod.rs b/crates/camodel/src/decode/mod.rs new file mode 100644 index 0000000..93a1dfa --- /dev/null +++ b/crates/camodel/src/decode/mod.rs @@ -0,0 +1,398 @@ +pub mod builder; +pub mod classify; + +use crate::{CycleUop, LogicalQueueTag, QueueWakeKind, REG_T1, REG_U1, d2_bypass}; +use isa::{CommitRecord, DecodedInstruction}; + +pub(crate) fn build_uops( + commits: &[CommitRecord], + decoded: &[DecodedInstruction], +) -> Vec { + let mut last_writer = [None; 32]; + let mut t_history = Vec::<(usize, LogicalQueueTag)>::new(); + let mut u_history = Vec::<(usize, LogicalQueueTag)>::new(); + let mut next_t_tag = 0usize; + let mut next_u_tag = 0usize; + let mut deps = Vec::with_capacity(commits.len()); + let mut src_queue_kinds = Vec::with_capacity(commits.len()); + let mut src_logical_tags = Vec::with_capacity(commits.len()); + let mut dst_queue_kinds = Vec::with_capacity(commits.len()); + let mut dst_logical_tags = Vec::with_capacity(commits.len()); + for (seq, commit) in commits.iter().enumerate() { + let Some(decoded) = decoded.get(seq) else { + break; + }; + let src_kinds = [ + 
queue_src_kind(commit, decoded, 0), + queue_src_kind(commit, decoded, 1), + ]; + let src_tags = [ + src_kinds[0].and_then(|kind| { + queue_src_rel(commit, decoded, 0) + .and_then(|rel| resolve_logical_queue_src(kind, rel, &t_history, &u_history)) + .map(|(_, tag)| tag) + }), + src_kinds[1].and_then(|kind| { + queue_src_rel(commit, decoded, 1) + .and_then(|rel| resolve_logical_queue_src(kind, rel, &t_history, &u_history)) + .map(|(_, tag)| tag) + }), + ]; + let src0 = src_tags[0] + .and_then(|tag| logical_tag_producer(tag, &t_history, &u_history)) + .or_else(|| { + if commit.src0_valid != 0 && commit.src0_reg != 0 { + last_writer[commit.src0_reg as usize] + } else { + None + } + }); + let src1 = src_tags[1] + .and_then(|tag| logical_tag_producer(tag, &t_history, &u_history)) + .or_else(|| { + if commit.src1_valid != 0 && commit.src1_reg != 0 { + last_writer[commit.src1_reg as usize] + } else { + None + } + }); + let dst_kind = queue_dst_kind(commit, decoded); + let dst_logical_tag = + allocate_logical_queue_tag(dst_kind, &mut next_t_tag, &mut next_u_tag); + deps.push([src0, src1]); + src_queue_kinds.push(src_kinds); + src_logical_tags.push(src_tags); + dst_queue_kinds.push(dst_kind); + dst_logical_tags.push(dst_logical_tag); + if commit.wb_valid != 0 && commit.wb_rd != 0 { + last_writer[commit.wb_rd as usize] = Some(deps.len() - 1); + } + if let Some(tag) = dst_logical_tag { + match tag.kind { + QueueWakeKind::T => t_history.push((seq, tag)), + QueueWakeKind::U => u_history.push((seq, tag)), + } + } + } + + let mut load_ordinal = 0usize; + let mut load_store_id = 0usize; + commits + .iter() + .enumerate() + .filter_map(|(seq, commit)| { + let is_load = commit.mem_valid != 0 && commit.mem_is_store == 0; + let is_store = commit.mem_valid != 0 && commit.mem_is_store != 0; + let this_load_ordinal = is_load.then_some(load_ordinal); + let this_load_store_id = (is_load || is_store).then_some(load_store_id); + if is_load { + load_ordinal += 1; + } + if is_load || is_store { 
+ load_store_id += 1; + } + decoded.get(seq).cloned().map(|decoded| { + let bypass_d2 = d2_bypass(&decoded); + let redirect_target = architectural_redirect_target(commit, &decoded); + CycleUop { + bypass_d2, + src_queue_kinds: src_queue_kinds[seq], + src_logical_tags: src_logical_tags[seq], + src_qtags: [None, None], + dst_queue_kind: dst_queue_kinds[seq], + dst_logical_tag: dst_logical_tags[seq], + dst_qtag: None, + is_load, + is_store, + load_ordinal: this_load_ordinal, + load_store_id: this_load_store_id, + miss_injected: false, + decoded, + commit: commit.clone(), + deps: deps[seq], + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + redirect_target, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + } + }) + }) + .collect::>() +} + +pub(crate) fn checkpoint_id_for_seq(seq: usize, uops: &[CycleUop]) -> u8 { + let Some(packet_head_seq) = fetch_packet_head_seq(seq, uops) else { + return 0; + }; + packet_checkpoint_id(uops[packet_head_seq].commit.pc) +} + +pub(crate) fn rob_checkpoint_id_for_seq(seq: usize, uops: &[CycleUop]) -> u8 { + if !is_start_marker(&uops[seq].decoded) { + return 0; + } + let packet_checkpoint = checkpoint_id_for_seq(seq, uops); + let slot = packet_slot_for_seq(seq, uops).unwrap_or(0) as u8; + packet_checkpoint.wrapping_add(slot) & 0x3f +} + +pub(crate) fn packet_checkpoint_id(packet_head_pc: u64) -> u8 { + ((packet_head_pc >> 2) & 0x3f) as u8 +} + +fn fetch_packet_head_seq(seq: usize, uops: &[CycleUop]) -> Option { + let mut head = 0usize; + while head < uops.len() { + let mut packet_end = head; + while packet_end + 1 < uops.len() && (packet_end - head + 1) < crate::FETCH_WIDTH { + if uops[packet_end].redirect_target.is_some() { + break; + } + packet_end += 1; + } + if seq >= head && seq <= packet_end { + return Some(head); + } + head = packet_end.saturating_add(1); + } + None +} + +fn packet_slot_for_seq(seq: usize, uops: &[CycleUop]) -> Option { + let head = 
fetch_packet_head_seq(seq, uops)?; + Some(seq.saturating_sub(head)) +} + +pub(crate) fn architectural_redirect_target( + commit: &CommitRecord, + decoded: &DecodedInstruction, +) -> Option { + let fallthrough = commit.pc.saturating_add(commit.len as u64); + (commit.trap_valid == 0 && commit.next_pc != fallthrough && is_boundary_redirect_owner(decoded)) + .then_some(commit.next_pc) +} + +pub(crate) fn deferred_bru_correction_target( + commit: &CommitRecord, + decoded: &DecodedInstruction, +) -> Option { + let fallthrough = commit.pc.saturating_add(commit.len as u64); + (commit.trap_valid == 0 + && commit.next_pc != fallthrough + && decoded.uop_group == "BRU" + && !is_boundary_redirect_owner(decoded)) + .then_some(commit.next_pc) +} + +pub(crate) fn block_epoch_for_seq(seq: usize, uops: &[CycleUop]) -> u16 { + let mut epoch = 0u16; + let mut block_head = true; + for (idx, uop) in uops.iter().enumerate().take(seq.saturating_add(1)) { + if idx == seq { + return epoch; + } + let is_bstart_head = is_bstart(decoded_ref(uop)) && block_head; + let is_bstart_mid = is_bstart(decoded_ref(uop)) && !block_head; + let is_boundary = + is_bstart_mid || is_bstop(decoded_ref(uop)) || is_macro_boundary(decoded_ref(uop)); + if is_boundary { + block_head = true; + } + if is_bstart_head { + block_head = false; + } + if is_boundary || is_bstart_head { + epoch = epoch.wrapping_add(1); + } + } + epoch +} + +fn decoded_ref(uop: &CycleUop) -> &DecodedInstruction { + &uop.decoded +} + +pub(crate) fn is_bstart(decoded: &DecodedInstruction) -> bool { + matches!( + decoded.mnemonic.as_str(), + "BSTART" + | "BSTART.ACCCVT" + | "BSTART.CALL" + | "BSTART.CUBE" + | "BSTART.FIXP" + | "BSTART.FP" + | "BSTART.MPAR" + | "BSTART.MSEQ" + | "BSTART.PAR" + | "BSTART.STD" + | "BSTART.SYS" + | "BSTART.TEPL" + | "BSTART.TLOAD" + | "BSTART.TMA" + | "BSTART.TMATMUL" + | "BSTART.TMATMUL.ACC" + | "BSTART.TMOV" + | "BSTART.TSTORE" + | "BSTART.VPAR" + | "BSTART.VSEQ" + | "C.BSTART" + | "C.BSTART.STD" + | 
"C.BSTART.SYS" + | "C.BSTART.VPAR" + | "C.BSTART.VSEQ" + | "HL.BSTART CALL" + | "HL.BSTART.FP" + | "HL.BSTART.STD" + | "HL.BSTART.SYS" + ) +} + +pub(crate) fn is_bstop(decoded: &DecodedInstruction) -> bool { + matches!(decoded.mnemonic.as_str(), "BSTOP" | "C.BSTOP") +} + +pub(crate) fn is_macro_boundary(decoded: &DecodedInstruction) -> bool { + matches!( + decoded.mnemonic.as_str(), + "FENTRY" | "FEXIT" | "FRET.RA" | "FRET.STK" + ) +} + +pub(crate) fn is_start_marker(decoded: &DecodedInstruction) -> bool { + is_bstart(decoded) || is_macro_boundary(decoded) +} + +pub(crate) fn is_boundary_redirect_owner(decoded: &DecodedInstruction) -> bool { + is_bstart(decoded) || is_bstop(decoded) || is_macro_boundary(decoded) +} + +pub(crate) fn is_legal_redirect_restart(decoded: &DecodedInstruction) -> bool { + is_bstart(decoded) || is_macro_boundary(decoded) +} + +pub(crate) fn legal_redirect_restart_seq( + source_seq: usize, + target_pc: u64, + uops: &[CycleUop], +) -> Option { + uops.iter() + .enumerate() + .skip(source_seq.saturating_add(1)) + .find_map(|(seq, uop)| { + (uop.commit.pc == target_pc && is_legal_redirect_restart(&uop.decoded)).then_some(seq) + }) +} + +fn queue_src_kind( + commit: &CommitRecord, + decoded: &DecodedInstruction, + idx: usize, +) -> Option { + let reg = match idx { + 0 if commit.src0_valid != 0 => commit.src0_reg, + 1 if commit.src1_valid != 0 => commit.src1_reg, + _ => return None, + }; + let asm = decoded.asm.to_ascii_lowercase(); + match reg { + REG_T1 if asm.contains("t#") => Some(QueueWakeKind::T), + REG_U1 if asm.contains("u#") => Some(QueueWakeKind::U), + _ => None, + } +} + +fn queue_src_rel(commit: &CommitRecord, decoded: &DecodedInstruction, idx: usize) -> Option { + let kind = queue_src_kind(commit, decoded, idx)?; + let asm = decoded.asm.to_ascii_lowercase(); + parse_queue_relative( + &asm, + match kind { + QueueWakeKind::T => "t#", + QueueWakeKind::U => "u#", + }, + ) +} + +fn parse_queue_relative(asm: &str, prefix: &str) -> Option { 
+ let start = asm.find(prefix)?; + let digits = asm[start + prefix.len()..] + .chars() + .take_while(|ch| ch.is_ascii_digit()) + .collect::(); + (!digits.is_empty()).then(|| digits.parse().ok()).flatten() +} + +fn queue_dst_kind(commit: &CommitRecord, decoded: &DecodedInstruction) -> Option { + if commit.wb_valid == 0 { + return None; + } + let asm = decoded.asm.to_ascii_lowercase(); + match commit.wb_rd { + REG_T1 if asm.contains("->t") => Some(QueueWakeKind::T), + REG_U1 if asm.contains("->u") => Some(QueueWakeKind::U), + _ => None, + } +} + +fn allocate_logical_queue_tag( + kind: Option, + next_t_tag: &mut usize, + next_u_tag: &mut usize, +) -> Option { + match kind { + Some(QueueWakeKind::T) => { + let tag = LogicalQueueTag { + kind: QueueWakeKind::T, + tag: *next_t_tag, + }; + *next_t_tag += 1; + Some(tag) + } + Some(QueueWakeKind::U) => { + let tag = LogicalQueueTag { + kind: QueueWakeKind::U, + tag: *next_u_tag, + }; + *next_u_tag += 1; + Some(tag) + } + None => None, + } +} + +fn resolve_logical_queue_src( + kind: QueueWakeKind, + rel: usize, + t_history: &[(usize, LogicalQueueTag)], + u_history: &[(usize, LogicalQueueTag)], +) -> Option<(usize, LogicalQueueTag)> { + if rel == 0 { + return None; + } + match kind { + QueueWakeKind::T => t_history.iter().rev().nth(rel - 1).copied(), + QueueWakeKind::U => u_history.iter().rev().nth(rel - 1).copied(), + } +} + +fn logical_tag_producer( + tag: LogicalQueueTag, + t_history: &[(usize, LogicalQueueTag)], + u_history: &[(usize, LogicalQueueTag)], +) -> Option { + match tag.kind { + QueueWakeKind::T => t_history + .iter() + .find(|(_, candidate)| *candidate == tag) + .map(|(seq, _)| *seq), + QueueWakeKind::U => u_history + .iter() + .find(|(_, candidate)| *candidate == tag) + .map(|(seq, _)| *seq), + } +} diff --git a/crates/camodel/src/frontend/decode/d1.rs b/crates/camodel/src/frontend/decode/d1.rs new file mode 100644 index 0000000..957f07c --- /dev/null +++ b/crates/camodel/src/frontend/decode/d1.rs @@ -0,0 +1 @@ 
+// D1 stage placeholder. diff --git a/crates/camodel/src/frontend/decode/d2.rs b/crates/camodel/src/frontend/decode/d2.rs new file mode 100644 index 0000000..f3f4614 --- /dev/null +++ b/crates/camodel/src/frontend/decode/d2.rs @@ -0,0 +1 @@ +// D2 stage placeholder. diff --git a/crates/camodel/src/frontend/decode/d3.rs b/crates/camodel/src/frontend/decode/d3.rs new file mode 100644 index 0000000..21832df --- /dev/null +++ b/crates/camodel/src/frontend/decode/d3.rs @@ -0,0 +1 @@ +// D3 stage placeholder. diff --git a/crates/camodel/src/frontend/decode/mod.rs b/crates/camodel/src/frontend/decode/mod.rs new file mode 100644 index 0000000..1398315 --- /dev/null +++ b/crates/camodel/src/frontend/decode/mod.rs @@ -0,0 +1 @@ +// Frontend-decode namespace; decode-stage ownership remains in frontend owner module. diff --git a/crates/camodel/src/frontend/dispatch/checkpoints.rs b/crates/camodel/src/frontend/dispatch/checkpoints.rs new file mode 100644 index 0000000..7d15540 --- /dev/null +++ b/crates/camodel/src/frontend/dispatch/checkpoints.rs @@ -0,0 +1 @@ +// Checkpoint helper namespace. diff --git a/crates/camodel/src/frontend/dispatch/mod.rs b/crates/camodel/src/frontend/dispatch/mod.rs new file mode 100644 index 0000000..4b66da2 --- /dev/null +++ b/crates/camodel/src/frontend/dispatch/mod.rs @@ -0,0 +1 @@ +// Frontend-dispatch namespace; dispatch ownership remains in frontend owner module. diff --git a/crates/camodel/src/frontend/dispatch/redirect.rs b/crates/camodel/src/frontend/dispatch/redirect.rs new file mode 100644 index 0000000..d021d51 --- /dev/null +++ b/crates/camodel/src/frontend/dispatch/redirect.rs @@ -0,0 +1 @@ +// Redirect helper namespace. diff --git a/crates/camodel/src/frontend/dispatch/s1.rs b/crates/camodel/src/frontend/dispatch/s1.rs new file mode 100644 index 0000000..341c31d --- /dev/null +++ b/crates/camodel/src/frontend/dispatch/s1.rs @@ -0,0 +1 @@ +// S1 stage placeholder. 
diff --git a/crates/camodel/src/frontend/dispatch/s2.rs b/crates/camodel/src/frontend/dispatch/s2.rs new file mode 100644 index 0000000..51c6402 --- /dev/null +++ b/crates/camodel/src/frontend/dispatch/s2.rs @@ -0,0 +1 @@ +// S2 stage placeholder. diff --git a/crates/camodel/src/frontend/fetch/f0.rs b/crates/camodel/src/frontend/fetch/f0.rs new file mode 100644 index 0000000..cf39dfd --- /dev/null +++ b/crates/camodel/src/frontend/fetch/f0.rs @@ -0,0 +1 @@ +// F0 stage placeholder. diff --git a/crates/camodel/src/frontend/fetch/f1.rs b/crates/camodel/src/frontend/fetch/f1.rs new file mode 100644 index 0000000..d1df03a --- /dev/null +++ b/crates/camodel/src/frontend/fetch/f1.rs @@ -0,0 +1 @@ +// F1 stage placeholder. diff --git a/crates/camodel/src/frontend/fetch/f2.rs b/crates/camodel/src/frontend/fetch/f2.rs new file mode 100644 index 0000000..0de42a3 --- /dev/null +++ b/crates/camodel/src/frontend/fetch/f2.rs @@ -0,0 +1 @@ +// F2 stage placeholder. diff --git a/crates/camodel/src/frontend/fetch/f3.rs b/crates/camodel/src/frontend/fetch/f3.rs new file mode 100644 index 0000000..93f6657 --- /dev/null +++ b/crates/camodel/src/frontend/fetch/f3.rs @@ -0,0 +1 @@ +// F3 stage placeholder. diff --git a/crates/camodel/src/frontend/fetch/f4.rs b/crates/camodel/src/frontend/fetch/f4.rs new file mode 100644 index 0000000..b3cdb57 --- /dev/null +++ b/crates/camodel/src/frontend/fetch/f4.rs @@ -0,0 +1 @@ +// F4 stage placeholder. diff --git a/crates/camodel/src/frontend/fetch/ib.rs b/crates/camodel/src/frontend/fetch/ib.rs new file mode 100644 index 0000000..e0db35f --- /dev/null +++ b/crates/camodel/src/frontend/fetch/ib.rs @@ -0,0 +1 @@ +// IB stage placeholder. diff --git a/crates/camodel/src/frontend/fetch/mod.rs b/crates/camodel/src/frontend/fetch/mod.rs new file mode 100644 index 0000000..83119fe --- /dev/null +++ b/crates/camodel/src/frontend/fetch/mod.rs @@ -0,0 +1 @@ +// Fetch-stage namespace; fetch ownership remains in frontend owner module. 
diff --git a/crates/camodel/src/frontend/mod.rs b/crates/camodel/src/frontend/mod.rs new file mode 100644 index 0000000..07531b4 --- /dev/null +++ b/crates/camodel/src/frontend/mod.rs @@ -0,0 +1,1009 @@ +pub mod decode; +pub mod dispatch; +pub mod fetch; + +use std::collections::VecDeque; + +use isa::DecodedInstruction; +use isa::TRAP_SETRET_NOT_ADJACENT; + +use crate::{ + BranchOwnerContext, BranchOwnerKind, CallMaterializationKind, CycleUop, DISPATCH_WIDTH, + DynamicTargetSourceKind, FRONTEND_STAGE_NAMES, IQ_ENQUEUE_PORTS, ISSUE_WIDTH, IqEntry, + PHYS_IQ_COUNT, PhysIq, ReturnConsumerKind, StageQueues, allocate_qtag, annotate_qtag_sources, + checkpoint_id_for_seq, is_boundary_redirect_owner, is_bstart, is_bstop, is_macro_boundary, + is_start_marker, packet_checkpoint_id, rebuild_iq_owner_table, register_iq_wait_crossbar_entry, + rob_checkpoint_id_for_seq, route_phys_iq, snapshot_ready_tables_for_checkpoint, + unresolved_redirect_barrier, +}; + +#[cfg(test)] +pub(crate) fn live_checkpoint_id_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> u8 { + pipeline + .seq_checkpoint_ids + .get(&seq) + .copied() + .unwrap_or_else(|| checkpoint_id_for_seq(seq, uops)) +} + +pub(crate) fn live_rob_checkpoint_id_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> u8 { + pipeline + .seq_rob_checkpoint_ids + .get(&seq) + .copied() + .unwrap_or_else(|| { + if !is_start_marker(&uops[seq].decoded) { + return 0; + } + let static_fetch_checkpoint = checkpoint_id_for_seq(seq, uops); + let live_fetch_checkpoint = pipeline + .seq_checkpoint_ids + .get(&seq) + .copied() + .unwrap_or(static_fetch_checkpoint); + let slot_offset = + rob_checkpoint_id_for_seq(seq, uops).wrapping_sub(static_fetch_checkpoint) & 0x3f; + live_fetch_checkpoint.wrapping_add(slot_offset) & 0x3f + }) +} + +pub(crate) fn recovery_checkpoint_id_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> u8 { + if let Some(checkpoint_id) = 
pipeline.seq_recovery_checkpoint_ids.get(&seq).copied() { + return checkpoint_id; + } + if is_start_marker(&uops[seq].decoded) { + return live_rob_checkpoint_id_for_seq(seq, pipeline, uops); + } + (0..seq) + .rev() + .find(|&candidate| is_start_marker(&uops[candidate].decoded)) + .map(|candidate| live_rob_checkpoint_id_for_seq(candidate, pipeline, uops)) + .unwrap_or(0) +} + +pub(crate) fn recovery_epoch_for_seq(seq: usize, pipeline: &StageQueues, uops: &[CycleUop]) -> u16 { + pipeline + .seq_recovery_epochs + .get(&seq) + .copied() + .unwrap_or_else(|| crate::block_epoch_for_seq(seq, uops)) +} + +pub(crate) fn branch_context_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> BranchOwnerContext { + pipeline + .seq_branch_contexts + .get(&seq) + .copied() + .or_else(|| fallback_branch_context_for_seq(seq, pipeline, uops)) + .unwrap_or_default() +} + +pub(crate) fn live_boundary_target_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + let uop = uops.get(seq)?; + if is_boundary_redirect_owner(&uop.decoded) { + return boundary_context_from_uop(seq, uop, pipeline) + .map(|context| context.target_pc) + .filter(|target_pc| *target_pc != 0) + .or_else(|| { + pipeline + .seq_branch_contexts + .get(&seq) + .map(|context| context.target_pc) + .filter(|target_pc| *target_pc != 0) + }) + .or(uop.redirect_target); + } + pipeline + .seq_branch_contexts + .get(&seq) + .map(|context| context.target_pc) + .filter(|target_pc| *target_pc != 0) +} + +pub(crate) fn live_branch_kind_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + let uop = uops.get(seq)?; + if is_boundary_redirect_owner(&uop.decoded) { + return boundary_context_from_uop(seq, uop, pipeline) + .map(|context| context.kind) + .filter(|kind| *kind != BranchOwnerKind::None) + .or_else(|| { + pipeline + .seq_branch_contexts + .get(&seq) + .map(|context| context.kind) + .filter(|kind| *kind != BranchOwnerKind::None) + }); + } + 
let kind = branch_context_for_seq(seq, pipeline, uops).kind; + (kind != BranchOwnerKind::None).then_some(kind) +} + +pub(crate) fn branch_kind_label(kind: BranchOwnerKind) -> Option<&'static str> { + match kind { + BranchOwnerKind::None => None, + BranchOwnerKind::Fall => Some("fall"), + BranchOwnerKind::Cond => Some("cond"), + BranchOwnerKind::Call => Some("call"), + BranchOwnerKind::Ret => Some("ret"), + BranchOwnerKind::Direct => Some("direct"), + BranchOwnerKind::Ind => Some("ind"), + BranchOwnerKind::ICall => Some("icall"), + } +} + +pub(crate) fn return_consumer_kind_label(kind: ReturnConsumerKind) -> &'static str { + match kind { + ReturnConsumerKind::SetcTgt => "setc_tgt", + ReturnConsumerKind::FretRa => "fret_ra", + ReturnConsumerKind::FretStk => "fret_stk", + } +} + +pub(crate) fn call_materialization_kind_label(kind: CallMaterializationKind) -> &'static str { + match kind { + CallMaterializationKind::FusedCall => "fused_call", + CallMaterializationKind::AdjacentSetret => "adjacent_setret", + } +} + +pub(crate) fn dynamic_target_source_kind_label(kind: DynamicTargetSourceKind) -> &'static str { + match kind { + DynamicTargetSourceKind::ArchTargetSetup => "arch_target_setup", + DynamicTargetSourceKind::CallReturnFused => "call_return_fused", + DynamicTargetSourceKind::CallReturnAdjacentSetret => "call_return_adjacent_setret", + } +} + +pub(crate) fn live_return_consumer_kind_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + if let Some(kind) = pipeline.seq_return_consumer_kinds.get(&seq).copied() { + return Some(kind); + } + match uops.get(seq)?.decoded.mnemonic.as_str() { + "FRET.RA" => Some(ReturnConsumerKind::FretRa), + "FRET.STK" => Some(ReturnConsumerKind::FretStk), + _ => None, + } +} + +pub(crate) fn live_call_materialization_kind_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + if let Some(kind) = pipeline.seq_call_materialization_kinds.get(&seq).copied() { + return 
Some(kind); + } + let uop = uops.get(seq)?; + if is_setret(&uop.decoded) { + return Some(CallMaterializationKind::AdjacentSetret); + } + (is_call_header_uop(uop) && call_return_target_pc(uop).is_some()) + .then_some(CallMaterializationKind::FusedCall) +} + +fn dynamic_target_source_kind_from_call_materialization( + kind: CallMaterializationKind, +) -> DynamicTargetSourceKind { + match kind { + CallMaterializationKind::FusedCall => DynamicTargetSourceKind::CallReturnFused, + CallMaterializationKind::AdjacentSetret => { + DynamicTargetSourceKind::CallReturnAdjacentSetret + } + } +} + +pub(crate) fn live_dynamic_target_source_kind_for_seq( + seq: usize, + pipeline: &StageQueues, + _uops: &[CycleUop], +) -> Option { + pipeline.seq_boundary_target_source_kinds.get(&seq).copied() +} + +pub(crate) fn live_dynamic_target_setup_epoch_for_seq( + seq: usize, + pipeline: &StageQueues, + _uops: &[CycleUop], +) -> Option { + pipeline.seq_boundary_target_setup_epochs.get(&seq).copied() +} + +pub(crate) fn live_dynamic_target_source_epoch_for_seq( + seq: usize, + pipeline: &StageQueues, + _uops: &[CycleUop], +) -> Option { + pipeline + .seq_boundary_target_source_epochs + .get(&seq) + .copied() +} + +pub(crate) fn live_boundary_epoch_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + live_branch_kind_for_seq(seq, pipeline, uops) + .map(|_| recovery_epoch_for_seq(seq, pipeline, uops)) +} + +pub(crate) fn live_dynamic_target_producer_kind_for_seq( + seq: usize, + pipeline: &StageQueues, + _uops: &[CycleUop], +) -> Option { + pipeline + .seq_boundary_target_producer_kinds + .get(&seq) + .copied() +} + +pub(crate) fn live_dynamic_target_source_owner_row_id_for_seq( + seq: usize, + pipeline: &StageQueues, + _uops: &[CycleUop], +) -> Option { + pipeline + .seq_boundary_target_source_owner_seqs + .get(&seq) + .copied() + .map(|owner_seq| format!("uop{owner_seq}")) +} + +pub(crate) fn live_control_target_owner_row_id_for_seq( + seq: usize, + pipeline: 
&StageQueues, + uops: &[CycleUop], +) -> Option { + if let Some(owner_seq) = pipeline.seq_boundary_target_owner_seqs.get(&seq).copied() { + return Some(format!("uop{owner_seq}")); + } + if let Some(owner_seq) = pipeline + .seq_call_return_target_owner_seqs + .get(&seq) + .copied() + { + return Some(format!("uop{owner_seq}")); + } + let decoded = &uops.get(seq)?.decoded; + if is_setret(decoded) || matches!(decoded.mnemonic.as_str(), "FRET.RA" | "FRET.STK") { + return Some(format!("uop{seq}")); + } + None +} + +fn fallback_branch_context_for_seq( + seq: usize, + pipeline: &StageQueues, + uops: &[CycleUop], +) -> Option { + (0..seq) + .rev() + .find_map(|candidate| boundary_context_from_uop(candidate, &uops[candidate], pipeline)) +} + +fn branch_owner_kind_for_uop(uop: &CycleUop) -> BranchOwnerKind { + if matches!(uop.decoded.mnemonic.as_str(), "FRET.RA" | "FRET.STK") { + return BranchOwnerKind::Ret; + } + if matches!( + uop.decoded.mnemonic.as_str(), + "BSTART CALL" | "HL.BSTART CALL" + ) { + return BranchOwnerKind::Call; + } + + let asm = uop.decoded.asm.to_ascii_uppercase(); + if asm.contains(" COND") { + return BranchOwnerKind::Cond; + } + if asm.contains(" CALL") { + return BranchOwnerKind::Call; + } + if asm.contains(" RET") { + return BranchOwnerKind::Ret; + } + if asm.contains(" DIRECT") { + return BranchOwnerKind::Direct; + } + if asm.contains(" ICALL") { + return BranchOwnerKind::ICall; + } + if asm.contains(" IND") { + return BranchOwnerKind::Ind; + } + if asm.contains(" FALL") { + return BranchOwnerKind::Fall; + } + + if uop.decoded.mnemonic == "C.BSTART.STD" { + return match decoded_field_u64(&uop.decoded, "BrType") { + Some(1) => BranchOwnerKind::Fall, + Some(2) => BranchOwnerKind::Direct, + Some(3) => BranchOwnerKind::Cond, + Some(4) => BranchOwnerKind::Call, + Some(5) => BranchOwnerKind::Ind, + Some(6) => BranchOwnerKind::ICall, + Some(7) => BranchOwnerKind::Ret, + _ => BranchOwnerKind::None, + }; + } + + BranchOwnerKind::None +} + +fn 
boundary_context_from_uop( + seq: usize, + uop: &CycleUop, + pipeline: &StageQueues, +) -> Option { + let kind = branch_owner_kind_for_uop(uop); + if kind == BranchOwnerKind::None { + return None; + } + let target_pc = boundary_target_pc(seq, uop, pipeline, kind).unwrap_or(0); + Some(BranchOwnerContext { + kind, + base_pc: uop.commit.pc, + target_pc, + off: if target_pc == 0 { + 0 + } else { + target_pc.wrapping_sub(uop.commit.pc) + }, + pred_take: match kind { + BranchOwnerKind::Cond => target_pc != 0 && target_pc < uop.commit.pc, + BranchOwnerKind::Ret => false, + BranchOwnerKind::Fall + | BranchOwnerKind::Call + | BranchOwnerKind::Direct + | BranchOwnerKind::Ind + | BranchOwnerKind::ICall + | BranchOwnerKind::None => false, + }, + epoch: pipeline + .seq_recovery_epochs + .get(&seq) + .copied() + .unwrap_or(0) + .wrapping_add(1), + }) +} + +fn boundary_target_pc( + seq: usize, + uop: &CycleUop, + pipeline: &StageQueues, + kind: BranchOwnerKind, +) -> Option { + match kind { + BranchOwnerKind::Ret | BranchOwnerKind::Ind | BranchOwnerKind::ICall => pipeline + .seq_boundary_target_pcs + .get(&seq) + .copied() + .or_else(|| pipeline.seq_dynamic_target_pcs.get(&seq).copied()) + .or(pipeline.active_dynamic_target_pc) + .or(uop.redirect_target), + BranchOwnerKind::None => None, + BranchOwnerKind::Fall + | BranchOwnerKind::Cond + | BranchOwnerKind::Call + | BranchOwnerKind::Direct => uop.redirect_target, + } +} + +fn is_call_header_uop(uop: &CycleUop) -> bool { + is_bstart(&uop.decoded) && branch_owner_kind_for_uop(uop) == BranchOwnerKind::Call +} + +fn is_setret(decoded: &DecodedInstruction) -> bool { + matches!( + decoded.mnemonic.as_str(), + "SETRET" | "C.SETRET" | "HL.SETRET" + ) +} + +fn call_return_target_pc(uop: &CycleUop) -> Option { + ((is_call_header_uop(uop) + || matches!( + uop.decoded.mnemonic.as_str(), + "SETRET" | "C.SETRET" | "HL.SETRET" + )) + && uop.commit.wb_valid != 0 + && uop.commit.wb_rd == 10) + .then_some(uop.commit.wb_data) +} + +fn 
update_dynamic_target_owner(seq: usize, pipeline: &mut StageQueues, uops: &[CycleUop]) { + let uop = &uops[seq]; + let boundary_kind = branch_owner_kind_for_uop(uop); + let target_pc = match uop.decoded.mnemonic.as_str() { + "SETC.TGT" | "C.SETC.TGT" if uop.commit.src0_valid != 0 => Some(uop.commit.src0_data), + "FRET.RA" | "FRET.STK" => uop.redirect_target, + "BSTOP" | "C.BSTOP" => pipeline.active_dynamic_target_pc, + _ => None, + }; + if let Some(target_pc) = target_pc { + pipeline.seq_dynamic_target_pcs.insert(seq, target_pc); + } + if matches!(uop.decoded.mnemonic.as_str(), "SETC.TGT" | "C.SETC.TGT") { + let setup_epoch = target_pc.map(|_| recovery_epoch_for_seq(seq, pipeline, uops)); + let sourced_from_call = target_pc.is_some() + && uop.commit.src0_reg == 10 + && pipeline.active_call_return_target_pc == target_pc; + let source_owner_seq = target_pc.map(|_| { + if sourced_from_call { + pipeline.active_call_return_target_owner_seq.unwrap_or(seq) + } else { + seq + } + }); + let source_epoch = target_pc.map(|_| { + if sourced_from_call { + pipeline + .active_call_return_target_epoch + .unwrap_or_else(|| recovery_epoch_for_seq(seq, pipeline, uops)) + } else { + recovery_epoch_for_seq(seq, pipeline, uops) + } + }); + pipeline.active_dynamic_target_pc = target_pc; + pipeline.active_dynamic_target_owner_seq = target_pc.map(|_| seq); + pipeline.active_dynamic_target_producer_kind = + target_pc.map(|_| ReturnConsumerKind::SetcTgt); + pipeline.active_dynamic_target_setup_epoch = setup_epoch; + pipeline.active_dynamic_target_owner_kind = target_pc.map(|_| ReturnConsumerKind::SetcTgt); + pipeline.active_dynamic_target_source_owner_seq = source_owner_seq; + pipeline.active_dynamic_target_source_epoch = source_epoch; + pipeline.active_dynamic_target_call_materialization_kind = + target_pc.and_then(|target_pc| { + (uop.commit.src0_reg == 10 + && pipeline.active_call_return_target_pc == Some(target_pc)) + .then_some(pipeline.active_call_return_materialization_kind) + 
.flatten() + }); + pipeline.active_dynamic_target_source_kind = target_pc.map(|target_pc| { + if uop.commit.src0_reg == 10 && pipeline.active_call_return_target_pc == Some(target_pc) + { + pipeline + .active_call_return_materialization_kind + .map(dynamic_target_source_kind_from_call_materialization) + .unwrap_or(DynamicTargetSourceKind::ArchTargetSetup) + } else { + DynamicTargetSourceKind::ArchTargetSetup + } + }); + if matches!( + pipeline.active_branch_context.kind, + BranchOwnerKind::Ret | BranchOwnerKind::Ind | BranchOwnerKind::ICall + ) { + pipeline.active_branch_context.target_pc = target_pc.unwrap_or(0); + pipeline.active_branch_context.off = target_pc + .map(|target_pc| target_pc.wrapping_sub(pipeline.active_branch_context.base_pc)) + .unwrap_or(0); + } + } + let boundary_owner = match uop.decoded.mnemonic.as_str() { + "FRET.RA" => target_pc.map(|target_pc| { + let sourced_from_call = pipeline.active_call_return_target_pc == Some(target_pc); + let source_owner_seq = if sourced_from_call { + pipeline.active_call_return_target_owner_seq.unwrap_or(seq) + } else { + seq + }; + let source_epoch = if sourced_from_call { + pipeline + .active_call_return_target_epoch + .unwrap_or_else(|| recovery_epoch_for_seq(seq, pipeline, uops)) + } else { + recovery_epoch_for_seq(seq, pipeline, uops) + }; + ( + target_pc, + if sourced_from_call { + pipeline.active_call_return_target_owner_seq.unwrap_or(seq) + } else { + seq + }, + Some(ReturnConsumerKind::FretRa), + Some(recovery_epoch_for_seq(seq, pipeline, uops)), + Some(ReturnConsumerKind::FretRa), + Some(source_owner_seq), + Some(source_epoch), + sourced_from_call + .then_some(pipeline.active_call_return_materialization_kind) + .flatten(), + sourced_from_call + .then_some( + pipeline + .active_call_return_materialization_kind + .map(dynamic_target_source_kind_from_call_materialization), + ) + .flatten(), + ) + }), + "FRET.STK" => target_pc.map(|target_pc| { + ( + target_pc, + seq, + Some(ReturnConsumerKind::FretStk), + 
Some(recovery_epoch_for_seq(seq, pipeline, uops)), + Some(ReturnConsumerKind::FretStk), + Some(seq), + Some(recovery_epoch_for_seq(seq, pipeline, uops)), + None, + None, + ) + }), + "BSTOP" | "C.BSTOP" + if matches!( + pipeline.active_branch_context.kind, + BranchOwnerKind::Ret | BranchOwnerKind::Ind | BranchOwnerKind::ICall + ) => + { + pipeline + .active_dynamic_target_pc + .zip(pipeline.active_dynamic_target_owner_seq) + .zip(pipeline.active_dynamic_target_owner_kind) + .zip(pipeline.active_dynamic_target_producer_kind) + .zip(pipeline.active_dynamic_target_setup_epoch) + .zip(pipeline.active_dynamic_target_source_owner_seq) + .zip(pipeline.active_dynamic_target_source_epoch) + .map( + |( + ( + ((((target_pc, owner_seq), owner_kind), producer_kind), setup_epoch), + source_owner_seq, + ), + source_epoch, + )| { + ( + target_pc, + owner_seq, + Some(producer_kind), + Some(setup_epoch), + matches!(pipeline.active_branch_context.kind, BranchOwnerKind::Ret) + .then_some(owner_kind), + Some(source_owner_seq), + Some(source_epoch), + pipeline.active_dynamic_target_call_materialization_kind, + pipeline.active_dynamic_target_source_kind, + ) + }, + ) + } + _ if matches!(boundary_kind, BranchOwnerKind::Ret) => pipeline + .active_dynamic_target_pc + .zip(pipeline.active_dynamic_target_owner_seq) + .zip(pipeline.active_dynamic_target_owner_kind) + .zip(pipeline.active_dynamic_target_producer_kind) + .zip(pipeline.active_dynamic_target_setup_epoch) + .zip(pipeline.active_dynamic_target_source_owner_seq) + .zip(pipeline.active_dynamic_target_source_epoch) + .map( + |( + ( + ((((target_pc, owner_seq), owner_kind), producer_kind), setup_epoch), + source_owner_seq, + ), + source_epoch, + )| { + ( + target_pc, + owner_seq, + Some(producer_kind), + Some(setup_epoch), + Some(owner_kind), + Some(source_owner_seq), + Some(source_epoch), + pipeline.active_dynamic_target_call_materialization_kind, + pipeline.active_dynamic_target_source_kind, + ) + }, + ), + _ if matches!(boundary_kind, 
BranchOwnerKind::Ind | BranchOwnerKind::ICall) => pipeline + .active_dynamic_target_pc + .zip(pipeline.active_dynamic_target_owner_seq) + .zip(pipeline.active_dynamic_target_producer_kind) + .zip(pipeline.active_dynamic_target_setup_epoch) + .zip(pipeline.active_dynamic_target_source_owner_seq) + .zip(pipeline.active_dynamic_target_source_epoch) + .map( + |( + ((((target_pc, owner_seq), producer_kind), setup_epoch), source_owner_seq), + source_epoch, + )| { + ( + target_pc, + owner_seq, + Some(producer_kind), + Some(setup_epoch), + None, + Some(source_owner_seq), + Some(source_epoch), + pipeline.active_dynamic_target_call_materialization_kind, + pipeline.active_dynamic_target_source_kind, + ) + }, + ), + _ => None, + }; + if let Some(( + target_pc, + owner_seq, + producer_kind, + setup_epoch, + return_kind, + source_owner_seq, + source_epoch, + call_materialization_kind, + target_source_kind, + )) = boundary_owner + { + pipeline.seq_boundary_target_pcs.insert(seq, target_pc); + pipeline + .seq_boundary_target_owner_seqs + .insert(seq, owner_seq); + if let Some(producer_kind) = producer_kind { + pipeline + .seq_boundary_target_producer_kinds + .insert(seq, producer_kind); + } + if let Some(setup_epoch) = setup_epoch { + pipeline + .seq_boundary_target_setup_epochs + .insert(seq, setup_epoch); + } + if let Some(source_owner_seq) = source_owner_seq { + pipeline + .seq_boundary_target_source_owner_seqs + .insert(seq, source_owner_seq); + } + if let Some(source_epoch) = source_epoch { + pipeline + .seq_boundary_target_source_epochs + .insert(seq, source_epoch); + } + if let Some(target_source_kind) = target_source_kind { + pipeline + .seq_boundary_target_source_kinds + .insert(seq, target_source_kind); + } + if let Some(return_kind) = return_kind { + pipeline.seq_return_consumer_kinds.insert(seq, return_kind); + } + if let Some(call_materialization_kind) = call_materialization_kind { + pipeline + .seq_call_materialization_kinds + .insert(seq, call_materialization_kind); 
+ } + } +} + +fn update_call_header_owner(seq: usize, pipeline: &mut StageQueues, uops: &[CycleUop]) { + let uop = &uops[seq]; + let is_call_header = is_call_header_uop(uop); + let is_setret = is_setret(&uop.decoded); + + if let Some(active_header_seq) = pipeline.active_call_header_seq { + if seq != active_header_seq.saturating_add(1) || !is_setret { + pipeline.active_call_header_seq = None; + } + } + + if is_call_header { + pipeline.active_call_header_seq = None; + if let Some(target_pc) = call_return_target_pc(uop) { + let target_epoch = recovery_epoch_for_seq(seq, pipeline, uops); + pipeline.seq_call_return_target_pcs.insert(seq, target_pc); + pipeline.seq_call_return_target_owner_seqs.insert(seq, seq); + pipeline + .seq_call_return_target_epochs + .insert(seq, target_epoch); + pipeline + .seq_call_materialization_kinds + .insert(seq, CallMaterializationKind::FusedCall); + pipeline.active_call_return_target_pc = Some(target_pc); + pipeline.active_call_return_target_owner_seq = Some(seq); + pipeline.active_call_return_target_epoch = Some(target_epoch); + pipeline.active_call_return_materialization_kind = + Some(CallMaterializationKind::FusedCall); + } else { + pipeline.active_call_header_seq = Some(seq); + } + return; + } + + if is_setret { + if let Some(header_seq) = pipeline + .active_call_header_seq + .filter(|&header_seq| seq == header_seq.saturating_add(1)) + { + if let Some(target_pc) = call_return_target_pc(uop) { + let target_epoch = recovery_epoch_for_seq(seq, pipeline, uops); + pipeline + .seq_call_return_target_pcs + .insert(header_seq, target_pc); + pipeline + .seq_call_return_target_owner_seqs + .insert(header_seq, seq); + pipeline + .seq_call_return_target_epochs + .insert(header_seq, target_epoch); + pipeline + .seq_call_materialization_kinds + .insert(header_seq, CallMaterializationKind::AdjacentSetret); + pipeline.seq_call_return_target_pcs.insert(seq, target_pc); + pipeline.seq_call_return_target_owner_seqs.insert(seq, seq); + pipeline + 
.seq_call_return_target_epochs + .insert(seq, target_epoch); + pipeline + .seq_call_materialization_kinds + .insert(seq, CallMaterializationKind::AdjacentSetret); + pipeline.active_call_return_target_pc = Some(target_pc); + pipeline.active_call_return_target_owner_seq = Some(seq); + pipeline.active_call_return_target_epoch = Some(target_epoch); + pipeline.active_call_return_materialization_kind = + Some(CallMaterializationKind::AdjacentSetret); + } + pipeline.active_call_header_seq = None; + } else { + pipeline + .seq_call_materialization_kinds + .insert(seq, CallMaterializationKind::AdjacentSetret); + pipeline + .seq_call_header_faults + .insert(seq, TRAP_SETRET_NOT_ADJACENT); + pipeline.active_call_header_seq = None; + } + } +} + +fn decoded_field_u64(decoded: &DecodedInstruction, name: &str) -> Option { + decoded + .fields + .iter() + .find(|field| field.name == name) + .map(|field| field.value_u64) +} + +pub(crate) fn fill_fetch( + cycle: u64, + pipeline: &mut StageQueues, + next_fetch_seq: &mut usize, + uops: &[CycleUop], +) { + if let Some(redirect) = pipeline.frontend_redirect { + if cycle < redirect.resume_cycle { + return; + } + *next_fetch_seq = redirect.restart_seq; + pipeline.frontend_redirect = None; + } + + let total_uops = uops.len(); + let mut fetched = Vec::new(); + while pipeline.frontend[0].len() < crate::FETCH_WIDTH && *next_fetch_seq < total_uops { + if let Some(barrier_seq) = unresolved_redirect_barrier(*next_fetch_seq, uops) { + if *next_fetch_seq > barrier_seq { + break; + } + } + let seq = *next_fetch_seq; + pipeline.frontend[0].push_back(seq); + fetched.push(seq); + *next_fetch_seq += 1; + if uops[seq].redirect_target.is_some() { + break; + } + } + if let Some(&packet_head_seq) = fetched.first() { + let checkpoint_id = packet_checkpoint_id(uops[packet_head_seq].commit.pc); + for seq in fetched { + pipeline + .seq_checkpoint_ids + .entry(seq) + .or_insert(checkpoint_id); + } + } +} + +pub(crate) fn dispatch_to_iq_and_bypass( + cycle: u64, + 
pipeline: &mut StageQueues, + iq: &mut Vec, + _rob: &mut VecDeque, + uops: &mut [CycleUop], +) { + let mut stay_s2 = VecDeque::new(); + let mut enqueue_ports_used = [0usize; PHYS_IQ_COUNT]; + while let Some(seq) = pipeline.frontend[10].pop_front() { + let decoded = &uops[seq].decoded; + let is_call_header = is_call_header_uop(&uops[seq]); + let is_bstart_head = is_bstart(decoded) && pipeline.active_block_head; + let is_bstart_mid = is_bstart(decoded) && !pipeline.active_block_head; + let is_boundary = is_bstart_mid || is_bstop(decoded) || is_macro_boundary(decoded); + let recovery_checkpoint_id = if is_start_marker(&uops[seq].decoded) { + let checkpoint_id = live_rob_checkpoint_id_for_seq(seq, pipeline, uops); + pipeline.seq_rob_checkpoint_ids.insert(seq, checkpoint_id); + pipeline.active_recovery_checkpoint_id = checkpoint_id; + snapshot_ready_tables_for_checkpoint(pipeline, checkpoint_id); + checkpoint_id + } else { + pipeline.active_recovery_checkpoint_id + }; + pipeline + .seq_recovery_checkpoint_ids + .insert(seq, recovery_checkpoint_id); + pipeline + .seq_recovery_epochs + .insert(seq, pipeline.active_recovery_epoch); + pipeline + .seq_branch_contexts + .insert(seq, pipeline.active_branch_context); + update_dynamic_target_owner(seq, pipeline, uops); + update_call_header_owner(seq, pipeline, uops); + if is_boundary { + pipeline.active_block_head = true; + } + if is_bstart_head { + pipeline.active_block_head = false; + } + if is_boundary || is_bstart_head { + pipeline.active_recovery_epoch = pipeline.active_recovery_epoch.wrapping_add(1); + } + if is_boundary || is_bstart_head { + pipeline.active_branch_context = boundary_context_from_uop(seq, &uops[seq], pipeline) + .map(|context| BranchOwnerContext { + epoch: pipeline.active_recovery_epoch, + ..context + }) + .unwrap_or(BranchOwnerContext { + epoch: pipeline.active_recovery_epoch, + ..BranchOwnerContext::default() + }); + pipeline.active_dynamic_target_pc = None; + pipeline.active_dynamic_target_owner_seq = 
None; + pipeline.active_dynamic_target_producer_kind = None; + pipeline.active_dynamic_target_setup_epoch = None; + pipeline.active_dynamic_target_owner_kind = None; + pipeline.active_dynamic_target_source_owner_seq = None; + pipeline.active_dynamic_target_source_epoch = None; + pipeline.active_dynamic_target_source_kind = None; + pipeline.active_dynamic_target_call_materialization_kind = None; + if !is_call_header { + pipeline.active_call_header_seq = None; + } + } + if uops[seq].bypass_d2 { + uops[seq].pick_wakeup_visible.get_or_insert(cycle + 1); + uops[seq].data_ready_visible.get_or_insert(cycle + 1); + uops[seq].phys_iq = Some(PhysIq::CmdIq); + if pipeline.w2.len() < ISSUE_WIDTH { + pipeline.w2.push_back(seq); + } else { + stay_s2.push_back(seq); + } + } else { + let Some(phys_iq) = route_phys_iq(seq, iq, uops, &enqueue_ports_used) else { + stay_s2.push_back(seq); + continue; + }; + let Some(qtag) = allocate_qtag(&pipeline.iq_tags, phys_iq) else { + stay_s2.push_back(seq); + continue; + }; + let queue_idx = phys_iq.index(); + if enqueue_ports_used[queue_idx] >= IQ_ENQUEUE_PORTS { + stay_s2.push_back(seq); + continue; + } + enqueue_ports_used[queue_idx] += 1; + uops[seq].phys_iq = Some(phys_iq); + uops[seq].dst_qtag = Some(qtag); + pipeline.iq_tags.insert(seq, qtag); + annotate_qtag_sources( + seq, + &pipeline.iq_tags, + &pipeline.ready_table_t, + &pipeline.ready_table_u, + uops, + ); + register_iq_wait_crossbar_entry(&mut pipeline.qtag_wait_crossbar, seq, &uops[seq]); + iq.push(crate::make_iq_entry( + cycle, + seq, + phys_iq, + &pipeline.ready_table_t, + &pipeline.ready_table_u, + uops, + )); + } + } + pipeline.frontend[10] = stay_s2; + rebuild_iq_owner_table(&mut pipeline.iq_owner_table, iq, &pipeline.iq_tags); +} + +pub(crate) fn advance_frontend(pipeline: &mut StageQueues, rob: &mut VecDeque) { + for idx in (1..FRONTEND_STAGE_NAMES.len()).rev() { + let mut prev = std::mem::take(&mut pipeline.frontend[idx - 1]); + if idx == 7 { + let needed_rob = 
prev.iter().filter(|seq| !rob.contains(seq)).count(); + if rob.len() + needed_rob > crate::ROB_CAPACITY { + pipeline.frontend[idx - 1] = prev; + continue; + } + let mut bypass = VecDeque::new(); + for &seq in &prev { + if !rob.contains(&seq) { + rob.push_back(seq); + } + bypass.push_back(seq); + } + advance_simple(&mut pipeline.frontend[idx], &mut bypass, DISPATCH_WIDTH); + pipeline.frontend[idx - 1] = bypass; + } else { + advance_simple(&mut pipeline.frontend[idx], &mut prev, DISPATCH_WIDTH); + pipeline.frontend[idx - 1] = prev; + } + } +} + +pub(crate) fn issue_queue_candidates(uop: &CycleUop) -> Vec { + match uop.decoded.uop_group.as_str() { + "ALU" => vec![PhysIq::AluIq0, PhysIq::SharedIq1], + "BRU" => vec![PhysIq::BruIq], + "LDA/BASE_IMM" | "LDA" | "AGU" => vec![PhysIq::AguIq0, PhysIq::AguIq1], + "STA/BASE_IMM" | "STA" | "STD" => vec![PhysIq::StdIq0, PhysIq::StdIq1], + "FSU" => vec![PhysIq::SharedIq1], + "SYS" => vec![PhysIq::SharedIq1], + "CMD" | "BBD" => vec![PhysIq::CmdIq], + _ => vec![PhysIq::SharedIq1], + } +} + +pub(crate) fn d2_bypass(decoded: &DecodedInstruction) -> bool { + matches!( + decoded.mnemonic.as_str(), + "SETRET" | "C.SETRET" | "HL.SETRET" + ) || decoded.uop_group == "BBD" +} + +fn advance_simple(dst: &mut VecDeque, src: &mut VecDeque, capacity: usize) { + while dst.len() < capacity { + let Some(seq) = src.pop_front() else { + break; + }; + dst.push_back(seq); + } +} diff --git a/crates/camodel/src/issue/mod.rs b/crates/camodel/src/issue/mod.rs new file mode 100644 index 0000000..46c026e --- /dev/null +++ b/crates/camodel/src/issue/mod.rs @@ -0,0 +1,2 @@ +pub mod queues; +pub mod select; diff --git a/crates/camodel/src/issue/queues/iq.rs b/crates/camodel/src/issue/queues/iq.rs new file mode 100644 index 0000000..5d5243f --- /dev/null +++ b/crates/camodel/src/issue/queues/iq.rs @@ -0,0 +1 @@ +// IQ namespace placeholder. 
diff --git a/crates/camodel/src/issue/queues/mod.rs b/crates/camodel/src/issue/queues/mod.rs
new file mode 100644
index 0000000..2ac6e00
--- /dev/null
+++ b/crates/camodel/src/issue/queues/mod.rs
//! Issue-queue bookkeeping: routing uops to physical IQs, the T/U ready
//! tables (with branch-checkpoint snapshot/restore), per-entry source
//! ready/wait state, wakeup broadcast, the qtag owner table and the
//! qtag wait crossbar.
//!
//! NOTE(review): the patch transport stripped every angle-bracketed type
//! parameter from this file. The generics below are reconstructed from call
//! sites; the ready-table element type is assumed to match
//! `LogicalQueueTag::tag` (written here as `u32`) — confirm against
//! `core::model` before landing.

pub mod iq;
pub mod qtag;
pub mod ready_tables;

use std::collections::{BTreeMap, BTreeSet};

use crate::{
    CycleUop, IQ_ENQUEUE_PORTS, IqEntry, IqWakeEvent, IqWakeKind, LogicalQueueTag, PHYS_IQ_COUNT,
    PhysIq, QTag, QueueWakeKind, ReadyTableCheckpoint, StageQueues, dep_data_ready_cycle,
    dep_pick_ready_cycle, issue_queue_candidates, source_uses_qtag_wakeup, source_valid,
};

/// Routes `seq` to a physical issue queue: the first candidate (in
/// `issue_queue_candidates` priority order) that still has both a free
/// enqueue port this cycle and spare capacity. Returns `None` when every
/// candidate is full or port-limited (caller stalls the uop in S2).
pub(crate) fn route_phys_iq(
    seq: usize,
    iq: &[IqEntry],
    uops: &[CycleUop],
    enqueue_ports_used: &[usize; PHYS_IQ_COUNT],
) -> Option<PhysIq> {
    issue_queue_candidates(&uops[seq])
        .into_iter()
        .find(|&phys_iq| {
            enqueue_ports_used[phys_iq.index()] < IQ_ENQUEUE_PORTS
                && iq_occupancy(iq, phys_iq) < phys_iq.capacity()
        })
}

/// Number of entries currently resident in `phys_iq`.
fn iq_occupancy(iq: &[IqEntry], phys_iq: PhysIq) -> usize {
    iq.iter().filter(|entry| entry.phys_iq == phys_iq).count()
}

/// Marks a logical tag ready in the table selected by its wake kind.
pub(crate) fn insert_ready_table_tag(pipeline: &mut StageQueues, tag: LogicalQueueTag) {
    match tag.kind {
        QueueWakeKind::T => {
            pipeline.ready_table_t.insert(tag.tag);
        }
        QueueWakeKind::U => {
            pipeline.ready_table_u.insert(tag.tag);
        }
    }
}

/// Snapshots the ready tables plus all speculative "active_*" frontend state
/// under `checkpoint_id`, so a later misprediction can roll back with
/// `restore_ready_tables_for_checkpoint`. An existing snapshot for the same
/// id is overwritten.
pub(crate) fn snapshot_ready_tables_for_checkpoint(pipeline: &mut StageQueues, checkpoint_id: u8) {
    pipeline.ready_table_checkpoints.insert(
        checkpoint_id,
        ReadyTableCheckpoint {
            ready_table_t: pipeline.ready_table_t.clone(),
            ready_table_u: pipeline.ready_table_u.clone(),
            recovery_epoch: pipeline.active_recovery_epoch,
            block_head: pipeline.active_block_head,
            branch_context: pipeline.active_branch_context,
            dynamic_target_pc: pipeline.active_dynamic_target_pc,
            dynamic_target_owner_seq: pipeline.active_dynamic_target_owner_seq,
            dynamic_target_producer_kind: pipeline.active_dynamic_target_producer_kind,
            dynamic_target_setup_epoch: pipeline.active_dynamic_target_setup_epoch,
            dynamic_target_owner_kind: pipeline.active_dynamic_target_owner_kind,
            dynamic_target_source_owner_seq: pipeline.active_dynamic_target_source_owner_seq,
            dynamic_target_source_epoch: pipeline.active_dynamic_target_source_epoch,
            dynamic_target_source_kind: pipeline.active_dynamic_target_source_kind,
            dynamic_target_call_materialization_kind: pipeline
                .active_dynamic_target_call_materialization_kind,
            call_header_seq: pipeline.active_call_header_seq,
            call_return_target_pc: pipeline.active_call_return_target_pc,
            call_return_target_owner_seq: pipeline.active_call_return_target_owner_seq,
            call_return_target_epoch: pipeline.active_call_return_target_epoch,
            call_return_materialization_kind: pipeline.active_call_return_materialization_kind,
        },
    );
}

/// Restores the state captured by `snapshot_ready_tables_for_checkpoint`.
/// Returns `false` (leaving the pipeline untouched) when no snapshot exists
/// for `checkpoint_id`.
pub(crate) fn restore_ready_tables_for_checkpoint(
    pipeline: &mut StageQueues,
    checkpoint_id: u8,
) -> bool {
    let Some(snapshot) = pipeline
        .ready_table_checkpoints
        .get(&checkpoint_id)
        .cloned()
    else {
        return false;
    };
    pipeline.ready_table_t = snapshot.ready_table_t;
    pipeline.ready_table_u = snapshot.ready_table_u;
    pipeline.active_recovery_epoch = snapshot.recovery_epoch;
    pipeline.active_block_head = snapshot.block_head;
    pipeline.active_branch_context = snapshot.branch_context;
    pipeline.active_dynamic_target_pc = snapshot.dynamic_target_pc;
    pipeline.active_dynamic_target_owner_seq = snapshot.dynamic_target_owner_seq;
    pipeline.active_dynamic_target_producer_kind = snapshot.dynamic_target_producer_kind;
    pipeline.active_dynamic_target_setup_epoch = snapshot.dynamic_target_setup_epoch;
    pipeline.active_dynamic_target_owner_kind = snapshot.dynamic_target_owner_kind;
    pipeline.active_dynamic_target_source_owner_seq = snapshot.dynamic_target_source_owner_seq;
    pipeline.active_dynamic_target_source_epoch = snapshot.dynamic_target_source_epoch;
    pipeline.active_dynamic_target_source_kind = snapshot.dynamic_target_source_kind;
    pipeline.active_dynamic_target_call_materialization_kind =
        snapshot.dynamic_target_call_materialization_kind;
    pipeline.active_call_header_seq = snapshot.call_header_seq;
    pipeline.active_call_return_target_pc = snapshot.call_return_target_pc;
    pipeline.active_call_return_target_owner_seq = snapshot.call_return_target_owner_seq;
    pipeline.active_call_return_target_epoch = snapshot.call_return_target_epoch;
    pipeline.active_call_return_materialization_kind = snapshot.call_return_materialization_kind;
    true
}

/// True when `tag` is marked ready in the table matching its wake kind.
pub(crate) fn logical_tag_ready(
    tag: LogicalQueueTag,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
) -> bool {
    match tag.kind {
        QueueWakeKind::T => ready_table_t.contains(&tag.tag),
        QueueWakeKind::U => ready_table_u.contains(&tag.tag),
    }
}

/// Resolves each source of `seq` to the producer's qtag, for sources that
/// must wait on a queue-kind wakeup. A source keeps `None` when it has no
/// queue kind / logical tag, its logical tag is already ready, it has no
/// producer, or the producer's destination does not match the source's
/// queue kind and logical tag.
pub(crate) fn annotate_qtag_sources(
    seq: usize,
    iq_tags: &BTreeMap<usize, QTag>,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    uops: &mut [CycleUop],
) {
    for idx in 0..uops[seq].src_qtags.len() {
        let Some(kind) = uops[seq].src_queue_kinds[idx] else {
            uops[seq].src_qtags[idx] = None;
            continue;
        };
        let Some(logical_tag) = uops[seq].src_logical_tags[idx] else {
            uops[seq].src_qtags[idx] = None;
            continue;
        };
        if logical_tag_ready(logical_tag, ready_table_t, ready_table_u) {
            uops[seq].src_qtags[idx] = None;
            continue;
        }
        let Some(producer) = uops[seq].deps[idx] else {
            uops[seq].src_qtags[idx] = None;
            continue;
        };
        if uops[producer].dst_queue_kind != Some(kind) {
            uops[seq].src_qtags[idx] = None;
            continue;
        }
        if uops[producer].dst_logical_tag != Some(logical_tag) {
            uops[seq].src_qtags[idx] = None;
            continue;
        }
        // Prefer the qtag stamped on the producer itself; fall back to the
        // live tag map (producer may already be resident in an IQ).
        uops[seq].src_qtags[idx] = uops[producer]
            .dst_qtag
            .or_else(|| iq_tags.get(&producer).copied());
    }
}

/// Builds a fresh IQ entry for `seq` in `phys_iq` with its per-source
/// ready/wait state seeded from the current cycle.
pub(crate) fn make_iq_entry(
    cycle: u64,
    seq: usize,
    phys_iq: PhysIq,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    uops: &[CycleUop],
) -> IqEntry {
    let mut entry = IqEntry {
        seq,
        phys_iq,
        inflight: false,
        src_valid: [false; 2],
        src_ready_nonspec: [false; 2],
        src_ready_spec: [false; 2],
        src_wait_qtag: [false; 2],
    };
    initialize_iq_entry_source_state(cycle, &mut entry, ready_table_t, ready_table_u, uops);
    entry
}

/// Per-cycle IQ maintenance: refresh every entry's source state (revoking
/// stale speculation, applying ready-table wakeups), then broadcast this
/// cycle's wake events from producers to waiting consumers.
pub(crate) fn update_iq_entries_for_cycle(
    cycle: u64,
    iq: &mut [IqEntry],
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    iq_owner_table: &[Vec<Option<usize>>],
    iq_tags: &BTreeMap<usize, QTag>,
    qtag_wait_crossbar: &[Vec<Vec<(usize, usize)>>],
    uops: &[CycleUop],
) {
    for entry in &mut *iq {
        update_iq_entry_source_state(cycle, entry, ready_table_t, ready_table_u, uops);
    }
    let wake_events = collect_iq_wake_events(cycle, uops);
    for event in wake_events {
        publish_iq_wake_event(event, iq, iq_owner_table, iq_tags, qtag_wait_crossbar, uops);
    }
}

/// Initial seeding of both sources of a newly created entry.
fn initialize_iq_entry_source_state(
    cycle: u64,
    entry: &mut IqEntry,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    uops: &[CycleUop],
) {
    for idx in 0..2 {
        entry.src_valid[idx] = source_valid(&uops[entry.seq].commit, idx);
        reset_iq_entry_source_state(entry, idx);
        seed_iq_entry_source_state(cycle, entry, idx, ready_table_t, ready_table_u, uops);
    }
}

/// Per-cycle refresh of one resident entry's sources: first revoke state
/// that no longer holds, then apply any ready-table wakeup.
fn update_iq_entry_source_state(
    cycle: u64,
    entry: &mut IqEntry,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    uops: &[CycleUop],
) {
    for idx in 0..2 {
        if !entry.src_valid[idx] {
            continue;
        }
        revoke_stale_iq_source_state(cycle, entry, idx, ready_table_t, ready_table_u, uops);
        apply_ready_table_wakeup(entry, idx, ready_table_t, ready_table_u, uops);
    }
}

/// Seeds one source at enqueue time:
/// - no producer, logical tag already ready, or producer data visible
///   => non-speculatively ready;
/// - producer pick-wakeup visible => speculatively ready;
/// - otherwise wait, via qtag crossbar when the source uses qtag wakeup.
fn seed_iq_entry_source_state(
    cycle: u64,
    entry: &mut IqEntry,
    idx: usize,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    uops: &[CycleUop],
) {
    let logical_ready = source_logical_ready(entry.seq, idx, ready_table_t, ready_table_u, uops);
    let qtag_wait = source_uses_qtag_wakeup(entry.seq, idx, uops);
    match uops[entry.seq].deps[idx] {
        None => {
            entry.src_ready_nonspec[idx] = true;
        }
        Some(producer) if logical_ready || dep_data_ready_cycle(producer, uops) <= cycle => {
            entry.src_ready_nonspec[idx] = true;
        }
        Some(producer) if dep_pick_ready_cycle(producer, uops) <= cycle => {
            entry.src_ready_spec[idx] = true;
            entry.src_wait_qtag[idx] = false;
        }
        Some(_) => {
            entry.src_wait_qtag[idx] = qtag_wait;
        }
    }
}

/// Withdraws ready bits that no longer hold (e.g. a replayed producer whose
/// visibility cycle moved into the future), and re-derives the qtag-wait bit
/// when the source is left not-ready.
fn revoke_stale_iq_source_state(
    cycle: u64,
    entry: &mut IqEntry,
    idx: usize,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    uops: &[CycleUop],
) {
    if !entry.src_valid[idx] {
        return;
    }
    if uops[entry.seq].deps[idx].is_none() {
        // Dependency-free sources are unconditionally ready.
        entry.src_ready_nonspec[idx] = true;
        entry.src_ready_spec[idx] = false;
        entry.src_wait_qtag[idx] = false;
        return;
    }

    let logical_ready = source_logical_ready(entry.seq, idx, ready_table_t, ready_table_u, uops);
    if entry.src_ready_nonspec[idx]
        && !logical_ready
        && uops[entry.seq].deps[idx]
            .map(|producer| dep_data_ready_cycle(producer, uops) > cycle)
            .unwrap_or(false)
    {
        entry.src_ready_nonspec[idx] = false;
    }

    // Spec readiness is revoked when the producer's pick wakeup is no longer
    // visible; `unwrap_or(true)` also clears it if the dep vanished.
    if entry.src_ready_spec[idx]
        && uops[entry.seq].deps[idx]
            .map(|producer| dep_pick_ready_cycle(producer, uops) > cycle)
            .unwrap_or(true)
    {
        entry.src_ready_spec[idx] = false;
    }

    if !(entry.src_ready_nonspec[idx] || entry.src_ready_spec[idx]) {
        entry.src_wait_qtag[idx] = source_uses_qtag_wakeup(entry.seq, idx, uops);
    }
}

/// Promotes a waiting source to non-speculatively ready once its logical
/// tag shows up in the ready tables.
fn apply_ready_table_wakeup(
    entry: &mut IqEntry,
    idx: usize,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    uops: &[CycleUop],
) {
    if !entry.src_valid[idx] || entry.src_ready_nonspec[idx] {
        return;
    }

    if source_logical_ready(entry.seq, idx, ready_table_t, ready_table_u, uops) {
        entry.src_ready_nonspec[idx] = true;
        entry.src_ready_spec[idx] = false;
        entry.src_wait_qtag[idx] = false;
    }
}

/// Gathers this cycle's wake events. A producer publishes at most one event:
/// non-speculative takes precedence over speculative.
fn collect_iq_wake_events(cycle: u64, uops: &[CycleUop]) -> Vec<IqWakeEvent> {
    let mut out = Vec::new();
    for (producer, uop) in uops.iter().enumerate() {
        let queue_kind = uop.dst_queue_kind;
        let logical_tag = uop.dst_logical_tag;
        let qtag = uop.dst_qtag;
        if producer_publishes_nonspec_wakeup(cycle, producer, uops) {
            out.push(IqWakeEvent {
                producer,
                wake_kind: IqWakeKind::Nonspec,
                queue_kind,
                logical_tag,
                qtag,
            });
        } else if producer_publishes_spec_wakeup(cycle, producer, uops) {
            out.push(IqWakeEvent {
                producer,
                wake_kind: IqWakeKind::Spec,
                queue_kind,
                logical_tag,
                qtag,
            });
        }
    }
    out
}

/// Delivers one wake event. Events carrying a full queue identity
/// (kind + logical tag + qtag) go point-to-point through the wait crossbar,
/// plus a broadcast restricted to sources with no queue kind; events without
/// a queue identity are broadcast to every entry/source.
fn publish_iq_wake_event(
    event: IqWakeEvent,
    iq: &mut [IqEntry],
    iq_owner_table: &[Vec<Option<usize>>],
    iq_tags: &BTreeMap<usize, QTag>,
    qtag_wait_table: &[Vec<Vec<(usize, usize)>>],
    uops: &[CycleUop],
) {
    if let (Some(_queue_kind), Some(_logical_tag), Some(qtag)) =
        (event.queue_kind, event.logical_tag, event.qtag)
    {
        for &(seq, src_idx) in &qtag_wait_table[qtag.phys_iq.index()][qtag.entry_id] {
            if let Some(entry) = iq_owner_entry_mut(seq, iq, iq_owner_table, iq_tags) {
                publish_wake_into_entry_source(entry, src_idx, event, uops);
            }
        }
        for entry in iq {
            publish_nonqueue_wake_into_entry(entry, event, uops);
        }
    } else {
        for entry in iq {
            publish_wake_into_entry(entry, event, uops);
        }
    }
}

/// Broadcast helper: try the event against both sources of `entry`.
fn publish_wake_into_entry(entry: &mut IqEntry, event: IqWakeEvent, uops: &[CycleUop]) {
    for idx in 0..2 {
        publish_wake_into_entry_source(entry, idx, event, uops);
    }
}

/// Broadcast helper restricted to sources that carry no queue kind (those
/// never route through the crossbar).
fn publish_nonqueue_wake_into_entry(entry: &mut IqEntry, event: IqWakeEvent, uops: &[CycleUop]) {
    for idx in 0..2 {
        if uops[entry.seq].src_queue_kinds[idx].is_none() {
            publish_wake_into_entry_source(entry, idx, event, uops);
        }
    }
}

/// Applies a matching wake event to one source. Nonspec wins over spec:
/// a spec wake never downgrades an already nonspec-ready source.
fn publish_wake_into_entry_source(
    entry: &mut IqEntry,
    idx: usize,
    event: IqWakeEvent,
    uops: &[CycleUop],
) {
    if !iq_source_matches_wake_event(entry.seq, idx, event, uops) {
        return;
    }
    match event.wake_kind {
        IqWakeKind::Nonspec => {
            entry.src_ready_nonspec[idx] = true;
            entry.src_ready_spec[idx] = false;
            entry.src_wait_qtag[idx] = false;
        }
        IqWakeKind::Spec => {
            if !entry.src_ready_nonspec[idx] {
                entry.src_ready_spec[idx] = true;
                entry.src_wait_qtag[idx] = false;
            }
        }
    }
}

/// A source matches an event when its producer matches and — if the source
/// has a queue kind — the event's full queue identity (kind, logical tag,
/// qtag) matches too. Sources without a queue kind match on producer alone.
fn iq_source_matches_wake_event(
    seq: usize,
    idx: usize,
    event: IqWakeEvent,
    uops: &[CycleUop],
) -> bool {
    if !source_valid(&uops[seq].commit, idx) || uops[seq].deps[idx] != Some(event.producer) {
        return false;
    }

    match uops[seq].src_queue_kinds[idx] {
        Some(queue_kind) => {
            Some(queue_kind) == event.queue_kind
                && uops[seq].src_logical_tags[idx] == event.logical_tag
                && uops[seq].src_qtags[idx].is_some()
                && uops[seq].src_qtags[idx] == event.qtag
        }
        None => true,
    }
}

/// Speculative (load-hit) wakeup: loads only, one cycle after E1 or when the
/// pick-wakeup visibility cycle is reached.
fn producer_publishes_spec_wakeup(cycle: u64, producer: usize, uops: &[CycleUop]) -> bool {
    let uop = &uops[producer];
    if !uop.is_load {
        return false;
    }

    if uop.e1_cycle == Some(cycle.saturating_sub(1)) {
        return true;
    }

    dep_pick_ready_cycle(producer, uops) == cycle
}

/// Non-speculative wakeup: one cycle after E4 for loads, one cycle after W1
/// for everything else, or when the data visibility cycle is reached.
fn producer_publishes_nonspec_wakeup(cycle: u64, producer: usize, uops: &[CycleUop]) -> bool {
    let uop = &uops[producer];
    if uop.is_load {
        if uop.e4_cycle == Some(cycle.saturating_sub(1)) {
            return true;
        }
    } else if uop.w1_cycle == Some(cycle.saturating_sub(1)) {
        return true;
    }

    dep_data_ready_cycle(producer, uops) == cycle
}

/// True when the source's logical tag exists and is ready; a tagless source
/// is never "logically" ready.
fn source_logical_ready(
    seq: usize,
    idx: usize,
    ready_table_t: &BTreeSet<u32>,
    ready_table_u: &BTreeSet<u32>,
    uops: &[CycleUop],
) -> bool {
    uops[seq].src_logical_tags[idx]
        .map(|tag| logical_tag_ready(tag, ready_table_t, ready_table_u))
        .unwrap_or(false)
}

/// Clears all ready/wait bits of one source.
fn reset_iq_entry_source_state(entry: &mut IqEntry, idx: usize) {
    entry.src_ready_nonspec[idx] = false;
    entry.src_ready_spec[idx] = false;
    entry.src_wait_qtag[idx] = false;
}

/// Rebuilds the (phys_iq, entry_id) -> IQ-index owner table from scratch.
/// Entries without a live qtag are simply not indexed.
pub(crate) fn rebuild_iq_owner_table(
    iq_owner_table: &mut [Vec<Option<usize>>],
    iq: &[IqEntry],
    iq_tags: &BTreeMap<usize, QTag>,
) {
    for phys_iq in iq_owner_table.iter_mut() {
        for owner in phys_iq.iter_mut() {
            *owner = None;
        }
    }
    for (idx, entry) in iq.iter().enumerate() {
        let Some(qtag) = iq_tags.get(&entry.seq).copied() else {
            continue;
        };
        iq_owner_table[qtag.phys_iq.index()][qtag.entry_id] = Some(idx);
    }
}

/// Looks up `seq`'s IQ entry through the owner table, guarding against a
/// stale table: the indexed slot must still hold `seq`.
fn iq_owner_entry_mut<'a>(
    seq: usize,
    iq: &'a mut [IqEntry],
    iq_owner_table: &[Vec<Option<usize>>],
    iq_tags: &BTreeMap<usize, QTag>,
) -> Option<&'a mut IqEntry> {
    let qtag = iq_tags.get(&seq).copied()?;
    let idx = iq_owner_table[qtag.phys_iq.index()][qtag.entry_id]?;
    iq.get_mut(idx).filter(|entry| entry.seq == seq)
}

/// Registers `seq` as a waiter on each of its source qtags, skipping
/// duplicates so re-registration is idempotent.
pub(crate) fn register_iq_wait_crossbar_entry(
    qtag_wait_crossbar: &mut [Vec<Vec<(usize, usize)>>],
    seq: usize,
    uop: &CycleUop,
) {
    for (src_idx, qtag) in uop.src_qtags.iter().copied().enumerate() {
        let Some(qtag) = qtag else {
            continue;
        };
        let waiters = &mut qtag_wait_crossbar[qtag.phys_iq.index()][qtag.entry_id];
        if !waiters.contains(&(seq, src_idx)) {
            waiters.push((seq, src_idx));
        }
    }
}

/// Removes every crossbar waiter owned by `seq` (used when it leaves the IQ).
pub(crate) fn unregister_iq_wait_crossbar_seq(
    qtag_wait_crossbar: &mut [Vec<Vec<(usize, usize)>>],
    seq: usize,
) {
    for phys_iq in qtag_wait_crossbar.iter_mut() {
        for waiters in phys_iq.iter_mut() {
            waiters.retain(|(entry_seq, _)| *entry_seq != seq);
        }
    }
}

/// On a redirect, drops every waiter younger than the flush point
/// (sequence numbers strictly greater than `flush_seq`).
pub(crate) fn prune_iq_wait_crossbar_on_redirect(
    qtag_wait_crossbar: &mut [Vec<Vec<(usize, usize)>>],
    flush_seq: usize,
) {
    for phys_iq in qtag_wait_crossbar.iter_mut() {
        for waiters in phys_iq.iter_mut() {
            waiters.retain(|(entry_seq, _)| *entry_seq <= flush_seq);
        }
    }
}

/// Allocates the lowest free entry id in `phys_iq`, or `None` when the
/// queue's tag space is exhausted. Occupied ids are collected once up front
/// instead of rescanning the whole tag map for every candidate id.
pub(crate) fn allocate_qtag(iq_tags: &BTreeMap<usize, QTag>, phys_iq: PhysIq) -> Option<QTag> {
    let occupied: BTreeSet<usize> = iq_tags
        .values()
        .filter(|tag| tag.phys_iq == phys_iq)
        .map(|tag| tag.entry_id)
        .collect();
    (0..phys_iq.capacity())
        .find(|entry_id| !occupied.contains(entry_id))
        .map(|entry_id| QTag { phys_iq, entry_id })
}
diff --git a/crates/camodel/src/issue/queues/qtag.rs b/crates/camodel/src/issue/queues/qtag.rs
new file mode 100644
index 0000000..778c629
--- /dev/null
+++ b/crates/camodel/src/issue/queues/qtag.rs
// QTag namespace placeholder.
diff --git a/crates/camodel/src/issue/queues/ready_tables.rs b/crates/camodel/src/issue/queues/ready_tables.rs new file mode 100644 index 0000000..73ce7c6 --- /dev/null +++ b/crates/camodel/src/issue/queues/ready_tables.rs @@ -0,0 +1 @@ +// Ready-table namespace placeholder. diff --git a/crates/camodel/src/issue/select/i1.rs b/crates/camodel/src/issue/select/i1.rs new file mode 100644 index 0000000..3f8384e --- /dev/null +++ b/crates/camodel/src/issue/select/i1.rs @@ -0,0 +1 @@ +// I1 stage placeholder. diff --git a/crates/camodel/src/issue/select/i2.rs b/crates/camodel/src/issue/select/i2.rs new file mode 100644 index 0000000..b18f284 --- /dev/null +++ b/crates/camodel/src/issue/select/i2.rs @@ -0,0 +1 @@ +// I2 stage placeholder. diff --git a/crates/camodel/src/issue/select/mod.rs b/crates/camodel/src/issue/select/mod.rs new file mode 100644 index 0000000..19fe779 --- /dev/null +++ b/crates/camodel/src/issue/select/mod.rs @@ -0,0 +1,445 @@ +pub mod i1; +pub mod i2; +pub mod p1; + +use std::collections::{BTreeSet, VecDeque}; + +use isa::CommitRecord; + +#[cfg(test)] +use crate::make_iq_entry; +use crate::{ + CycleRunOptions, CycleUop, ISSUE_WIDTH, IqEntry, LD_GEN_E1, LD_GEN_E2, LD_GEN_E3, LD_GEN_E4, + PhysIq, READ_PORTS, StageQueues, e1_can_accept, lhq_insert, rebuild_iq_owner_table, + rob_age_rank, stq_insert, unregister_iq_wait_crossbar_seq, +}; + +pub(crate) fn dep_pick_ready_cycle(producer: usize, uops: &[CycleUop]) -> u64 { + uops[producer].pick_wakeup_visible.unwrap_or(u64::MAX) +} + +pub(crate) fn dep_data_ready_cycle(producer: usize, uops: &[CycleUop]) -> u64 { + uops[producer].data_ready_visible.unwrap_or(u64::MAX) +} + +pub(crate) fn source_valid(commit: &CommitRecord, idx: usize) -> bool { + match idx { + 0 => commit.src0_valid != 0, + 1 => commit.src1_valid != 0, + _ => false, + } +} + +pub(crate) fn source_uses_qtag_wakeup(seq: usize, idx: usize, uops: &[CycleUop]) -> bool { + let Some(kind) = uops[seq].src_queue_kinds[idx] else { + return false; + }; 
+ let Some(producer) = uops[seq].deps[idx] else { + return false; + }; + uops[seq].src_qtags[idx].is_some() + && uops[producer].dst_queue_kind == Some(kind) + && uops[producer].dst_logical_tag == uops[seq].src_logical_tags[idx] +} + +pub(crate) fn arbitrate_i1( + cycle: u64, + p1: &mut VecDeque, + iq: &mut Vec, + uops: &[CycleUop], + rob: &VecDeque, +) -> VecDeque { + let mut admitted = VecDeque::new(); + let mut used_ports = 0usize; + let mut used_queues = BTreeSet::new(); + let mut attempts = p1.drain(..).collect::>(); + attempts.sort_by_key(|&seq| rob_age_rank(seq, rob)); + for seq in attempts { + let Some(entry) = iq.iter().find(|entry| entry.seq == seq).cloned() else { + continue; + }; + let needed = read_ports_needed_from_entry(&entry, cycle, uops); + if !used_queues.contains(&entry.phys_iq.index()) && used_ports + needed <= READ_PORTS { + used_ports += needed; + used_queues.insert(entry.phys_iq.index()); + admitted.push_back(seq); + } else if let Some(entry) = iq.iter_mut().find(|entry| entry.seq == seq) { + entry.inflight = false; + } + } + admitted +} + +#[cfg(test)] +pub(crate) fn read_ports_needed(seq: usize, cycle: u64, uops: &[CycleUop]) -> usize { + let entry = make_iq_entry( + cycle, + seq, + uops[seq].phys_iq.unwrap_or(PhysIq::SharedIq1), + &BTreeSet::new(), + &BTreeSet::new(), + uops, + ); + read_ports_needed_from_entry(&entry, cycle, uops) +} + +#[cfg(test)] +pub(crate) fn iq_entry_ready( + seq: usize, + cycle: u64, + lsid_issue_ptr: usize, + uops: &[CycleUop], +) -> bool { + let entry = make_iq_entry( + cycle, + seq, + uops[seq].phys_iq.unwrap_or(PhysIq::SharedIq1), + &BTreeSet::new(), + &BTreeSet::new(), + uops, + ); + iq_entry_ready_from_state(&entry, cycle, lsid_issue_ptr, uops) +} + +#[cfg(test)] +pub(crate) fn iq_entry_wait_cause( + seq: usize, + cycle: u64, + lsid_issue_ptr: usize, + uops: &[CycleUop], +) -> Option<&'static str> { + let entry = make_iq_entry( + cycle, + seq, + uops[seq].phys_iq.unwrap_or(PhysIq::SharedIq1), + 
&BTreeSet::new(), + &BTreeSet::new(), + uops, + ); + iq_entry_wait_cause_from_state(&entry, cycle, lsid_issue_ptr, uops) +} + +pub(crate) fn read_ports_needed_from_entry( + entry: &IqEntry, + cycle: u64, + uops: &[CycleUop], +) -> usize { + let seq = entry.seq; + uops[seq] + .deps + .into_iter() + .enumerate() + .filter(|(idx, dep)| { + source_needs_rf_read_from_entry(entry, &uops[seq].commit, *idx, *dep, cycle, uops) + }) + .count() +} + +pub(crate) fn source_needs_rf_read_from_entry( + entry: &IqEntry, + commit: &CommitRecord, + idx: usize, + dep: Option, + cycle: u64, + uops: &[CycleUop], +) -> bool { + if !source_valid(commit, idx) { + return false; + } + if source_uses_qtag_wakeup(entry.seq, idx, uops) { + return false; + } + if entry.src_ready_spec[idx] { + return false; + } + + match dep { + None => true, + Some(producer) => { + if dep_data_ready_cycle(producer, uops) <= cycle { + return true; + } + + !(uops[producer].is_load && dep_pick_ready_cycle(producer, uops) <= cycle) + } + } +} + +pub(crate) fn i2_ready(seq: usize, cycle: u64, uops: &[CycleUop]) -> bool { + uops[seq].deps.into_iter().all(|dep| { + dep.map(|producer| dep_data_ready_cycle(producer, uops) <= cycle) + .unwrap_or(true) + }) +} + +pub(crate) fn lsid_issue_ready(seq: usize, lsid_issue_ptr: usize, uops: &[CycleUop]) -> bool { + uops[seq] + .load_store_id + .map(|load_store_id| load_store_id == lsid_issue_ptr) + .unwrap_or(true) +} + +pub(crate) fn i2_issue_eligible( + seq: usize, + cycle: u64, + lsid_issue_ptr: usize, + uops: &[CycleUop], +) -> bool { + i2_ready(seq, cycle, uops) && lsid_issue_ready(seq, lsid_issue_ptr, uops) +} + +pub(crate) fn i2_waits_on_lsid( + seq: usize, + cycle: u64, + lsid_issue_ptr: usize, + uops: &[CycleUop], +) -> bool { + i2_ready(seq, cycle, uops) && !lsid_issue_ready(seq, lsid_issue_ptr, uops) +} + +pub(crate) fn advance_i2( + cycle: u64, + i2: &mut VecDeque, + e1: &mut VecDeque, + lhq: &mut VecDeque, + stq: &mut VecDeque, + lsid_issue_ptr: &mut usize, + 
lsid_complete_ptr: &mut usize, + uops: &[CycleUop], +) { + let mut stay = VecDeque::new(); + while let Some(seq) = i2.pop_front() { + if i2_issue_eligible(seq, cycle, *lsid_issue_ptr, uops) && e1_can_accept(seq, e1, uops) { + e1.push_back(seq); + if uops[seq].is_load { + lhq_insert(lhq, seq); + } else if uops[seq].is_store { + stq_insert(stq, seq); + } + if uops[seq].load_store_id.is_some() { + *lsid_issue_ptr += 1; + *lsid_complete_ptr += 1; + } + } else { + stay.push_back(seq); + } + } + *i2 = stay; +} + +pub(crate) fn advance_p1_to_i1( + i1: &mut VecDeque, + admitted_i1: &mut VecDeque, + p1: &mut VecDeque, +) { + advance_simple(i1, admitted_i1, ISSUE_WIDTH); + while let Some(seq) = admitted_i1.pop_front() { + p1.push_back(seq); + } +} + +pub(crate) fn advance_i1_to_i2(pipeline: &mut StageQueues, iq: &mut Vec) { + let mut prev_i1 = std::mem::take(&mut pipeline.i1); + let mut moved = Vec::new(); + let mut stay = VecDeque::new(); + let mut used_queues = BTreeSet::new(); + while let Some(seq) = prev_i1.pop_front() { + if pipeline.i2.len() >= ISSUE_WIDTH { + stay.push_back(seq); + continue; + } + let Some(phys_iq) = iq + .iter() + .find(|entry| entry.seq == seq) + .map(|entry| entry.phys_iq) + else { + continue; + }; + if used_queues.contains(&phys_iq.index()) { + stay.push_back(seq); + continue; + } + used_queues.insert(phys_iq.index()); + pipeline.i2.push_back(seq); + moved.push(seq); + } + stay.extend(prev_i1); + iq.retain(|entry| !moved.contains(&entry.seq)); + for seq in moved { + pipeline.iq_tags.remove(&seq); + unregister_iq_wait_crossbar_seq(&mut pipeline.qtag_wait_crossbar, seq); + } + rebuild_iq_owner_table(&mut pipeline.iq_owner_table, iq, &pipeline.iq_tags); + pipeline.i1 = stay; +} + +pub(crate) fn pick_from_iq( + cycle: u64, + lsid_issue_ptr: usize, + iq: &mut [IqEntry], + uops: &[CycleUop], + p1: &mut VecDeque, + rob: &VecDeque, +) { + let queue_winners = ready_iq_winners(cycle, lsid_issue_ptr, iq, uops, rob); + + let mut candidates = queue_winners + 
.into_iter() + .map(|(idx, seq, _)| (idx, seq)) + .collect::>(); + candidates.sort_by_key(|&(_, seq)| rob_age_rank(seq, rob)); + + for (idx, seq) in candidates { + if p1.len() >= ISSUE_WIDTH { + break; + } + iq[idx].inflight = true; + p1.push_back(seq); + } +} + +pub(crate) fn iq_entry_ready_from_state( + entry: &IqEntry, + cycle: u64, + lsid_issue_ptr: usize, + uops: &[CycleUop], +) -> bool { + iq_entry_wait_cause_from_state(entry, cycle, lsid_issue_ptr, uops).is_none() +} + +pub(crate) fn ready_iq_winners( + cycle: u64, + lsid_issue_ptr: usize, + iq: &[IqEntry], + uops: &[CycleUop], + rob: &VecDeque, +) -> Vec<(usize, usize, PhysIq)> { + let mut queue_winners = iq + .iter() + .enumerate() + .filter_map(|(idx, entry)| { + (!entry.inflight && iq_entry_ready_from_state(entry, cycle, lsid_issue_ptr, uops)) + .then_some((idx, entry.seq, entry.phys_iq)) + }) + .collect::>(); + queue_winners.sort_by_key(|&(_, seq, phys_iq)| (phys_iq.index(), rob_age_rank(seq, rob))); + queue_winners.dedup_by_key(|entry| entry.2); + queue_winners +} + +pub(crate) fn iq_entry_wait_cause_from_state( + entry: &IqEntry, + cycle: u64, + lsid_issue_ptr: usize, + uops: &[CycleUop], +) -> Option<&'static str> { + let seq = entry.seq; + if !lsid_issue_ready(seq, lsid_issue_ptr, uops) { + return Some("wait_lsid"); + } + + let miss_pending = miss_pending_active(cycle, uops); + for (idx, dep) in uops[seq].deps.into_iter().enumerate() { + if let Some(dep) = dep { + if miss_pending && (dep_load_gen_vec(dep, cycle, uops) & LD_GEN_E4 != 0) { + return Some("wait_miss"); + } + } + if entry.src_valid[idx] && !(entry.src_ready_nonspec[idx] || entry.src_ready_spec[idx]) { + return Some(if entry.src_wait_qtag[idx] { + "wait_qtag" + } else { + "wait_dep" + }); + } + } + + None +} + +pub(crate) fn miss_pending_active(cycle: u64, uops: &[CycleUop]) -> bool { + let _ = cycle; + uops.iter().any(|uop| { + uop.is_load + && uop.miss_injected + && uop.done_cycle.is_none() + && (uop.miss_pending_until.is_some() + || 
uop.e1_cycle.is_some() + || uop.e4_cycle.is_some() + || uop.w1_cycle.is_some()) + }) +} + +pub(crate) fn dep_load_gen_vec(seq: usize, cycle: u64, uops: &[CycleUop]) -> u8 { + let mut memo = vec![None; uops.len()]; + dep_load_gen_vec_inner(seq, cycle, uops, &mut memo) +} + +fn dep_load_gen_vec_inner( + seq: usize, + cycle: u64, + uops: &[CycleUop], + memo: &mut [Option], +) -> u8 { + if let Some(mask) = memo[seq] { + return mask; + } + + let mut mask = current_load_stage_mask(seq, cycle, uops); + for dep in uops[seq].deps.into_iter().flatten() { + mask |= dep_load_gen_vec_inner(dep, cycle, uops, memo); + } + memo[seq] = Some(mask); + mask +} + +fn current_load_stage_mask(seq: usize, cycle: u64, uops: &[CycleUop]) -> u8 { + let uop = &uops[seq]; + if !uop.is_load { + return 0; + } + + if uop.miss_pending_until.is_some() { + return LD_GEN_E4; + } + + match (uop.e1_cycle, uop.e4_cycle) { + (_, Some(e4_cycle)) if cycle == e4_cycle => LD_GEN_E4, + (Some(e1_cycle), _) if cycle >= e1_cycle => match cycle - e1_cycle { + 0 => LD_GEN_E1, + 1 => LD_GEN_E2, + 2 => LD_GEN_E3, + _ => 0, + }, + _ => 0, + } +} + +pub(crate) fn should_inject_load_miss( + seq: usize, + options: &CycleRunOptions, + uops: &[CycleUop], +) -> bool { + let Some(every) = options.load_miss_every else { + return false; + }; + if every == 0 { + return false; + } + + let uop = &uops[seq]; + uop.is_load + && !uop.miss_injected + && uop + .load_ordinal + .is_some_and(|ordinal| ((ordinal as u64) + 1) % every == 0) +} + +fn advance_simple(dst: &mut VecDeque, src: &mut VecDeque, capacity: usize) { + while dst.len() < capacity { + let Some(seq) = src.pop_front() else { + break; + }; + dst.push_back(seq); + } +} diff --git a/crates/camodel/src/issue/select/p1.rs b/crates/camodel/src/issue/select/p1.rs new file mode 100644 index 0000000..b9d9da2 --- /dev/null +++ b/crates/camodel/src/issue/select/p1.rs @@ -0,0 +1 @@ +// P1 stage placeholder. 
diff --git a/crates/camodel/src/lib.rs b/crates/camodel/src/lib.rs new file mode 100644 index 0000000..4963569 --- /dev/null +++ b/crates/camodel/src/lib.rs @@ -0,0 +1,23 @@ +pub mod backend; +pub mod control; +pub mod core; +pub mod decode; +pub mod frontend; +pub mod issue; +pub mod trace; + +pub use core::{CycleEngine, CycleRunBundle, CycleRunOptions}; + +pub(crate) use backend::lsu::*; +pub(crate) use control::commit::*; +pub(crate) use control::recovery::*; +pub(crate) use core::config::*; +pub(crate) use core::model::*; +pub(crate) use decode::*; +pub(crate) use frontend::*; +pub(crate) use issue::queues::*; +pub(crate) use issue::select::*; +pub(crate) use trace::*; + +#[cfg(test)] +mod tests; diff --git a/crates/camodel/src/tests.rs b/crates/camodel/src/tests.rs new file mode 100644 index 0000000..5e5676b --- /dev/null +++ b/crates/camodel/src/tests.rs @@ -0,0 +1,8811 @@ +use super::*; +use elf::{LoadedElf, SegmentImage}; +use isa::CommitRecord; +use runtime::GuestRuntime; +use runtime::{BootInfo, GuestMemory, MemoryRegion, RuntimeConfig}; +use std::collections::{BTreeMap, BTreeSet, HashMap, VecDeque}; +use std::path::PathBuf; + +fn test_qtag_wait_crossbar(iq: &[IqEntry], uops: &[CycleUop]) -> Vec>> { + let mut crossbar = empty_qtag_wait_crossbar(); + for entry in iq { + register_iq_wait_crossbar_entry(&mut crossbar, entry.seq, &uops[entry.seq]); + } + crossbar +} + +fn test_iq_tags(iq: &[IqEntry]) -> BTreeMap { + let mut next_entry_ids = [0usize; PHYS_IQ_COUNT]; + let mut out = BTreeMap::new(); + for entry in iq { + let entry_id = next_entry_ids[entry.phys_iq.index()]; + next_entry_ids[entry.phys_iq.index()] += 1; + out.insert( + entry.seq, + QTag { + phys_iq: entry.phys_iq, + entry_id, + }, + ); + } + out +} + +fn test_iq_owner_table(iq: &[IqEntry], iq_tags: &BTreeMap) -> Vec>> { + let mut owner_table = empty_iq_owner_table(); + rebuild_iq_owner_table(&mut owner_table, iq, iq_tags); + owner_table +} + +#[test] +fn cycle_engine_retires_multiple_uops() { + 
let program = vec![ + enc_addi(2, 0, 1), + enc_addi(3, 0, 2), + enc_addi(4, 0, 3), + enc_addi(5, 0, 4), + enc_addi(9, 0, 93), + enc_acrc(1), + ]; + let runtime = sample_runtime(&program, &[]); + let bundle = CycleEngine + .run( + &runtime, + &CycleRunOptions { + max_cycles: 64, + ..CycleRunOptions::default() + }, + ) + .unwrap(); + assert_eq!(bundle.result.metrics.exit_reason, "guest_exit(1)"); + assert_eq!(bundle.result.commits.len(), 6); + assert!( + bundle + .stage_events + .iter() + .any(|event| event.stage_id == "IQ") + ); + assert!( + bundle + .stage_events + .iter() + .any(|event| event.stage_id == "ROB") + ); + assert!( + bundle + .stage_events + .iter() + .filter(|event| event.stage_id == "CMT") + .count() + > 0 + ); +} + +#[test] +fn dependent_uop_picks_after_producer_wakeup_window() { + let program = vec![ + enc_addi(2, 0, 5), + enc_addi(3, 2, 6), + enc_addi(9, 0, 93), + enc_acrc(1), + ]; + let runtime = sample_runtime(&program, &[]); + let bundle = CycleEngine + .run( + &runtime, + &CycleRunOptions { + max_cycles: 64, + ..CycleRunOptions::default() + }, + ) + .unwrap(); + + let mut first_w1 = None; + let mut second_p1 = None; + for event in &bundle.stage_events { + if event.row_id == "uop0" && event.stage_id == "W1" && first_w1.is_none() { + first_w1 = Some(event.cycle); + } + if event.row_id == "uop1" && event.stage_id == "P1" && second_p1.is_none() { + second_p1 = Some(event.cycle); + } + } + + assert!(first_w1.is_some()); + assert!(second_p1.is_some()); + assert!(second_p1.unwrap() > first_w1.unwrap()); +} + +#[test] +fn s2_captures_qtag_for_implicit_t_consumer() { + let mut pipeline = StageQueues::default(); + pipeline.frontend[10].extend([0, 1]); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + let producer = isa::decode_word(enc_addi(31, 0, 5) as u64).expect("decode implicit-t producer"); + let consumer = + isa::decode_word(enc_addi(2, REG_T1 as u32, 6) as u64).expect("decode t1 consumer"); + let mut uops = vec![ + CycleUop { + 
decoded: producer, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: Some(QueueWakeKind::T), + dst_logical_tag: Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + None, + ], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + uops[1].src_qtags[0], + Some(QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 0, + }) + ); +} + +#[test] +fn later_implicit_t_consumer_captures_persistent_producer_qtag() { + let producer = isa::decode_word(enc_addi(31, 0, 5) as u64).expect("decode implicit-t producer"); + let consumer = + isa::decode_word(enc_addi(2, REG_T1 as u32, 6) as u64).expect("decode t1 consumer"); + let mut uops = vec![ + CycleUop { + 
decoded: producer, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: Some(QueueWakeKind::T), + dst_logical_tag: Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 1, + }), + dst_qtag: Some(QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 1, + }), + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: Some(5), + }, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 1, + }), + None, + ], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + annotate_qtag_sources( + 1, + &BTreeMap::new(), + &BTreeSet::new(), + &BTreeSet::new(), + &mut uops, + ); + + assert_eq!( + uops[1].src_qtags[0], + Some(QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 1, + }) + ); +} + +#[test] +fn build_uops_resolves_t_rel_to_latest_logical_t_tag() { + let producer0 = isa::decode_word(16_474).expect("decode c.ldi ->t producer0"); + let producer1 = isa::decode_word(14_426).expect("decode c.ldi ->t 
producer1"); + let consumer = isa::decode_word(8_314).expect("decode c.sdi t#1 consumer"); + let commits = vec![ + CommitRecord { + wb_valid: 1, + wb_rd: REG_T1, + mem_valid: 1, + mem_is_store: 0, + ..CommitRecord::unsupported(0, 0x1000, 16_474, 4, &isa::BlockMeta::default()) + }, + CommitRecord { + wb_valid: 1, + wb_rd: REG_T1, + mem_valid: 1, + mem_is_store: 0, + ..CommitRecord::unsupported(0, 0x1002, 14_426, 4, &isa::BlockMeta::default()) + }, + CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + mem_valid: 1, + mem_is_store: 1, + ..CommitRecord::unsupported(0, 0x1004, 8_314, 4, &isa::BlockMeta::default()) + }, + ]; + let decoded = vec![producer0, producer1, consumer]; + + let uops = build_uops(&commits, &decoded); + + assert_eq!( + uops[0].dst_logical_tag, + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }) + ); + assert_eq!( + uops[1].dst_logical_tag, + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 1, + }) + ); + assert_eq!( + uops[2].src_logical_tags[0], + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 1, + }) + ); + assert_eq!(uops[2].deps[0], Some(1)); +} + +#[test] +fn ready_table_t_makes_late_consumer_ready_without_qtag() { + let producer = isa::decode_word(16_474).expect("decode c.ldi ->t producer"); + let consumer = isa::decode_word(8_314).expect("decode c.sdi t#1 consumer"); + let mut uops = vec![ + CycleUop { + decoded: producer, + commit: CommitRecord { + wb_valid: 1, + wb_rd: REG_T1, + mem_valid: 1, + mem_is_store: 0, + ..CommitRecord::unsupported(0, 0x1000, 16_474, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: Some(QueueWakeKind::T), + dst_logical_tag: Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + dst_qtag: Some(QTag { + phys_iq: PhysIq::AguIq0, + entry_id: 3, + }), + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), 
+ miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: Some(2), + e4_cycle: Some(3), + w1_cycle: Some(3), + done_cycle: Some(4), + }, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + mem_valid: 1, + mem_is_store: 1, + ..CommitRecord::unsupported(0, 0x1002, 8_314, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + None, + ], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(1), + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut ready_table_t = BTreeSet::new(); + ready_table_t.insert(0); + + annotate_qtag_sources( + 1, + &BTreeMap::new(), + &ready_table_t, + &BTreeSet::new(), + &mut uops, + ); + + assert_eq!(uops[1].src_qtags[0], None); + assert_eq!(iq_entry_wait_cause(1, 5, 1, &uops), None); +} + +#[test] +fn implicit_t_consumer_needs_no_rf_read_ports() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + wb_valid: 1, + wb_rd: REG_T1, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: Some(QueueWakeKind::T), + dst_logical_tag: Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + dst_qtag: None, + bypass_d2: false, + 
is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: Some(6), + data_ready_visible: Some(6), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + None, + ], + src_qtags: [ + Some(QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 0, + }), + None, + ], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + assert_eq!(read_ports_needed(1, 5, &uops), 0); +} + +#[test] +fn iq_wait_cause_reports_wait_qtag_for_implicit_t_dependency() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + wb_valid: 1, + wb_rd: REG_T1, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: Some(QueueWakeKind::T), + dst_logical_tag: Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + 
redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: Some(6), + data_ready_visible: Some(6), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + None, + ], + src_qtags: [ + Some(QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 0, + }), + None, + ], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + assert_eq!(iq_entry_wait_cause(1, 5, 0, &uops), Some("wait_qtag")); + assert_eq!(iq_entry_wait_cause(1, 6, 0, &uops), None); +} + +#[test] +fn completed_uops_leave_iq_and_w2() { + let program = vec![ + enc_addi(2, 0, 1), + enc_addi(3, 2, 2), + enc_addi(9, 0, 93), + enc_acrc(1), + ]; + let runtime = sample_runtime(&program, &[]); + let bundle = CycleEngine + .run( + &runtime, + &CycleRunOptions { + max_cycles: 64, + ..CycleRunOptions::default() + }, + ) + .unwrap(); + + let retire_cycle = bundle + .stage_events + .iter() + .find(|event| event.row_id == "uop0" && event.stage_id == "CMT") + .map(|event| event.cycle) + .expect("expected retirement"); + + assert!(!bundle.stage_events.iter().any(|event| { + event.row_id == "uop0" && event.stage_id == "IQ" && event.cycle > retire_cycle + })); + assert_eq!( + bundle + .stage_events + .iter() + .filter(|event| event.row_id == "uop0" && event.stage_id == "W2") + 
.count(), + 1 + ); +} + +#[test] +fn fetch_serializes_at_unresolved_redirects() { + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 0, + len: 4, + next_pc: 0x1100, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x1100, 0, 4, &isa::BlockMeta::default()), + ]; + let decoded = vec![ + isa::decode_word(2048).expect("decode c.bstart.std"), + isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"), + ]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut next_fetch_seq = 0usize; + + fill_fetch(0, &mut pipeline, &mut next_fetch_seq, &uops); + assert_eq!( + pipeline.frontend[0].iter().copied().collect::>(), + vec![0] + ); + + pipeline.frontend[0].clear(); + fill_fetch(1, &mut pipeline, &mut next_fetch_seq, &uops); + assert!(pipeline.frontend[0].is_empty()); + + uops[0].w1_cycle = Some(4); + fill_fetch(4, &mut pipeline, &mut next_fetch_seq, &uops); + assert_eq!( + pipeline.frontend[0].iter().copied().collect::>(), + vec![1] + ); +} + +#[test] +fn fetch_does_not_serialize_on_bru_correction_before_boundary() { + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 0, + len: 4, + next_pc: 0x2000, + src0_valid: 1, + src0_reg: 2, + src0_data: 1, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + 
mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: 2048, + len: 2, + next_pc: 0x2000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x2000, 0, 4, &isa::BlockMeta::default()), + ]; + let decoded = vec![ + isa::decode_word(30_478_677).expect("decode cmp.nei"), + isa::decode_word(2048).expect("decode c.bstart.std"), + isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"), + ]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut next_fetch_seq = 0usize; + + assert_eq!(uops[0].redirect_target, None); + assert_eq!(uops[1].redirect_target, Some(0x2000)); + + fill_fetch(0, &mut pipeline, &mut next_fetch_seq, &uops); + assert_eq!( + pipeline.frontend[0].iter().copied().collect::>(), + vec![0, 1] + ); + + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + uops[0].w1_cycle = Some(7); + uops[1].w1_cycle = Some(7); + let mut out = Vec::new(); + emit_stage_events( + 7, + &runtime, + &StageQueues::default(), + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + assert!( + !out.iter() + .any(|event| event.row_id == "uop0" && event.stage_id == "FLS") + ); + assert!( + out.iter() + .any(|event| event.row_id == "uop1" && event.stage_id == "FLS") + ); +} + +#[test] +fn boundary_consumes_pending_bru_correction_before_local_target() { + let 
commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 0, + len: 4, + next_pc: 0x2000, + src0_valid: 1, + src0_reg: 2, + src0_data: 1, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: 0, + len: 2, + next_pc: 0x1006, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x2000, 0, 4, &isa::BlockMeta::default()), + ]; + let decoded = vec![ + isa::decode_word(30_478_677).expect("decode cmp.nei"), + isa::decode_word(0).expect("decode c.bstop"), + isa::decode_word(2048).expect("decode c.bstart.std"), + ]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + + uops[0].w1_cycle = Some(5); + uops[1].w1_cycle = Some(7); + publish_bru_correction_state(5, &mut pipeline, &uops); + assert_eq!( + pipeline.pending_bru_correction, + Some(BruCorrectionState { + source_seq: 0, + epoch: 0, + actual_take: true, + target_pc: 0x2000, + checkpoint_id: checkpoint_id_for_seq(0, &uops), + visible_cycle: 5, + }) + ); + + schedule_frontend_redirect_recovery(7, &mut pipeline, &uops); + assert_eq!( + 
pipeline.frontend_redirect, + Some(FrontendRedirectState { + source_seq: 1, + target_pc: 0x2000, + restart_seq: 2, + checkpoint_id: checkpoint_id_for_seq(0, &uops), + from_correction: true, + resume_cycle: 8, + }) + ); + assert_eq!( + pipeline.flush_checkpoint_id, + Some(checkpoint_id_for_seq(0, &uops)) + ); + assert_eq!(pipeline.pending_bru_correction, None); +} + +#[test] +fn boundary_consumes_not_taken_bru_correction_to_fallthrough() { + let start = isa::decode_word(4).expect("decode c.bstart cond"); + let bru = isa::decode_word(30_478_677).expect("decode cmp.nei"); + let bstop = isa::decode_word(0).expect("decode c.bstop"); + let target = isa::decode_word(2048).expect("decode target c.bstart.std"); + let mut uops = vec![ + CycleUop { + decoded: start.clone(), + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 4, + len: 2, + next_pc: 0x0ff0, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x0ff0), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: bru, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 
0x1002, + insn: 30_478_677, + len: 4, + next_pc: 0x1006, + src0_valid: 1, + src0_reg: 2, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 0, + wb_valid: 1, + wb_rd: 2, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(5), + done_cycle: None, + }, + CycleUop { + decoded: bstop, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 2, + pc: 0x1006, + insn: 0, + len: 2, + next_pc: 0x1008, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, 
+ phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }, + CycleUop { + decoded: target, + commit: CommitRecord::unsupported(0, 0x2000, 2048, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1, 2]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + pipeline.seq_checkpoint_ids.insert(2, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + assert_eq!( + pipeline.seq_branch_contexts.get(&1).copied(), + Some(BranchOwnerContext { + kind: BranchOwnerKind::Cond, + base_pc: 0x1000, + target_pc: 0x0ff0, + off: 0xfffffffffffffff0, + pred_take: true, + epoch: 1, + }) + ); + publish_bru_correction_state(5, &mut pipeline, &uops); + + assert_eq!( + pipeline.pending_bru_correction, + Some(BruCorrectionState { + source_seq: 1, + epoch: 1, + actual_take: false, + target_pc: 0x0ff0, + checkpoint_id: 0, + visible_cycle: 5, + }) + ); + + schedule_frontend_redirect_recovery(7, &mut pipeline, &uops); + assert_eq!( + pipeline.frontend_redirect, + Some(FrontendRedirectState { + source_seq: 2, + target_pc: 0x1008, + restart_seq: 3, + checkpoint_id: 0, + from_correction: true, + resume_cycle: 8, + }) + ); + assert_eq!(pipeline.pending_bru_correction, None); +} + 
+#[test] +fn later_boundary_epoch_does_not_consume_stale_bru_correction() { + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 0, + len: 4, + next_pc: 0x3000, + src0_valid: 1, + src0_reg: 2, + src0_data: 1, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: 0, + len: 2, + next_pc: 0x1006, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x2000, 0, 2, &isa::BlockMeta::default()), + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 3, + pc: 0x2002, + insn: 0, + len: 2, + next_pc: 0x4000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x4000, 0, 2, 
&isa::BlockMeta::default()), + CommitRecord::unsupported(0, 0x3000, 0, 2, &isa::BlockMeta::default()), + ]; + let decoded = vec![ + isa::decode_word(30_478_677).expect("decode cmp.nei"), + isa::decode_word(0).expect("decode c.bstop"), + isa::decode_word(2048).expect("decode c.bstart.std"), + isa::decode_word(0).expect("decode c.bstop"), + isa::decode_word(2048).expect("decode target c.bstart.std"), + isa::decode_word(2048).expect("decode stale-target c.bstart.std"), + ]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + + uops[0].w1_cycle = Some(5); + uops[3].w1_cycle = Some(9); + publish_bru_correction_state(5, &mut pipeline, &uops); + assert_eq!( + pipeline.pending_bru_correction, + Some(BruCorrectionState { + source_seq: 0, + epoch: 0, + actual_take: true, + target_pc: 0x3000, + checkpoint_id: checkpoint_id_for_seq(0, &uops), + visible_cycle: 5, + }) + ); + + schedule_frontend_redirect_recovery(9, &mut pipeline, &uops); + assert_eq!( + pipeline.frontend_redirect, + Some(FrontendRedirectState { + source_seq: 3, + target_pc: 0x4000, + restart_seq: 4, + checkpoint_id: checkpoint_id_for_seq(3, &uops), + from_correction: false, + resume_cycle: 10, + }) + ); + assert_eq!( + pipeline.flush_checkpoint_id, + Some(checkpoint_id_for_seq(3, &uops)) + ); + assert_eq!(pipeline.pending_bru_correction, None); +} + +#[test] +fn invalid_bru_recovery_target_raises_pending_precise_trap() { + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 0, + len: 4, + next_pc: 0x3000, + src0_valid: 1, + src0_reg: 2, + src0_data: 1, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: 
String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: 0, + len: 2, + next_pc: 0x1006, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + ]; + let decoded = vec![ + isa::decode_word(30_478_677).expect("decode cmp.nei"), + isa::decode_word(0).expect("decode c.bstop"), + ]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + + uops[0].w1_cycle = Some(5); + publish_bru_correction_state(5, &mut pipeline, &uops); + + assert_eq!(pipeline.pending_bru_correction, None); + assert_eq!( + pipeline.pending_trap, + Some(PendingTrapState { + seq: 0, + cause: isa::TRAP_BRU_RECOVERY_NOT_BSTART, + traparg0: 0x1000, + checkpoint_id: checkpoint_id_for_seq(0, &uops), + visible_cycle: 5, + }) + ); +} + +#[test] +fn retire_ready_attaches_bru_recovery_trap_to_offending_uop() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut pipeline = StageQueues::default(); + pipeline.pending_trap = Some(PendingTrapState { + seq: 0, + cause: isa::TRAP_BRU_RECOVERY_NOT_BSTART, + traparg0: 0x1000, + checkpoint_id: 0, + visible_cycle: 3, + }); + let mut committed = Vec::new(); + let mut retired = Vec::new(); + let mut stage_events = Vec::new(); + let mut rob = VecDeque::from([0usize]); + let mut uops = vec![CycleUop { + decoded, + commit: CommitRecord::unsupported( + 0, + 0x1000, + enc_addi(2, 0, 1) as u64, + 0, + &isa::BlockMeta::default(), + ), + deps: [None, 
None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(3), + done_cycle: Some(3), + }]; + + let trap = retire_ready( + 3, + &runtime, + &mut rob, + &mut committed, + &mut retired, + &mut pipeline, + &mut uops, + &mut stage_events, + ); + + assert_eq!(trap, Some(isa::TRAP_BRU_RECOVERY_NOT_BSTART)); + assert_eq!(committed.len(), 1); + assert_eq!(committed[0].trap_valid, 1); + assert_eq!(committed[0].trap_cause, isa::TRAP_BRU_RECOVERY_NOT_BSTART); + assert_eq!(committed[0].traparg0, 0x1000); + assert_eq!(stage_events.len(), 1); + assert_eq!(stage_events[0].checkpoint_id, Some(0)); + assert_eq!( + stage_events[0].trap_cause, + Some(isa::TRAP_BRU_RECOVERY_NOT_BSTART) + ); + assert_eq!(stage_events[0].traparg0, Some(0x1000)); + assert_eq!(pipeline.pending_trap, None); +} + +#[test] +fn frontend_redirect_restart_waits_until_next_cycle() { + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 0, + len: 4, + next_pc: 0x1100, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x1100, 0, 4, &isa::BlockMeta::default()), + ]; + let decoded = vec![ + 
isa::decode_word(2048).expect("decode c.bstart.std"), + isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"), + ]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + let mut next_fetch_seq = 1usize; + + uops[0].w1_cycle = Some(7); + schedule_frontend_redirect_recovery(7, &mut pipeline, &uops); + assert_eq!( + pipeline.frontend_redirect, + Some(FrontendRedirectState { + source_seq: 0, + target_pc: 0x1100, + restart_seq: 1, + checkpoint_id: checkpoint_id_for_seq(0, &uops), + from_correction: false, + resume_cycle: 8, + }), + "redirect restart should be delayed to the next cycle" + ); + assert_eq!( + pipeline.flush_checkpoint_id, + Some(checkpoint_id_for_seq(0, &uops)) + ); + assert_eq!( + pipeline.pending_flush, + Some(PendingFlushState { + flush_seq: 0, + checkpoint_id: checkpoint_id_for_seq(0, &uops), + apply_cycle: 8, + }) + ); + + fill_fetch(7, &mut pipeline, &mut next_fetch_seq, &uops); + assert!(pipeline.frontend[0].is_empty()); + + apply_pending_flush(7, &mut pipeline, &mut iq, &mut rob, &uops); + assert_eq!( + pipeline.pending_flush, + Some(PendingFlushState { + flush_seq: 0, + checkpoint_id: checkpoint_id_for_seq(0, &uops), + apply_cycle: 8, + }) + ); + apply_pending_flush(8, &mut pipeline, &mut iq, &mut rob, &uops); + assert_eq!(pipeline.pending_flush, None); + fill_fetch(8, &mut pipeline, &mut next_fetch_seq, &uops); + assert_eq!( + pipeline.frontend[0].iter().copied().collect::>(), + vec![1] + ); +} + +#[test] +fn pending_flush_prunes_speculative_state_on_registered_cycle() { + let boundary = isa::decode_word(2048).expect("decode c.bstart.std"); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: boundary, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 2048, + len: 2, + next_pc: 0x1100, + src0_valid: 0, + src0_reg: 0, + 
src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x1100), + phys_iq: Some(PhysIq::BruIq), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported( + 0, + 0x1002, + enc_addi(2, 0, 1) as u64, + 4, + &isa::BlockMeta::default(), + ), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::SharedIq1), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + let mut iq = vec![IqEntry { + seq: 1, + phys_iq: PhysIq::SharedIq1, + inflight: false, + src_valid: [false, false], + src_ready_nonspec: [false, false], + src_ready_spec: [false, false], + src_wait_qtag: [false, false], + }]; + let mut rob = VecDeque::from([0usize, 1usize]); + 
pipeline.frontend[0].push_back(1); + pipeline.iq_tags.insert( + 1, + QTag { + phys_iq: PhysIq::SharedIq1, + entry_id: 0, + }, + ); + rebuild_iq_owner_table(&mut pipeline.iq_owner_table, &iq, &pipeline.iq_tags); + + schedule_frontend_redirect_recovery(7, &mut pipeline, &uops); + assert_eq!( + pipeline.frontend[0].iter().copied().collect::>(), + vec![1] + ); + assert_eq!( + iq.iter().map(|entry| entry.seq).collect::>(), + vec![1] + ); + assert_eq!(rob.iter().copied().collect::>(), vec![0, 1]); + + apply_pending_flush(7, &mut pipeline, &mut iq, &mut rob, &uops); + assert_eq!( + pipeline.frontend[0].iter().copied().collect::>(), + vec![1] + ); + assert_eq!( + iq.iter().map(|entry| entry.seq).collect::>(), + vec![1] + ); + assert_eq!(rob.iter().copied().collect::>(), vec![0, 1]); + + apply_pending_flush(8, &mut pipeline, &mut iq, &mut rob, &uops); + assert!(pipeline.frontend[0].is_empty()); + assert!(iq.is_empty()); + assert_eq!(rob.iter().copied().collect::>(), vec![0]); + assert_eq!(pipeline.pending_flush, None); +} + +#[test] +fn pending_flush_restores_ready_tables_from_checkpoint_snapshot() { + let boundary = isa::decode_word(2048).expect("decode c.bstart.std"); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: boundary, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 2048, + len: 2, + next_pc: 0x1100, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, 
None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x1100), + phys_iq: Some(PhysIq::BruIq), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported( + 0, + 0x1002, + enc_addi(2, 0, 1) as u64, + 4, + &isa::BlockMeta::default(), + ), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::from([0usize]); + pipeline.ready_table_t = BTreeSet::from([1, 3]); + pipeline.ready_table_u = BTreeSet::from([2]); + pipeline.ready_table_checkpoints.insert( + 5, + ReadyTableCheckpoint { + ready_table_t: pipeline.ready_table_t.clone(), + ready_table_u: pipeline.ready_table_u.clone(), + recovery_epoch: 7, + block_head: false, + branch_context: BranchOwnerContext { + kind: BranchOwnerKind::Cond, + base_pc: 0x1000, + target_pc: 0x2000, + off: 0x1000, + pred_take: true, + epoch: 7, + }, + dynamic_target_pc: Some(0x2222), + dynamic_target_owner_seq: Some(6), + dynamic_target_producer_kind: Some(ReturnConsumerKind::SetcTgt), + dynamic_target_setup_epoch: Some(7), + dynamic_target_owner_kind: Some(ReturnConsumerKind::SetcTgt), + dynamic_target_source_owner_seq: Some(4), + 
dynamic_target_source_epoch: Some(6), + dynamic_target_source_kind: Some(DynamicTargetSourceKind::ArchTargetSetup), + dynamic_target_call_materialization_kind: Some(CallMaterializationKind::AdjacentSetret), + call_header_seq: Some(11), + call_return_target_pc: Some(0x3333), + call_return_target_owner_seq: Some(12), + call_return_target_epoch: Some(6), + call_return_materialization_kind: Some(CallMaterializationKind::AdjacentSetret), + }, + ); + pipeline.ready_table_t.insert(9); + pipeline.ready_table_u.insert(10); + pipeline.active_recovery_epoch = 12; + pipeline.active_block_head = true; + pipeline.active_branch_context = BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x3000, + target_pc: 0x3010, + off: 0x10, + pred_take: true, + epoch: 12, + }; + pipeline.active_dynamic_target_pc = Some(0x9999); + pipeline.active_dynamic_target_owner_seq = Some(23); + pipeline.active_dynamic_target_producer_kind = Some(ReturnConsumerKind::SetcTgt); + pipeline.active_dynamic_target_setup_epoch = Some(12); + pipeline.active_dynamic_target_owner_kind = Some(ReturnConsumerKind::FretStk); + pipeline.active_dynamic_target_source_owner_seq = Some(21); + pipeline.active_dynamic_target_source_epoch = Some(11); + pipeline.active_dynamic_target_source_kind = Some(DynamicTargetSourceKind::ArchTargetSetup); + pipeline.active_dynamic_target_call_materialization_kind = + Some(CallMaterializationKind::FusedCall); + pipeline.active_call_header_seq = Some(22); + pipeline.active_call_return_target_pc = Some(0x4444); + pipeline.active_call_return_target_owner_seq = Some(24); + pipeline.active_call_return_target_epoch = Some(11); + pipeline.active_call_return_materialization_kind = Some(CallMaterializationKind::FusedCall); + pipeline.pending_flush = Some(PendingFlushState { + flush_seq: 0, + checkpoint_id: 5, + apply_cycle: 8, + }); + + apply_pending_flush(8, &mut pipeline, &mut iq, &mut rob, &uops); + + assert_eq!(pipeline.ready_table_t, BTreeSet::from([1, 3])); + 
assert_eq!(pipeline.ready_table_u, BTreeSet::from([2])); + assert_eq!(pipeline.active_recovery_epoch, 7); + assert!(!pipeline.active_block_head); + assert_eq!( + pipeline.active_branch_context, + BranchOwnerContext { + kind: BranchOwnerKind::Cond, + base_pc: 0x1000, + target_pc: 0x2000, + off: 0x1000, + pred_take: true, + epoch: 7, + } + ); + assert_eq!(pipeline.active_dynamic_target_pc, Some(0x2222)); + assert_eq!(pipeline.active_dynamic_target_owner_seq, Some(6)); + assert_eq!( + pipeline.active_dynamic_target_producer_kind, + Some(ReturnConsumerKind::SetcTgt) + ); + assert_eq!(pipeline.active_dynamic_target_setup_epoch, Some(7)); + assert_eq!( + pipeline.active_dynamic_target_owner_kind, + Some(ReturnConsumerKind::SetcTgt) + ); + assert_eq!(pipeline.active_dynamic_target_source_owner_seq, Some(4)); + assert_eq!(pipeline.active_dynamic_target_source_epoch, Some(6)); + assert_eq!( + pipeline.active_dynamic_target_source_kind, + Some(DynamicTargetSourceKind::ArchTargetSetup) + ); + assert_eq!( + pipeline.active_dynamic_target_call_materialization_kind, + Some(CallMaterializationKind::AdjacentSetret) + ); + assert_eq!(pipeline.active_call_header_seq, Some(11)); + assert_eq!(pipeline.active_call_return_target_pc, Some(0x3333)); + assert_eq!(pipeline.active_call_return_target_owner_seq, Some(12)); + assert_eq!(pipeline.active_call_return_target_epoch, Some(6)); + assert_eq!( + pipeline.active_call_return_materialization_kind, + Some(CallMaterializationKind::AdjacentSetret) + ); + assert_eq!(pipeline.pending_flush, None); +} + +#[test] +fn frontend_redirect_restart_uses_legal_block_start_target_seq() { + let redirect = isa::decode_word(2048).expect("decode c.bstart.std"); + let wrong_path = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let target_head = isa::decode_word(2048).expect("decode target c.bstart.std"); + let target_body = isa::decode_word(enc_addi(3, 0, 2) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: 
redirect, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 2048, + len: 2, + next_pc: 0x2000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: Some(PhysIq::CmdIq), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(5), + done_cycle: None, + }, + CycleUop { + decoded: wrong_path, + commit: CommitRecord::unsupported(0, 0x1002, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: target_head, + commit: CommitRecord { + pc: 0x2000, + insn: 2048, + len: 2, + next_pc: 0x2002, + ..CommitRecord::unsupported(0, 0x2000, 0, 2, &isa::BlockMeta::default()) + }, + deps: [None, 
None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: target_body, + commit: CommitRecord::unsupported(0, 0x2002, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + let mut next_fetch_seq = 4usize; + + schedule_frontend_redirect_recovery(5, &mut pipeline, &uops); + assert_eq!( + pipeline.frontend_redirect, + Some(FrontendRedirectState { + source_seq: 0, + target_pc: 0x2000, + restart_seq: 2, + checkpoint_id: checkpoint_id_for_seq(0, &uops), + from_correction: false, + resume_cycle: 6, + }) + ); + assert_eq!( + pipeline.flush_checkpoint_id, + Some(checkpoint_id_for_seq(0, &uops)) + ); + + fill_fetch(6, &mut pipeline, &mut next_fetch_seq, &uops); + assert_eq!(next_fetch_seq, 4); + assert_eq!( + pipeline.frontend[0].iter().copied().collect::>(), + vec![2, 3] + ); +} + +#[test] +fn fill_fetch_assigns_packet_checkpoint_from_head_pc() { + let uops = (0..5usize) + .map(|idx| CycleUop { + decoded: isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"), + commit: 
CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000 + (idx as u64) * 4, + insn: enc_addi(2, 0, 1) as u64, + len: 4, + next_pc: 0x1004 + (idx as u64) * 4, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }) + .collect::>(); + let mut pipeline = StageQueues::default(); + let mut next_fetch_seq = 0usize; + + fill_fetch(0, &mut pipeline, &mut next_fetch_seq, &uops); + assert_eq!(pipeline.seq_checkpoint_ids.get(&0).copied(), Some(0)); + assert_eq!(pipeline.seq_checkpoint_ids.get(&1).copied(), Some(0)); + assert_eq!(pipeline.seq_checkpoint_ids.get(&2).copied(), Some(0)); + assert_eq!(pipeline.seq_checkpoint_ids.get(&3).copied(), Some(0)); + + pipeline.frontend[0].clear(); + fill_fetch(1, &mut pipeline, &mut next_fetch_seq, &uops); + assert_eq!(pipeline.seq_checkpoint_ids.get(&4).copied(), Some(4)); + assert_eq!(live_checkpoint_id_for_seq(4, &pipeline, &uops), 4); +} + +#[test] +fn start_marker_dispatch_snapshots_ready_tables_for_checkpoint() { + let decoded = isa::decode_word(2048).expect("decode c.bstart.std"); + let mut uops = 
vec![CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1000, 2048, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].push_back(0); + pipeline.seq_checkpoint_ids.insert(0, 5); + pipeline.ready_table_t.extend([1, 3]); + pipeline.ready_table_u.insert(2); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.ready_table_checkpoints.get(&5), + Some(&ReadyTableCheckpoint { + ready_table_t: BTreeSet::from([1, 3]), + ready_table_u: BTreeSet::from([2]), + recovery_epoch: 0, + block_head: true, + branch_context: BranchOwnerContext::default(), + dynamic_target_pc: None, + dynamic_target_owner_seq: None, + dynamic_target_producer_kind: None, + dynamic_target_setup_epoch: None, + dynamic_target_owner_kind: None, + dynamic_target_source_owner_seq: None, + dynamic_target_source_epoch: None, + dynamic_target_source_kind: None, + dynamic_target_call_materialization_kind: None, + call_header_seq: None, + call_return_target_pc: None, + call_return_target_owner_seq: None, + call_return_target_epoch: None, + call_return_materialization_kind: None, + }) + ); +} + +#[test] +fn start_marker_rob_checkpoint_id_uses_packet_slot_offset() { + let plain = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let start = isa::decode_word(2048).expect("decode c.bstart.std"); + let mut uops = vec![ + 
CycleUop { + decoded: plain, + commit: CommitRecord::unsupported( + 0, + 0x1000, + enc_addi(2, 0, 1) as u64, + 4, + &isa::BlockMeta::default(), + ), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: start.clone(), + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: 2048, + len: 2, + next_pc: 0x2000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + 
pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!(pipeline.seq_rob_checkpoint_ids.get(&1).copied(), Some(1)); + assert_eq!(live_rob_checkpoint_id_for_seq(1, &pipeline, &uops), 1); +} + +#[test] +fn dispatch_assigns_bru_recovery_checkpoint_from_backend_context() { + let plain = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let start = isa::decode_word(2048).expect("decode c.bstart.std"); + let bru = isa::decode_word(30_478_677).expect("decode cmp.nei"); + let mut uops = vec![ + CycleUop { + decoded: plain, + commit: CommitRecord::unsupported( + 0, + 0x1000, + enc_addi(2, 0, 1) as u64, + 4, + &isa::BlockMeta::default(), + ), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: start.clone(), + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: 2048, + len: 2, + next_pc: 0x2000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + 
src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: bru, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 2, + pc: 0x1006, + insn: 30_478_677, + len: 4, + next_pc: 0x2000, + src0_valid: 1, + src0_reg: 2, + src0_data: 1, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(5), + done_cycle: None, + }, + CycleUop { + decoded: start, + commit: CommitRecord::unsupported(0, 0x2000, 2048, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + 
load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1, 2]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + pipeline.seq_checkpoint_ids.insert(2, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + publish_bru_correction_state(5, &mut pipeline, &uops); + + assert_eq!(pipeline.seq_rob_checkpoint_ids.get(&1).copied(), Some(1)); + assert_eq!( + pipeline.seq_recovery_checkpoint_ids.get(&2).copied(), + Some(1) + ); + assert_eq!(pipeline.seq_recovery_epochs.get(&1).copied(), Some(0)); + assert_eq!(pipeline.seq_recovery_epochs.get(&2).copied(), Some(1)); + assert_eq!( + pipeline.seq_branch_contexts.get(&2).copied(), + Some(BranchOwnerContext { + kind: BranchOwnerKind::Fall, + base_pc: 0x1004, + target_pc: 0x2000, + off: 0xffc, + pred_take: false, + epoch: 1, + }) + ); + assert_eq!(recovery_checkpoint_id_for_seq(2, &pipeline, &uops), 1); + assert_eq!( + pipeline.pending_bru_correction, None, + "matching actual_take/pred_take should not publish a deferred correction" + ); +} + +#[test] +fn dispatch_assigns_cond_boundary_prediction_from_target_direction() { + fn run_case(pc: u64, target_pc: u64, expected_pred_take: bool) { + let cond = isa::decode_word(4).expect("decode c.bstart cond"); + let plain = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = vec![ + CycleUop { + decoded: cond, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc, + insn: 4, + len: 2, + next_pc: target_pc, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, 
+ dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(target_pc), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: plain, + commit: CommitRecord::unsupported( + 0, + pc.wrapping_add(2), + enc_addi(2, 0, 1) as u64, + 4, + &isa::BlockMeta::default(), + ), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.seq_branch_contexts.get(&1).copied(), + Some(BranchOwnerContext { + kind: BranchOwnerKind::Cond, + base_pc: pc, 
+ target_pc, + off: target_pc.wrapping_sub(pc), + pred_take: expected_pred_take, + epoch: 1, + }) + ); + } + + run_case(0x1000, 0x1100, false); + run_case(0x1100, 0x1000, true); +} + +#[test] +fn dispatch_maps_c_bstart_std_brtype_to_full_boundary_kind_taxonomy() { + fn run_case(insn: u64, target_pc: u64, expected_kind: BranchOwnerKind) { + let bstart = isa::decode_word(insn).expect("decode c.bstart.std variant"); + let plain = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = vec![ + CycleUop { + decoded: bstart, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn, + len: 2, + next_pc: target_pc, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(target_pc), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: plain, + commit: CommitRecord::unsupported( + 0, + 0x1002, + enc_addi(2, 0, 1) as u64, + 4, + &isa::BlockMeta::default(), + ), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: 
None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.seq_branch_contexts.get(&1).copied(), + Some(BranchOwnerContext { + kind: expected_kind, + base_pc: 0x1000, + target_pc, + off: target_pc.wrapping_sub(0x1000), + pred_take: matches!(expected_kind, BranchOwnerKind::Cond) && target_pc < 0x1000, + epoch: 1, + }) + ); + } + + run_case(2048, 0x1010, BranchOwnerKind::Fall); + run_case(4096, 0x1010, BranchOwnerKind::Direct); + run_case(6144, 0x0ff0, BranchOwnerKind::Cond); + run_case(8192, 0x1010, BranchOwnerKind::Call); + run_case(10240, 0x1010, BranchOwnerKind::Ind); + run_case(12288, 0x1010, BranchOwnerKind::ICall); + run_case(14336, 0x1010, BranchOwnerKind::Ret); +} + +#[test] +fn ret_start_marker_preserves_live_kind_before_target_setup() { + let start = isa::decode_word(14336).expect("decode c.bstart.std ret"); + let plain = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let commits = vec![ + CommitRecord::unsupported(0, 0x1000, 14336, 2, &isa::BlockMeta::default()), + CommitRecord::unsupported( + 0, + 0x1002, + enc_addi(2, 0, 1) as u64, + 4, + &isa::BlockMeta::default(), + ), + ]; + let decoded = vec![start, plain]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + 
pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!(pipeline.active_branch_context.kind, BranchOwnerKind::Ret); + assert_eq!( + pipeline.seq_branch_contexts.get(&1).copied(), + Some(BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1000, + target_pc: 0, + off: 0, + pred_take: false, + epoch: 1, + }) + ); +} + +#[test] +fn ret_block_redirect_uses_live_setc_tgt_owner_not_row_surrogate() { + let start = isa::decode_word(14336).expect("decode c.bstart.std ret"); + let setc_tgt_ra = isa::decode_word(0x029c).expect("decode c.setc.tgt ra"); + let bstop = isa::decode_word(0).expect("decode c.bstop"); + let target = isa::decode_word(2048).expect("decode target c.bstart.std"); + let commits = vec![ + CommitRecord::unsupported(0, 0x1000, 14336, 2, &isa::BlockMeta::default()), + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1002, + insn: 0x029c, + len: 2, + next_pc: 0x1004, + src0_valid: 1, + src0_reg: 10, + src0_data: 0x2000, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 2, + pc: 0x1004, + insn: 0, + len: 2, + next_pc: 0x2000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + 
lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x2000, 2048, 2, &isa::BlockMeta::default()), + ]; + let decoded = vec![start, setc_tgt_ra, bstop, target]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1, 2]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + pipeline.seq_checkpoint_ids.insert(2, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.seq_dynamic_target_pcs.get(&1).copied(), + Some(0x2000) + ); + assert_eq!( + pipeline.seq_boundary_target_pcs.get(&2).copied(), + Some(0x2000) + ); + assert_eq!( + pipeline.seq_boundary_target_owner_seqs.get(&2).copied(), + Some(1) + ); + assert_eq!( + pipeline.seq_branch_contexts.get(&2).copied(), + Some(BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1000, + target_pc: 0x2000, + off: 0x1000, + pred_take: false, + epoch: 1, + }) + ); + + pipeline.active_dynamic_target_pc = Some(0x3333); + pipeline.active_dynamic_target_owner_seq = Some(99); + uops[2].redirect_target = None; + uops[2].w1_cycle = Some(7); + + schedule_frontend_redirect_recovery(7, &mut pipeline, &uops); + + assert_eq!( + pipeline.frontend_redirect, + Some(FrontendRedirectState { + source_seq: 2, + target_pc: 0x2000, + restart_seq: 3, + checkpoint_id: 0, + from_correction: false, + resume_cycle: 8, + }) + ); +} + +#[test] +fn ret_block_missing_setc_tgt_raises_precise_dynamic_target_trap() { + let start = isa::decode_word(14336).expect("decode c.bstart.std ret"); + let bstop = isa::decode_word(0).expect("decode c.bstop"); + let target = isa::decode_word(2048).expect("decode target c.bstart.std"); + let commits = vec![ + CommitRecord::unsupported(0, 0x1000, 14336, 2, &isa::BlockMeta::default()), + CommitRecord { + 
schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1002, + insn: 0, + len: 2, + next_pc: 0x2000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x2000, 2048, 2, &isa::BlockMeta::default()), + ]; + let decoded = vec![start, bstop, target]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + assert_eq!(pipeline.seq_dynamic_target_pcs.get(&1), None); + + uops[1].redirect_target = None; + uops[1].w1_cycle = Some(7); + + publish_dynamic_boundary_target_fault_state(7, &mut pipeline, &uops); + assert_eq!( + pipeline.pending_trap, + Some(PendingTrapState { + seq: 1, + cause: isa::TRAP_DYNAMIC_TARGET_MISSING, + traparg0: 0x1002, + checkpoint_id: 0, + visible_cycle: 7, + }) + ); + + schedule_frontend_redirect_recovery(7, &mut pipeline, &uops); + assert_eq!(pipeline.frontend_redirect, None); +} + +#[test] +fn ret_block_non_block_target_raises_precise_dynamic_target_trap() { + let start = isa::decode_word(14336).expect("decode c.bstart.std ret"); + let setc_tgt_ra = isa::decode_word(0x029c).expect("decode c.setc.tgt ra"); + let bstop = isa::decode_word(0).expect("decode c.bstop"); + let illegal_target = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let commits = vec![ + CommitRecord::unsupported(0, 
0x1000, 14336, 2, &isa::BlockMeta::default()), + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1002, + insn: 0x029c, + len: 2, + next_pc: 0x1004, + src0_valid: 1, + src0_reg: 10, + src0_data: 0x2000, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 2, + pc: 0x1004, + insn: 0, + len: 2, + next_pc: 0x2000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported( + 0, + 0x2000, + enc_addi(2, 0, 1) as u64, + 4, + &isa::BlockMeta::default(), + ), + ]; + let decoded = vec![start, setc_tgt_ra, bstop, illegal_target]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1, 2]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + pipeline.seq_checkpoint_ids.insert(2, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + uops[2].redirect_target = None; + uops[2].w1_cycle = Some(7); + + publish_dynamic_boundary_target_fault_state(7, &mut pipeline, &uops); + assert_eq!( + pipeline.pending_trap, + 
Some(PendingTrapState { + seq: 2, + cause: isa::TRAP_DYNAMIC_TARGET_NOT_BSTART, + traparg0: 0x1004, + checkpoint_id: 0, + visible_cycle: 7, + }) + ); + + schedule_frontend_redirect_recovery(7, &mut pipeline, &uops); + assert_eq!(pipeline.frontend_redirect, None); +} + +#[test] +fn ret_block_stale_setc_tgt_epoch_raises_precise_dynamic_target_trap() { + let bstop = isa::decode_word(0).expect("decode c.bstop"); + let commits = vec![CommitRecord::unsupported( + 0, + 0x1004, + 0, + 2, + &isa::BlockMeta::default(), + )]; + let mut uops = build_uops(&commits, &[bstop]); + uops[0].redirect_target = Some(0x2000); + uops[0].w1_cycle = Some(7); + + let mut pipeline = StageQueues::default(); + pipeline.seq_branch_contexts.insert( + 0, + BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1004, + target_pc: 0x2000, + off: 0x0ffc, + pred_take: false, + epoch: 2, + }, + ); + pipeline.seq_recovery_epochs.insert(0, 2); + pipeline.seq_boundary_target_pcs.insert(0, 0x2000); + pipeline.seq_boundary_target_owner_seqs.insert(0, 3); + pipeline + .seq_boundary_target_producer_kinds + .insert(0, ReturnConsumerKind::SetcTgt); + pipeline.seq_boundary_target_setup_epochs.insert(0, 1); + pipeline.seq_boundary_target_source_owner_seqs.insert(0, 3); + pipeline.seq_boundary_target_source_epochs.insert(0, 1); + pipeline + .seq_boundary_target_source_kinds + .insert(0, DynamicTargetSourceKind::ArchTargetSetup); + + publish_dynamic_boundary_target_fault_state(7, &mut pipeline, &uops); + assert_eq!( + pipeline.pending_trap, + Some(PendingTrapState { + seq: 0, + cause: isa::TRAP_DYNAMIC_TARGET_STALE, + traparg0: 0x1004, + checkpoint_id: 0, + visible_cycle: 7, + }) + ); +} + +#[test] +fn fused_bstart_call_materializes_return_target_without_setret() { + let call_header = isa::decode_word(1_589_249).expect("decode bstart.std call header"); + let target = isa::decode_word(2048).expect("decode c.bstart.std"); + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 
0, + pc: 0x1000, + insn: 1_589_249, + len: 4, + next_pc: 0x3000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 10, + dst_data: 0x2000, + wb_valid: 1, + wb_rd: 10, + wb_data: 0x2000, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(0, 0x2000, 2048, 2, &isa::BlockMeta::default()), + ]; + let decoded = vec![call_header, target]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.seq_call_return_target_pcs.get(&0).copied(), + Some(0x2000) + ); + assert_eq!( + pipeline.seq_call_return_target_owner_seqs.get(&0).copied(), + Some(0) + ); + assert_eq!(pipeline.active_call_header_seq, None); + assert_eq!(pipeline.active_call_return_target_pc, Some(0x2000)); + assert_eq!(pipeline.active_call_return_target_owner_seq, Some(0)); + assert_eq!( + pipeline.active_call_return_materialization_kind, + Some(CallMaterializationKind::FusedCall) + ); + assert!(pipeline.seq_call_header_faults.is_empty()); +} + +#[test] +fn call_header_without_setret_stays_non_returning() { + let call_header = isa::decode_word(1_589_249).expect("decode bstart.std call header"); + let plain = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 1_589_249, + len: 4, + next_pc: 0x3000, + src0_valid: 0, + src0_reg: 0, + 
src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: enc_addi(2, 0, 1) as u64, + len: 4, + next_pc: 0x1008, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + ]; + let decoded = vec![call_header, plain]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!(pipeline.seq_call_return_target_pcs.get(&0), None); + assert_eq!(pipeline.seq_call_return_target_owner_seqs.get(&0), None); + assert_eq!(pipeline.active_call_header_seq, None); + assert_eq!(pipeline.active_call_return_target_pc, None); + assert_eq!(pipeline.active_call_return_target_owner_seq, None); + assert!(pipeline.seq_call_header_faults.is_empty()); +} + +#[test] +fn adjacent_setret_materializes_call_header_owner_seq() { + let call_header = isa::decode_word(1_589_249).expect("decode bstart.std call header"); + let setret = 
isa::decode_word(0x5056).expect("decode c.setret"); + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 1_589_249, + len: 4, + next_pc: 0x3000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: 0x5056, + len: 2, + next_pc: 0x1006, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 10, + dst_data: 0x2000, + wb_valid: 1, + wb_rd: 10, + wb_data: 0x2000, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + ]; + let decoded = vec![call_header, setret]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.seq_call_return_target_pcs.get(&0).copied(), + Some(0x2000) + ); + assert_eq!( + pipeline.seq_call_return_target_owner_seqs.get(&0).copied(), + Some(1) + ); + assert_eq!( + pipeline.seq_call_return_target_owner_seqs.get(&1).copied(), + Some(1) + ); + 
assert_eq!(pipeline.active_call_return_target_pc, Some(0x2000)); + assert_eq!(pipeline.active_call_return_target_owner_seq, Some(1)); + assert_eq!( + pipeline.active_call_return_materialization_kind, + Some(CallMaterializationKind::AdjacentSetret) + ); + assert!(pipeline.seq_call_header_faults.is_empty()); +} + +#[test] +fn fret_ra_inherits_call_return_source_owner_and_materialization_kind() { + let decoded = isa::decode_word(346369857).expect("decode fret.ra"); + let commits = vec![CommitRecord::unsupported( + 0, + 0x1000, + 346369857, + 4, + &isa::BlockMeta::default(), + )]; + let mut uops = build_uops(&commits, &[decoded]); + uops[0].redirect_target = Some(0x2000); + + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].push_back(0); + pipeline.active_call_return_target_pc = Some(0x2000); + pipeline.active_call_return_target_owner_seq = Some(7); + pipeline.active_call_return_materialization_kind = Some(CallMaterializationKind::FusedCall); + pipeline.active_branch_context = BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1000, + target_pc: 0x2000, + off: 0x1000, + pred_take: false, + epoch: 1, + }; + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.seq_boundary_target_pcs.get(&0).copied(), + Some(0x2000) + ); + assert_eq!( + pipeline.seq_boundary_target_owner_seqs.get(&0).copied(), + Some(7) + ); + assert_eq!( + pipeline.seq_return_consumer_kinds.get(&0).copied(), + Some(ReturnConsumerKind::FretRa) + ); + assert_eq!( + pipeline.seq_call_materialization_kinds.get(&0).copied(), + Some(CallMaterializationKind::FusedCall) + ); + assert_eq!( + pipeline.seq_boundary_target_source_kinds.get(&0).copied(), + Some(DynamicTargetSourceKind::CallReturnFused) + ); +} + +#[test] +fn non_adjacent_setret_raises_precise_call_header_trap() { + let call_header = isa::decode_word(1_589_249).expect("decode bstart.std call header"); + let 
plain = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let setret = isa::decode_word(0x5056).expect("decode c.setret"); + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 1_589_249, + len: 4, + next_pc: 0x3000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: enc_addi(2, 0, 1) as u64, + len: 4, + next_pc: 0x1008, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 2, + pc: 0x1008, + insn: 0x5056, + len: 2, + next_pc: 0x100a, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 10, + dst_data: 0x2000, + wb_valid: 1, + wb_rd: 10, + wb_data: 0x2000, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + ]; + let decoded = 
vec![call_header, plain, setret]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1, 2]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + pipeline.seq_checkpoint_ids.insert(2, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + assert_eq!( + pipeline.seq_call_header_faults.get(&2).copied(), + Some(isa::TRAP_SETRET_NOT_ADJACENT) + ); + + uops[2].w1_cycle = Some(7); + publish_call_header_fault_state(7, &mut pipeline, &uops); + + assert_eq!( + pipeline.pending_trap, + Some(PendingTrapState { + seq: 2, + cause: isa::TRAP_SETRET_NOT_ADJACENT, + traparg0: 0x1008, + checkpoint_id: 0, + visible_cycle: 7, + }) + ); +} + +#[test] +fn call_header_fault_flush_emits_attempted_owner_row_id() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let call_header = isa::decode_word(1_589_249).expect("decode bstart.std call header"); + let plain = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let setret = isa::decode_word(0x5056).expect("decode c.setret"); + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 1_589_249, + len: 4, + next_pc: 0x3000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: enc_addi(2, 0, 1) as u64, + len: 4, + next_pc: 0x1008, + src0_valid: 0, + src0_reg: 0, + 
src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 2, + dst_data: 1, + wb_valid: 1, + wb_rd: 2, + wb_data: 1, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 2, + pc: 0x1008, + insn: 0x5056, + len: 2, + next_pc: 0x100a, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 10, + dst_data: 0x2000, + wb_valid: 1, + wb_rd: 10, + wb_data: 0x2000, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + ]; + let decoded = vec![call_header, plain, setret]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1, 2]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + pipeline.seq_checkpoint_ids.insert(2, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + uops[2].w1_cycle = Some(7); + publish_call_header_fault_state(7, &mut pipeline, &uops); + + let mut out = Vec::new(); + emit_stage_events(7, &runtime, &pipeline, &iq, &rob, &uops, &mut out); + + assert!(out.iter().any(|event| { + event.stage_id == "FLS" + && event.row_id == "uop2" + && event.cause == "call_header_fault" + && event.trap_cause == Some(isa::TRAP_SETRET_NOT_ADJACENT) + && event.traparg0 == Some(0x1008) + && event.branch_kind.as_deref() == Some("call") + && 
event.target_owner_row_id.as_deref() == Some("uop2") + && event.call_materialization_kind.as_deref() == Some("adjacent_setret") + })); +} + +#[test] +fn redirecting_fused_call_emits_call_materialization_kind_on_fls() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(1_589_249).expect("decode bstart.std call header"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 1_589_249, + len: 4, + next_pc: 0x3000, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 10, + dst_data: 0x2000, + wb_valid: 1, + wb_rd: 10, + wb_data: 0x2000, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x3000), + phys_iq: Some(PhysIq::CmdIq), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline + .seq_call_materialization_kinds + .insert(0, CallMaterializationKind::FusedCall); + + let mut out = Vec::new(); + emit_stage_events( + 7, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!(out.iter().any(|event| { + event.stage_id == "FLS" + && event.row_id == "uop0" + && event.branch_kind.as_deref() == Some("call") + && 
event.call_materialization_kind.as_deref() == Some("fused_call") + })); +} + +#[test] +fn redirecting_control_emits_flush_stage() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(2048).expect("decode c.bstart.std"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 2048, + len: 2, + next_pc: 0x1010, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x1010), + phys_iq: Some(PhysIq::CmdIq), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }]; + let mut out = Vec::new(); + + emit_stage_events( + 7, + &runtime, + &StageQueues::default(), + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + assert!(out.iter().any(|event| { + event.stage_id == "FLS" + && event.cause == "redirect_boundary" + && event.branch_kind.as_deref() == Some("fall") + })); +} + +#[test] +fn ret_dynamic_target_fault_emits_precise_flush_cause_and_return_kind() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(346370113).expect("decode fret.stk"); + let uops = vec![CycleUop { + 
decoded, + commit: CommitRecord::unsupported(0, 0x1000, 346370113, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.seq_branch_contexts.insert( + 0, + BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1000, + target_pc: 0x2000, + off: 0x1000, + pred_take: false, + epoch: 1, + }, + ); + pipeline + .seq_return_consumer_kinds + .insert(0, ReturnConsumerKind::FretStk); + pipeline.pending_trap = Some(PendingTrapState { + seq: 0, + cause: isa::TRAP_DYNAMIC_TARGET_MISSING, + traparg0: 0x1000, + checkpoint_id: 3, + visible_cycle: 7, + }); + + let mut out = Vec::new(); + emit_stage_events( + 7, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!(out.iter().any(|event| { + event.stage_id == "FLS" + && event.row_id == "uop0" + && event.cause == "dynamic_target_missing" + && event.trap_cause == Some(isa::TRAP_DYNAMIC_TARGET_MISSING) + && event.traparg0 == Some(0x1000) + && event.branch_kind.as_deref() == Some("ret") + && event.return_kind.as_deref() == Some("fret_stk") + })); +} + +#[test] +fn ret_fault_emits_call_materialization_kind_from_live_return_source() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(346369857).expect("decode fret.ra"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1000, 346369857, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + 
src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.seq_branch_contexts.insert( + 0, + BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1000, + target_pc: 0x2000, + off: 0x1000, + pred_take: false, + epoch: 1, + }, + ); + pipeline + .seq_return_consumer_kinds + .insert(0, ReturnConsumerKind::FretRa); + pipeline + .seq_call_materialization_kinds + .insert(0, CallMaterializationKind::AdjacentSetret); + pipeline + .seq_boundary_target_source_kinds + .insert(0, DynamicTargetSourceKind::CallReturnAdjacentSetret); + pipeline.pending_trap = Some(PendingTrapState { + seq: 0, + cause: isa::TRAP_DYNAMIC_TARGET_MISSING, + traparg0: 0x1000, + checkpoint_id: 3, + visible_cycle: 7, + }); + + let mut out = Vec::new(); + emit_stage_events( + 7, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!(out.iter().any(|event| { + event.stage_id == "FLS" + && event.row_id == "uop0" + && event.cause == "dynamic_target_missing" + && event.branch_kind.as_deref() == Some("ret") + && event.return_kind.as_deref() == Some("fret_ra") + && event.call_materialization_kind.as_deref() == Some("adjacent_setret") + && event.target_source_kind.as_deref() == Some("call_return_adjacent_setret") + })); +} + +#[test] +fn ret_fault_emits_stale_dynamic_target_cause_and_setup_provenance() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(0).expect("decode c.bstop"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1004, 
0, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.seq_branch_contexts.insert( + 0, + BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1004, + target_pc: 0x2000, + off: 0x0ffc, + pred_take: false, + epoch: 2, + }, + ); + pipeline.seq_recovery_epochs.insert(0, 2); + pipeline.seq_boundary_target_owner_seqs.insert(0, 3); + pipeline + .seq_boundary_target_producer_kinds + .insert(0, ReturnConsumerKind::SetcTgt); + pipeline.seq_boundary_target_setup_epochs.insert(0, 1); + pipeline.seq_boundary_target_source_owner_seqs.insert(0, 3); + pipeline.seq_boundary_target_source_epochs.insert(0, 1); + pipeline + .seq_boundary_target_source_kinds + .insert(0, DynamicTargetSourceKind::ArchTargetSetup); + pipeline.pending_trap = Some(PendingTrapState { + seq: 0, + cause: isa::TRAP_DYNAMIC_TARGET_STALE, + traparg0: 0x1004, + checkpoint_id: 0, + visible_cycle: 7, + }); + + let mut out = Vec::new(); + emit_stage_events( + 7, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + let event = out + .iter() + .find(|event| { + event.stage_id == "FLS" + && event.row_id == "uop0" + && event.cause == "dynamic_target_stale_setup" + }) + .expect("missing stale setup FLS event"); + assert_eq!(event.target_owner_row_id.as_deref(), Some("uop3")); + assert_eq!(event.target_producer_kind.as_deref(), Some("setc_tgt")); + assert_eq!(event.branch_kind.as_deref(), Some("ret")); + 
assert_eq!(event.target_setup_epoch, Some(1)); + assert_eq!(event.boundary_epoch, Some(2)); + assert_eq!(event.target_source_owner_row_id.as_deref(), Some("uop3")); + assert_eq!(event.target_source_epoch, Some(1)); + assert_eq!( + event.target_source_kind.as_deref(), + Some("arch_target_setup") + ); +} + +#[test] +fn ind_fault_emits_stale_return_dynamic_target_cause_and_provenance() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(0).expect("decode c.bstop"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1006, 0, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.seq_branch_contexts.insert( + 0, + BranchOwnerContext { + kind: BranchOwnerKind::Ind, + base_pc: 0x1006, + target_pc: 0x2000, + off: 0x0ffa, + pred_take: false, + epoch: 2, + }, + ); + pipeline.seq_recovery_epochs.insert(0, 2); + pipeline.seq_boundary_target_owner_seqs.insert(0, 4); + pipeline + .seq_boundary_target_producer_kinds + .insert(0, ReturnConsumerKind::SetcTgt); + pipeline.seq_boundary_target_setup_epochs.insert(0, 1); + pipeline.seq_boundary_target_source_owner_seqs.insert(0, 9); + pipeline.seq_boundary_target_source_epochs.insert(0, 0); + pipeline + .seq_boundary_target_source_kinds + .insert(0, DynamicTargetSourceKind::CallReturnFused); + pipeline + .seq_call_materialization_kinds + .insert(0, CallMaterializationKind::FusedCall); + pipeline.pending_trap = 
Some(PendingTrapState { + seq: 0, + cause: isa::TRAP_DYNAMIC_TARGET_STALE, + traparg0: 0x1006, + checkpoint_id: 0, + visible_cycle: 7, + }); + + let mut out = Vec::new(); + emit_stage_events( + 7, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!(out.iter().any(|event| { + event.stage_id == "FLS" + && event.row_id == "uop0" + && event.cause == "dynamic_target_stale_return" + && event.target_owner_row_id.as_deref() == Some("uop4") + && event.target_producer_kind.as_deref() == Some("setc_tgt") + && event.branch_kind.as_deref() == Some("ind") + && event.target_setup_epoch == Some(1) + && event.boundary_epoch == Some(2) + && event.target_source_owner_row_id.as_deref() == Some("uop9") + && event.target_source_epoch == Some(0) + && event.call_materialization_kind.as_deref() == Some("fused_call") + && event.target_source_kind.as_deref() == Some("call_return_fused") + })); +} + +#[test] +fn ret_boundary_flush_emits_dynamic_target_owner_row_id() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(0).expect("decode c.bstop"); + let uops = vec![ + CycleUop { + decoded: isa::decode_word(0x029c).expect("decode c.setc.tgt ra"), + commit: CommitRecord::unsupported(0, 0x1000, 0x029c, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(6), + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1002, 0, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: 
[None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + pipeline.seq_branch_contexts.insert( + 1, + BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1002, + target_pc: 0x2000, + off: 0x0ffe, + pred_take: false, + epoch: 1, + }, + ); + pipeline.seq_boundary_target_pcs.insert(1, 0x2000); + pipeline.seq_boundary_target_owner_seqs.insert(1, 0); + pipeline + .seq_boundary_target_source_kinds + .insert(1, DynamicTargetSourceKind::ArchTargetSetup); + pipeline + .seq_return_consumer_kinds + .insert(1, ReturnConsumerKind::SetcTgt); + + let mut out = Vec::new(); + emit_stage_events( + 7, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!(out.iter().any(|event| { + event.stage_id == "FLS" + && event.row_id == "uop1" + && event.cause == "redirect_boundary" + && event.target_owner_row_id.as_deref() == Some("uop0") + && event.branch_kind.as_deref() == Some("ret") + && event.return_kind.as_deref() == Some("setc_tgt") + && event.target_source_kind.as_deref() == Some("arch_target_setup") + })); +} + +#[test] +fn ind_boundary_preserves_call_return_target_source_kind_via_setc_tgt() { + let call_header = isa::decode_word(1_589_249).expect("decode bstart.std call header"); + let setc_tgt = isa::decode_word(0x029c).expect("decode c.setc.tgt ra"); + let bstop = isa::decode_word(0).expect("decode c.bstop"); + let commits = vec![ + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 0, + pc: 0x1000, + insn: 1_589_249, + len: 4, + next_pc: 0x3000, + src0_valid: 0, + src0_reg: 0, + 
src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 1, + dst_reg: 10, + dst_data: 0x2000, + wb_valid: 1, + wb_rd: 10, + wb_data: 0x2000, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord { + schema_version: "1.0".to_string(), + cycle: 1, + pc: 0x1004, + insn: 0x029c, + len: 2, + next_pc: 0x1006, + src0_valid: 1, + src0_reg: 10, + src0_data: 0x2000, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 0, + trap_cause: 0, + traparg0: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + }, + CommitRecord::unsupported(2, 0x1006, 0, 2, &isa::BlockMeta::default()), + ]; + let decoded = vec![call_header, setc_tgt, bstop]; + let mut uops = build_uops(&commits, &decoded); + let mut pipeline = StageQueues::default(); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + pipeline.frontend[10].extend([0, 1]); + pipeline.seq_checkpoint_ids.insert(0, 0); + pipeline.seq_checkpoint_ids.insert(1, 0); + pipeline.seq_checkpoint_ids.insert(2, 0); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + pipeline.frontend[10].push_back(2); + pipeline.active_branch_context = BranchOwnerContext { + kind: BranchOwnerKind::Ind, + base_pc: 0x1006, + target_pc: 0x2000, + off: 0x0ffa, + pred_take: false, + epoch: 1, + }; + dispatch_to_iq_and_bypass(1, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.seq_boundary_target_owner_seqs.get(&2).copied(), + Some(1) + ); + assert_eq!( + 
pipeline.seq_boundary_target_producer_kinds.get(&2).copied(), + Some(ReturnConsumerKind::SetcTgt) + ); + assert_eq!( + pipeline.seq_boundary_target_source_kinds.get(&2).copied(), + Some(DynamicTargetSourceKind::CallReturnFused) + ); + assert_eq!( + pipeline + .seq_boundary_target_source_owner_seqs + .get(&2) + .copied(), + Some(0) + ); + assert_eq!( + pipeline.seq_boundary_target_source_epochs.get(&2).copied(), + Some(0) + ); + assert_eq!( + pipeline.seq_call_materialization_kinds.get(&2).copied(), + Some(CallMaterializationKind::FusedCall) + ); +} + +#[test] +fn ind_boundary_flush_emits_call_materialization_kind_from_dynamic_target_source() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(0).expect("decode c.bstop"); + let uops = vec![ + CycleUop { + decoded: isa::decode_word(0x029c).expect("decode c.setc.tgt ra"), + commit: CommitRecord::unsupported(0, 0x1000, 0x029c, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(6), + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1002, 0, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, 
+ miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + pipeline.seq_branch_contexts.insert( + 1, + BranchOwnerContext { + kind: BranchOwnerKind::Ind, + base_pc: 0x1002, + target_pc: 0x2000, + off: 0x0ffe, + pred_take: false, + epoch: 1, + }, + ); + pipeline.seq_boundary_target_pcs.insert(1, 0x2000); + pipeline.seq_boundary_target_owner_seqs.insert(1, 0); + pipeline + .seq_boundary_target_producer_kinds + .insert(1, ReturnConsumerKind::SetcTgt); + pipeline.seq_boundary_target_source_owner_seqs.insert(1, 0); + pipeline.seq_boundary_target_source_epochs.insert(1, 0); + pipeline + .seq_boundary_target_source_kinds + .insert(1, DynamicTargetSourceKind::CallReturnFused); + pipeline + .seq_call_materialization_kinds + .insert(1, CallMaterializationKind::FusedCall); + + let mut out = Vec::new(); + emit_stage_events( + 7, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!(out.iter().any(|event| { + event.stage_id == "FLS" + && event.row_id == "uop1" + && event.cause == "redirect_boundary" + && event.target_owner_row_id.as_deref() == Some("uop0") + && event.target_producer_kind.as_deref() == Some("setc_tgt") + && event.branch_kind.as_deref() == Some("ind") + && event.target_source_owner_row_id.as_deref() == Some("uop0") + && event.target_source_epoch == Some(0) + && event.call_materialization_kind.as_deref() == Some("fused_call") + && event.target_source_kind.as_deref() == Some("call_return_fused") + })); +} + +#[test] +fn retire_emits_live_branch_kind_on_cmt_event() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(30_478_677).expect("decode cmp.nei"); + let mut uops = vec![CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1004, 30_478_677, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: 
[None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(3), + done_cycle: Some(3), + }]; + let mut pipeline = StageQueues::default(); + let mut rob = VecDeque::from([0usize]); + let mut committed = Vec::new(); + let mut retired = Vec::new(); + let mut stage_events = Vec::new(); + pipeline.seq_branch_contexts.insert( + 0, + BranchOwnerContext { + kind: BranchOwnerKind::Cond, + base_pc: 0x1000, + target_pc: 0x0ff0, + off: 0xfffffffffffffff0, + pred_take: true, + epoch: 1, + }, + ); + + let trap = retire_ready( + 3, + &runtime, + &mut rob, + &mut committed, + &mut retired, + &mut pipeline, + &mut uops, + &mut stage_events, + ); + + assert_eq!(trap, None); + assert_eq!(committed.len(), 1); + assert_eq!(stage_events.len(), 1); + assert_eq!(stage_events[0].stage_id, "CMT"); + assert_eq!(stage_events[0].branch_kind.as_deref(), Some("cond")); +} + +#[test] +fn retire_emits_distinct_return_kind_on_cmt_event() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let fret_ra = isa::DecodedInstruction { + uid: "test_fret_ra".to_string(), + mnemonic: "FRET.RA".to_string(), + asm: "FRET.RA [x10], sp!, 16".to_string(), + group: "Block Split".to_string(), + encoding_kind: "L32".to_string(), + length_bits: 32, + mask: 0, + match_bits: 0, + instruction_bits: 0, + uop_group: "CMD".to_string(), + fields: Vec::new(), + }; + let fret_stk = isa::decode_word(346370113).expect("decode fret.stk"); + let mut uops = vec![ + CycleUop { + decoded: fret_ra, + commit: CommitRecord::unsupported(0, 0x1000, 0xfeed0001, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], 
+ dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(3), + done_cycle: Some(3), + }, + CycleUop { + decoded: fret_stk, + commit: CommitRecord::unsupported(0, 0x1004, 346370113, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x3000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(3), + done_cycle: Some(3), + }, + ]; + let mut pipeline = StageQueues::default(); + let mut rob = VecDeque::from([0usize, 1usize]); + let mut committed = Vec::new(); + let mut retired = Vec::new(); + let mut stage_events = Vec::new(); + pipeline.seq_branch_contexts.insert( + 0, + BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1000, + target_pc: 0x2000, + off: 0x1000, + pred_take: false, + epoch: 1, + }, + ); + pipeline.seq_branch_contexts.insert( + 1, + BranchOwnerContext { + kind: BranchOwnerKind::Ret, + base_pc: 0x1004, + target_pc: 0x3000, + off: 0x1ffc, + pred_take: false, + epoch: 1, + }, + ); + pipeline + .seq_return_consumer_kinds + .insert(0, ReturnConsumerKind::FretRa); + pipeline + .seq_return_consumer_kinds + .insert(1, ReturnConsumerKind::FretStk); + + let trap = retire_ready( + 3, + &runtime, + &mut rob, + &mut committed, + &mut retired, + &mut pipeline, + &mut uops, + &mut stage_events, + ); + + assert_eq!(trap, None); + 
assert_eq!(stage_events.len(), 2); + assert_eq!(stage_events[0].return_kind.as_deref(), Some("fret_ra")); + assert_eq!(stage_events[1].return_kind.as_deref(), Some("fret_stk")); + assert_eq!(stage_events[0].target_owner_row_id.as_deref(), Some("uop0")); + assert_eq!(stage_events[1].target_owner_row_id.as_deref(), Some("uop1")); +} + +#[test] +fn retire_emits_call_header_target_owner_row_id_on_cmt_event() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let call_header = isa::decode_word(1_589_249).expect("decode bstart.std call header"); + let setret = isa::decode_word(0x5056).expect("decode c.setret"); + let mut uops = vec![ + CycleUop { + decoded: call_header, + commit: CommitRecord::unsupported(0, 0x1000, 1_589_249, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x3000), + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(3), + done_cycle: Some(3), + }, + CycleUop { + decoded: setret, + commit: CommitRecord::unsupported(0, 0x1004, 0x5056, 2, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(3), + done_cycle: Some(3), + }, + ]; + let mut pipeline = StageQueues::default(); + 
pipeline.seq_branch_contexts.insert( + 0, + BranchOwnerContext { + kind: BranchOwnerKind::Call, + base_pc: 0x1000, + target_pc: 0x3000, + off: 0x2000, + pred_take: false, + epoch: 1, + }, + ); + pipeline.seq_call_return_target_owner_seqs.insert(0, 1); + pipeline.seq_call_return_target_owner_seqs.insert(1, 1); + pipeline + .seq_call_materialization_kinds + .insert(0, CallMaterializationKind::AdjacentSetret); + pipeline + .seq_call_materialization_kinds + .insert(1, CallMaterializationKind::AdjacentSetret); + let mut rob = VecDeque::from([0usize, 1usize]); + let mut committed = Vec::new(); + let mut retired = Vec::new(); + let mut stage_events = Vec::new(); + + let trap = retire_ready( + 3, + &runtime, + &mut rob, + &mut committed, + &mut retired, + &mut pipeline, + &mut uops, + &mut stage_events, + ); + + assert_eq!(trap, None); + assert_eq!(stage_events.len(), 2); + assert_eq!(stage_events[0].branch_kind.as_deref(), Some("call")); + assert_eq!(stage_events[0].target_owner_row_id.as_deref(), Some("uop1")); + assert_eq!(stage_events[1].target_owner_row_id.as_deref(), Some("uop1")); + assert_eq!( + stage_events[0].call_materialization_kind.as_deref(), + Some("adjacent_setret") + ); + assert_eq!( + stage_events[1].call_materialization_kind.as_deref(), + Some("adjacent_setret") + ); +} + +#[test] +fn cycle_limit_reports_requested_cycle_budget() { + let program = vec![ + enc_addi(2, 0, 1), + enc_addi(3, 2, 2), + enc_addi(4, 3, 3), + enc_addi(5, 4, 4), + enc_addi(6, 5, 5), + enc_addi(7, 6, 6), + enc_addi(8, 7, 7), + enc_addi(9, 8, 8), + enc_addi(10, 9, 9), + enc_addi(11, 10, 10), + enc_addi(12, 11, 11), + enc_acrc(1), + ]; + let runtime = sample_runtime(&program, &[]); + let bundle = CycleEngine + .run( + &runtime, + &CycleRunOptions { + max_cycles: 4, + ..CycleRunOptions::default() + }, + ) + .unwrap(); + + assert_eq!(bundle.result.metrics.exit_reason, "cycle_limit"); + assert_eq!(bundle.result.metrics.cycles, 4); +} + +#[test] +fn 
advance_frontend_preserves_overflowing_uops() { + let mut pipeline = StageQueues::default(); + pipeline.frontend[8].extend([10, 11, 12, 13, 14]); + pipeline.frontend[9].extend([20, 21, 22]); + let mut rob = VecDeque::new(); + + advance_frontend(&mut pipeline, &mut rob); + + assert_eq!( + pipeline.frontend[10].iter().copied().collect::<Vec<_>>(), + vec![20, 21, 22] + ); + assert_eq!( + pipeline.frontend[9].iter().copied().collect::<Vec<_>>(), + vec![10, 11, 12, 13] + ); + assert_eq!( + pipeline.frontend[8].iter().copied().collect::<Vec<_>>(), + vec![14] + ); +} + +#[test] +fn d2_bypass_matches_documented_immediate_only_path() { + let setret = isa::decode_word(281474524250110).expect("decode hl.setret"); + let fentry = isa::decode_word(178585665).expect("decode fentry"); + + assert!(d2_bypass(&setret)); + assert!(!d2_bypass(&fentry)); +} + +#[test] +fn iq_entry_remains_inflight_until_i2_dealloc() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }]; + let mut iq = vec![IqEntry { + seq: 0, + phys_iq: PhysIq::AluIq0, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }]; + let mut p1 = VecDeque::from([0usize]); + let rob = VecDeque::from([0usize]); + let admitted = arbitrate_i1(0, &mut p1, &mut iq, &uops, &rob); + + 
assert_eq!(admitted.iter().copied().collect::<Vec<_>>(), vec![0]); + assert_eq!(iq.len(), 1); + assert!(iq[0].inflight); + + let mut pipeline = StageQueues::default(); + pipeline.i1 = admitted; + advance_i1_to_i2(&mut pipeline, &mut iq); + + assert_eq!(pipeline.i2.iter().copied().collect::<Vec<_>>(), vec![0]); + assert!(iq.is_empty()); +} + +#[test] +fn pick_uses_oldest_ready_rob_order() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = [PhysIq::BruIq, PhysIq::AluIq0, PhysIq::SharedIq1] + .into_iter() + .map(|phys_iq| CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(phys_iq), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }) + .collect::<Vec<_>>(); + let mut iq = vec![ + IqEntry { + seq: 2, + phys_iq: PhysIq::SharedIq1, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 0, + phys_iq: PhysIq::BruIq, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 1, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + ]; + let rob = VecDeque::from([0usize, 1, 2]); + let mut p1 = VecDeque::new(); + + pick_from_iq(0, 0, &mut iq, &uops, &mut p1, &rob); + + assert_eq!(p1.iter().copied().collect::<Vec<_>>(), 
vec![0, 1, 2]); + assert!(iq.iter().all(|entry| entry.inflight)); +} + +#[test] +fn pick_limits_to_one_winner_per_physical_iq() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = (0..3) + .map(|_| CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }) + .collect::<Vec<_>>(); + let mut iq = vec![ + IqEntry { + seq: 2, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 0, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 1, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + ]; + let rob = VecDeque::from([0usize, 1, 2]); + let mut p1 = VecDeque::new(); + + pick_from_iq(0, 0, &mut iq, &uops, &mut p1, &rob); + + assert_eq!(p1.iter().copied().collect::<Vec<_>>(), vec![0]); + assert!(iq.iter().find(|entry| entry.seq == 0).unwrap().inflight); + assert!(!iq.iter().find(|entry| entry.seq == 1).unwrap().inflight); + assert!(!iq.iter().find(|entry| entry.seq == 2).unwrap().inflight); +} + +#[test] +fn i1_to_i2_limits_one_admit_per_physical_iq() { + let mut pipeline = 
StageQueues::default(); + pipeline.i1 = VecDeque::from([0usize, 1, 2]); + let mut iq = vec![ + IqEntry { + seq: 0, + phys_iq: PhysIq::AluIq0, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 1, + phys_iq: PhysIq::AluIq0, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 2, + phys_iq: PhysIq::BruIq, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + ]; + + advance_i1_to_i2(&mut pipeline, &mut iq); + + assert_eq!(pipeline.i2.iter().copied().collect::<Vec<_>>(), vec![0, 2]); + assert_eq!(pipeline.i1.iter().copied().collect::<Vec<_>>(), vec![1]); + assert_eq!( + iq.iter().map(|entry| entry.seq).collect::<Vec<_>>(), + vec![1] + ); +} + +#[test] +fn emit_stage_events_reports_ready_vs_wait_iq_age_per_queue() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: 
[None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let pipeline = StageQueues::default(); + let iq = vec![ + IqEntry { + seq: 1, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 0, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + ]; + let rob = VecDeque::from([0usize, 1]); + let mut out = Vec::new(); + + emit_stage_events(0, &runtime, &pipeline, &iq, &rob, &uops, &mut out); + + assert!(out.iter().any(|event| { + event.stage_id == "IQ" && event.row_id == "uop0" && event.cause == "ready" + })); + assert!(out.iter().any(|event| { + event.stage_id == "IQ" && event.row_id == "uop1" && event.cause == "wait_iq_age" + })); +} + +#[test] +fn i1_arbitration_uses_oldest_first_read_port_policy() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = (0..3) + .map(|seq| CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + src0_valid: 1, + src0_reg: 1, + src1_valid: 1, + src1_reg: 2, + ..CommitRecord::unsupported( + 0, + 0x1000 + (seq as u64) * 4, + 0, + 4, + &isa::BlockMeta::default(), + ) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + 
load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }) + .collect::<Vec<_>>(); + for uop in &mut uops { + uop.commit.src0_data = 1; + uop.commit.src1_data = 2; + } + let mut iq = vec![ + IqEntry { + seq: 2, + phys_iq: PhysIq::AluIq0, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 0, + phys_iq: PhysIq::AluIq0, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 1, + phys_iq: PhysIq::AluIq0, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + ]; + let mut p1 = VecDeque::from([2usize, 0, 1]); + let rob = VecDeque::from([0usize, 1, 2]); + + let admitted = arbitrate_i1(0, &mut p1, &mut iq, &uops, &rob); + + assert_eq!(admitted.iter().copied().collect::<Vec<_>>(), vec![0]); + assert!(!iq.iter().find(|entry| entry.seq == 1).unwrap().inflight); + assert!(!iq.iter().find(|entry| entry.seq == 2).unwrap().inflight); +} + +#[test] +fn load_spec_ready_source_skips_rf_read_ports() { + let load = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let consumer = isa::decode_word(enc_addi(3, 2, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: load, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: true, + 
redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: Some(4), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: 2, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + assert_eq!(read_ports_needed(1, 5, &uops), 0); + assert!(iq_entry_ready(1, 5, 0, &uops)); + assert!(!i2_ready(1, 5, &uops)); + assert!(i2_ready(1, 8, &uops)); +} + +#[test] +fn load_consumer_waits_in_i2_for_e4_forward() { + let load = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let consumer = isa::decode_word(enc_addi(3, 2, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: load, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: true, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: Some(4), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + 
}, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: 2, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut i2 = VecDeque::from([1usize]); + let mut e1 = VecDeque::new(); + let mut lhq = VecDeque::new(); + let mut stq = VecDeque::new(); + + let mut lsid_issue_ptr = 0usize; + let mut lsid_complete_ptr = 0usize; + advance_i2( + 5, + &mut i2, + &mut e1, + &mut lhq, + &mut stq, + &mut lsid_issue_ptr, + &mut lsid_complete_ptr, + &uops, + ); + assert_eq!(i2.iter().copied().collect::>(), vec![1]); + assert!(e1.is_empty()); + + advance_i2( + 8, + &mut i2, + &mut e1, + &mut lhq, + &mut stq, + &mut lsid_issue_ptr, + &mut lsid_complete_ptr, + &uops, + ); + assert!(i2.is_empty()); + assert_eq!(e1.iter().copied().collect::>(), vec![1]); + assert!(lhq.is_empty()); +} + +#[test] +fn iq_spec_ready_revokes_on_replay_reset_and_rewakes_later() { + let load = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let consumer = isa::decode_word(enc_addi(3, 2, 1) as u64).expect("decode addi"); + let mut uops = vec![ + CycleUop { + decoded: load, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + 
load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: true, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: Some(4), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: 2, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut iq = vec![make_iq_entry( + 5, + 1, + PhysIq::AluIq0, + &BTreeSet::new(), + &BTreeSet::new(), + &uops, + )]; + + assert!(iq[0].src_ready_spec[0]); + assert!(!iq[0].src_ready_nonspec[0]); + + uops[0].pick_wakeup_visible = None; + uops[0].data_ready_visible = None; + uops[0].e1_cycle = None; + uops[0].miss_pending_until = Some(12); + let iq_tags = test_iq_tags(&iq); + let iq_owner_table = test_iq_owner_table(&iq, &iq_tags); + let crossbar = test_qtag_wait_crossbar(&iq, &uops); + update_iq_entries_for_cycle( + 6, + &mut iq, + &BTreeSet::new(), + &BTreeSet::new(), + &iq_owner_table, + &iq_tags, + &crossbar, + &uops, + ); + + assert!(!iq[0].src_ready_spec[0]); + assert!(!iq[0].src_ready_nonspec[0]); + assert_eq!( + iq_entry_wait_cause_from_state(&iq[0], 6, 0, &uops), + Some("wait_miss") + ); + + uops[0].miss_pending_until = None; + uops[0].pick_wakeup_visible = Some(9); + let iq_tags = test_iq_tags(&iq); + let iq_owner_table = test_iq_owner_table(&iq, &iq_tags); + let 
crossbar = test_qtag_wait_crossbar(&iq, &uops); + update_iq_entries_for_cycle( + 9, + &mut iq, + &BTreeSet::new(), + &BTreeSet::new(), + &iq_owner_table, + &iq_tags, + &crossbar, + &uops, + ); + + assert!(iq[0].src_ready_spec[0]); + assert!(!iq[0].src_ready_nonspec[0]); +} + +#[test] +fn iq_load_spec_wakeup_can_come_from_prior_e1_stage() { + let load = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let consumer = isa::decode_word(enc_addi(3, 2, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: load, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: Some(4), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: 2, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut iq = vec![make_iq_entry( + 4, + 1, + PhysIq::AluIq0, + &BTreeSet::new(), + &BTreeSet::new(), + &uops, + 
)]; + + assert!(!iq[0].src_ready_spec[0]); + let iq_tags = test_iq_tags(&iq); + let iq_owner_table = test_iq_owner_table(&iq, &iq_tags); + let crossbar = test_qtag_wait_crossbar(&iq, &uops); + update_iq_entries_for_cycle( + 5, + &mut iq, + &BTreeSet::new(), + &BTreeSet::new(), + &iq_owner_table, + &iq_tags, + &crossbar, + &uops, + ); + assert!(iq[0].src_ready_spec[0]); + assert!(!iq[0].src_ready_nonspec[0]); +} + +#[test] +fn iq_nonspec_wakeup_can_come_from_prior_w1_stage() { + let producer = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let consumer = isa::decode_word(enc_addi(3, 2, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: producer, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(4), + done_cycle: None, + }, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: 2, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + 
done_cycle: None, + }, + ]; + let mut iq = vec![make_iq_entry( + 4, + 1, + PhysIq::AluIq0, + &BTreeSet::new(), + &BTreeSet::new(), + &uops, + )]; + + assert!(!iq[0].src_ready_nonspec[0]); + let iq_tags = test_iq_tags(&iq); + let iq_owner_table = test_iq_owner_table(&iq, &iq_tags); + let crossbar = test_qtag_wait_crossbar(&iq, &uops); + update_iq_entries_for_cycle( + 5, + &mut iq, + &BTreeSet::new(), + &BTreeSet::new(), + &iq_owner_table, + &iq_tags, + &crossbar, + &uops, + ); + assert!(iq[0].src_ready_nonspec[0]); + assert!(!iq[0].src_ready_spec[0]); +} + +#[test] +fn qtag_wakeup_fanout_only_wakes_matching_queue_consumer() { + let producer = isa::decode_word(enc_addi(31, 0, 1) as u64).expect("decode implicit-t producer"); + let consumer = + isa::decode_word(enc_addi(2, REG_T1 as u32, 1) as u64).expect("decode implicit-t consumer"); + let uops = vec![ + CycleUop { + decoded: producer, + commit: CommitRecord { + wb_valid: 1, + wb_rd: REG_T1, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: Some(QueueWakeKind::T), + dst_logical_tag: Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + dst_qtag: Some(QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 3, + }), + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(4), + done_cycle: None, + }, + CycleUop { + decoded: consumer.clone(), + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + 
Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + None, + ], + src_qtags: [ + Some(QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 3, + }), + None, + ], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + ..CommitRecord::unsupported(0, 0x1008, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + None, + ], + src_qtags: [ + Some(QTag { + phys_iq: PhysIq::SharedIq1, + entry_id: 3, + }), + None, + ], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: isa::decode_word(enc_addi(2, REG_T1 as u32, 1) as u64) + .expect("decode implicit-t consumer same queue wrong slot"), + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + ..CommitRecord::unsupported(0, 0x100c, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + None, + ], + src_qtags: [ + Some(QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 2, + }), + None, + ], + 
dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut iq = vec![ + make_iq_entry( + 4, + 1, + PhysIq::AluIq0, + &BTreeSet::new(), + &BTreeSet::new(), + &uops, + ), + make_iq_entry( + 4, + 2, + PhysIq::AluIq0, + &BTreeSet::new(), + &BTreeSet::new(), + &uops, + ), + make_iq_entry( + 4, + 3, + PhysIq::AluIq0, + &BTreeSet::new(), + &BTreeSet::new(), + &uops, + ), + ]; + + let iq_tags = test_iq_tags(&iq); + let iq_owner_table = test_iq_owner_table(&iq, &iq_tags); + let crossbar = test_qtag_wait_crossbar(&iq, &uops); + update_iq_entries_for_cycle( + 5, + &mut iq, + &BTreeSet::new(), + &BTreeSet::new(), + &iq_owner_table, + &iq_tags, + &crossbar, + &uops, + ); + + assert!(iq[0].src_ready_nonspec[0]); + assert!(!iq[1].src_ready_nonspec[0]); + assert!(!iq[2].src_ready_nonspec[0]); + assert_eq!(iq_entry_wait_cause_from_state(&iq[0], 5, 0, &uops), None); + assert_eq!( + iq_entry_wait_cause_from_state(&iq[1], 5, 0, &uops), + Some("wait_qtag") + ); + assert_eq!( + iq_entry_wait_cause_from_state(&iq[2], 5, 0, &uops), + Some("wait_qtag") + ); +} + +#[test] +fn queue_wakeup_keeps_nonqueue_dependents_alive() { + let producer = isa::decode_word(3191065).expect("decode ldi ->{t,u,Rd}"); + let queue_consumer = isa::decode_word(8314).expect("decode c.sdi t#1 consumer"); + let reg_consumer = + isa::decode_word(enc_addi(3, 2, 1) as u64).expect("decode addi reg consumer"); + let uops = vec![ + CycleUop { + decoded: producer, + commit: CommitRecord { + wb_valid: 1, + wb_rd: 2, + ..CommitRecord::unsupported(0, 0x1000, 3191065, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + 
src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: Some(QueueWakeKind::T), + dst_logical_tag: Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + dst_qtag: Some(QTag { + phys_iq: PhysIq::AguIq0, + entry_id: 1, + }), + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(4), + done_cycle: None, + }, + CycleUop { + decoded: queue_consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: REG_T1, + ..CommitRecord::unsupported(0, 0x1004, 8314, 2, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [Some(QueueWakeKind::T), None], + src_logical_tags: [ + Some(LogicalQueueTag { + kind: QueueWakeKind::T, + tag: 0, + }), + None, + ], + src_qtags: [ + Some(QTag { + phys_iq: PhysIq::AguIq0, + entry_id: 1, + }), + None, + ], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: reg_consumer, + commit: CommitRecord { + src0_valid: 1, + src0_reg: 2, + ..CommitRecord::unsupported(0, 0x1008, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: 
Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut iq = vec![ + make_iq_entry( + 4, + 1, + PhysIq::StdIq0, + &BTreeSet::new(), + &BTreeSet::new(), + &uops, + ), + make_iq_entry( + 4, + 2, + PhysIq::AluIq0, + &BTreeSet::new(), + &BTreeSet::new(), + &uops, + ), + ]; + + let iq_tags = test_iq_tags(&iq); + let iq_owner_table = test_iq_owner_table(&iq, &iq_tags); + let crossbar = test_qtag_wait_crossbar(&iq, &uops); + update_iq_entries_for_cycle( + 5, + &mut iq, + &BTreeSet::new(), + &BTreeSet::new(), + &iq_owner_table, + &iq_tags, + &crossbar, + &uops, + ); + + assert!(iq[0].src_ready_nonspec[0]); + assert!(iq[1].src_ready_nonspec[0]); + assert_eq!(iq_entry_wait_cause_from_state(&iq[0], 5, 0, &uops), None); + assert_eq!(iq_entry_wait_cause_from_state(&iq[1], 5, 0, &uops), None); +} + +#[test] +fn ld_gen_vec_propagates_through_dependency_chain() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: Some(4), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()), + deps: [Some(0), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + 
src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1008, 0, 4, &isa::BlockMeta::default()), + deps: [Some(1), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + assert_eq!(dep_load_gen_vec(2, 4, &uops), LD_GEN_E1); + assert_eq!(dep_load_gen_vec(2, 5, &uops), LD_GEN_E2); + assert_eq!(dep_load_gen_vec(2, 6, &uops), LD_GEN_E3); +} + +#[test] +fn miss_pending_suppresses_ld_e4_dependent_pick() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: true, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(12), + 
miss_pending_until: Some(12), + e1_cycle: Some(4), + e4_cycle: Some(7), + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord { + src0_valid: 1, + src0_reg: 2, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [Some(0), None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + assert!(miss_pending_active(8, &uops)); + assert_eq!(dep_load_gen_vec(0, 8, &uops), LD_GEN_E4); + assert!(!iq_entry_ready(1, 8, 0, &uops)); + + let mut replayed = uops.clone(); + replayed[0].miss_pending_until = None; + replayed[0].e1_cycle = Some(12); + replayed[0].e4_cycle = None; + replayed[0].pick_wakeup_visible = Some(13); + replayed[0].data_ready_visible = Some(16); + assert!(iq_entry_ready(1, 13, 0, &replayed)); +} + +#[test] +fn s2_stalls_when_iq_is_full() { + let mut pipeline = StageQueues::default(); + pipeline.frontend[10].push_back(0); + let mut iq = (0..IQ_CAPACITY) + .map(|seq| IqEntry { + seq, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }) + .chain((IQ_CAPACITY..(IQ_CAPACITY * 2)).map(|seq| IqEntry { + seq, + phys_iq: PhysIq::SharedIq1, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + })) + .collect::>(); + let mut rob = VecDeque::new(); + let mut uops = vec![CycleUop { + decoded: isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode 
addi"), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }]; + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.frontend[10].iter().copied().collect::>(), + vec![0] + ); + assert_eq!(iq.len(), IQ_CAPACITY * 2); +} + +#[test] +fn s2_spills_third_same_cycle_alu_enqueue_to_shared_iq() { + let mut pipeline = StageQueues::default(); + pipeline.frontend[10].extend([0, 1, 2]); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = (0..3) + .map(|_| CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }) + .collect::>(); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert!(pipeline.frontend[10].is_empty()); + assert_eq!( + iq.iter() + .map(|entry| (entry.seq, entry.phys_iq)) + 
.collect::>(), + vec![ + (0, PhysIq::AluIq0), + (1, PhysIq::AluIq0), + (2, PhysIq::SharedIq1), + ] + ); +} + +#[test] +fn s2_shared_only_enqueue_keeps_oldest_two_when_ports_exhausted() { + let mut pipeline = StageQueues::default(); + pipeline.frontend[10].extend([0, 1, 2]); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = (0..3) + .map(|_| { + let mut sys_decoded = decoded.clone(); + sys_decoded.uop_group = "SYS".to_string(); + CycleUop { + decoded: sys_decoded, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + } + }) + .collect::>(); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + iq.iter() + .map(|entry| (entry.seq, entry.phys_iq)) + .collect::>(), + vec![(0, PhysIq::SharedIq1), (1, PhysIq::SharedIq1)] + ); + assert_eq!( + pipeline.frontend[10].iter().copied().collect::>(), + vec![2] + ); +} + +#[test] +fn s2_allocates_distinct_qtags_within_one_physical_iq() { + let mut pipeline = StageQueues::default(); + pipeline.frontend[10].extend([0, 1]); + let mut iq = Vec::new(); + let mut rob = VecDeque::new(); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = (0..2) + .map(|_| CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + 
src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }) + .collect::>(); + + dispatch_to_iq_and_bypass(0, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.iq_tags.get(&0), + Some(&QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 0, + }) + ); + assert_eq!( + pipeline.iq_tags.get(&1), + Some(&QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 1, + }) + ); + assert_eq!(pipeline.iq_owner_table[PhysIq::AluIq0.index()][0], Some(0)); + assert_eq!(pipeline.iq_owner_table[PhysIq::AluIq0.index()][1], Some(1)); +} + +#[test] +fn i2_deallocation_releases_qtag_for_reuse() { + let mut pipeline = StageQueues::default(); + pipeline.i1 = VecDeque::from([0usize]); + pipeline.iq_tags.insert( + 0, + QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 0, + }, + ); + let mut iq = vec![IqEntry { + seq: 0, + phys_iq: PhysIq::AluIq0, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }]; + + advance_i1_to_i2(&mut pipeline, &mut iq); + + assert!(pipeline.iq_tags.is_empty()); + assert_eq!(pipeline.iq_owner_table[PhysIq::AluIq0.index()][0], None); + + pipeline.frontend[10].push_back(1); + let mut rob = VecDeque::new(); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + 
dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + dispatch_to_iq_and_bypass(1, &mut pipeline, &mut iq, &mut rob, &mut uops); + + assert_eq!( + pipeline.iq_tags.get(&1), + Some(&QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 0, + }) + ); + assert_eq!(pipeline.iq_owner_table[PhysIq::AluIq0.index()][0], Some(0)); +} + +#[test] +fn redirect_flush_prunes_younger_qtags() { + let mut pipeline = StageQueues::default(); + pipeline.iq_tags.insert( + 1, + QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 0, + }, + ); + pipeline.iq_tags.insert( + 2, + QTag { + phys_iq: PhysIq::SharedIq1, + entry_id: 0, + }, + ); + let mut iq = vec![ + IqEntry { + seq: 1, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 2, + phys_iq: PhysIq::SharedIq1, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + ]; + let mut 
rob = VecDeque::from([1usize, 2]); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let boundary = isa::decode_word(2048).expect("decode c.bstart.std"); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x0ffc, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: None, + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: boundary, + commit: CommitRecord::unsupported(4, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: Some(PhysIq::BruIq), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(4, 0x1004, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::SharedIq1), + 
pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + prune_speculative_state_on_redirect(7, &mut pipeline, &mut iq, &mut rob, &uops); + + assert_eq!( + pipeline.iq_tags, + BTreeMap::from([( + 1usize, + QTag { + phys_iq: PhysIq::AluIq0, + entry_id: 0, + }, + )]) + ); + assert_eq!( + iq.iter().map(|entry| entry.seq).collect::>(), + vec![1] + ); + assert_eq!(pipeline.iq_owner_table[PhysIq::AluIq0.index()][0], Some(0)); + assert_eq!(pipeline.iq_owner_table[PhysIq::SharedIq1.index()][0], None); +} + +#[test] +fn advance_execute_injects_configured_load_miss() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut pipeline = StageQueues::default(); + pipeline.e4.push_back(0); + pipeline.lhq.push_back(0); + let mut uops = vec![CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: Some(4), + e4_cycle: Some(7), + w1_cycle: None, + done_cycle: None, + }]; + + advance_execute( + 9, + &mut pipeline, + &mut uops, + &CycleRunOptions { + max_cycles: 32, + load_miss_every: Some(1), + load_miss_penalty: 4, + }, + ); + + assert!(pipeline.w1.is_empty()); + assert_eq!(pipeline.liq.len(), 1); + assert_eq!(pipeline.liq[0].seq, 0); + assert_eq!(pipeline.liq[0].refill_ready_cycle, 13); + assert_eq!(pipeline.mdb.len(), 1); + assert_eq!(pipeline.mdb[0].seq, 0); + assert!(pipeline.lhq.is_empty()); + 
assert!(uops[0].miss_injected); + assert_eq!(uops[0].miss_pending_until, Some(13)); + assert_eq!(uops[0].pick_wakeup_visible, None); + assert_eq!(uops[0].data_ready_visible, None); + assert_eq!(uops[0].e1_cycle, None); + assert_eq!(uops[0].e4_cycle, None); +} + +#[test] +fn advance_liq_requeues_oldest_ready_load_into_e1() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut pipeline = StageQueues::default(); + pipeline.liq = VecDeque::from([ + LiqEntry { + seq: 1, + refill_ready_cycle: 11, + }, + LiqEntry { + seq: 0, + refill_ready_cycle: 11, + }, + ]); + pipeline.mdb = VecDeque::from([ + MdbEntry { + seq: 1, + refill_ready_cycle: 11, + }, + MdbEntry { + seq: 0, + refill_ready_cycle: 11, + }, + ]); + let mut uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: true, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: Some(11), + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(1), + load_store_id: Some(1), + miss_injected: true, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: Some(11), 
+ e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let rob = VecDeque::from([0usize, 1]); + + advance_liq(11, &mut pipeline, &mut uops, &rob); + + assert_eq!(pipeline.e1.iter().copied().collect::>(), vec![0]); + assert_eq!( + pipeline + .liq + .iter() + .map(|entry| entry.seq) + .collect::>(), + vec![1] + ); + assert_eq!( + pipeline + .mdb + .iter() + .map(|entry| entry.seq) + .collect::>(), + vec![1] + ); + assert_eq!(pipeline.lhq.iter().copied().collect::>(), vec![0]); + assert_eq!(uops[0].miss_pending_until, None); + assert_eq!(uops[1].miss_pending_until, Some(11)); +} + +#[test] +fn emit_stage_events_reports_liq_lhq_and_mdb() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: true, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: Some(12), + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.liq.push_back(LiqEntry { + seq: 0, + refill_ready_cycle: 12, + }); + pipeline.lhq.push_back(0); + pipeline.mdb.push_back(MdbEntry { + seq: 0, + refill_ready_cycle: 12, + }); + let mut out = Vec::new(); + + emit_stage_events( + 10, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!(out.iter().any(|event| event.stage_id == "LIQ")); + assert!(out.iter().any(|event| event.stage_id == "LHQ")); + assert!(out.iter().any(|event| 
event.stage_id == "MDB")); +} + +#[test] +fn build_uops_assigns_monotonic_load_store_ids() { + let block = isa::BlockMeta::default(); + let commits = vec![ + CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x2000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, enc_addi(2, 0, 1) as u64, 4, &block) + }, + CommitRecord::unsupported(0, 0x1004, enc_addi(3, 0, 1) as u64, 4, &block), + CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x2008, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1008, enc_addi(4, 0, 1) as u64, 4, &block) + }, + ]; + let decoded = vec![ + isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"), + isa::decode_word(enc_addi(3, 0, 1) as u64).expect("decode addi"), + isa::decode_word(enc_addi(4, 0, 1) as u64).expect("decode addi"), + ]; + + let uops = build_uops(&commits, &decoded); + + assert_eq!(uops[0].load_store_id, Some(0)); + assert_eq!(uops[1].load_store_id, None); + assert_eq!(uops[2].load_store_id, Some(1)); +} + +#[test] +fn redirect_rebases_lsid_to_oldest_surviving_unissued_memory_uop() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let boundary = isa::decode_word(2048).expect("decode c.bstart.std"); + let mut pipeline = StageQueues::default(); + pipeline.lsid_issue_ptr = 9; + pipeline.lsid_complete_ptr = 9; + pipeline.lsid_cache_ptr = 9; + pipeline.frontend[0].push_back(2); + pipeline.i2.push_back(3); + pipeline.e1.push_back(0); + + let iq = vec![IqEntry { + seq: 1, + phys_iq: PhysIq::AguIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }]; + let rob = VecDeque::from([0usize, 1, 2, 3]); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x1000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: 
[None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: Some(4), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x1008, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(1), + load_store_id: Some(1), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: boundary, + commit: CommitRecord::unsupported(0, 0x1008, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: Some(PhysIq::BruIq), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(5), + done_cycle: Some(5), + }, + CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + 
mem_is_store: 1, + mem_addr: 0x1010, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x100c, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(2), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + rebase_lsid_on_redirect(5, &mut pipeline, &iq, &rob, &uops); + + assert_eq!(pipeline.lsid_issue_ptr, 1); + assert_eq!(pipeline.lsid_complete_ptr, 1); + assert_eq!(pipeline.lsid_cache_ptr, 0); +} + +#[test] +fn redirect_rebase_ignores_non_redirect_cycles() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut pipeline = StageQueues::default(); + pipeline.lsid_issue_ptr = 4; + pipeline.lsid_complete_ptr = 4; + pipeline.lsid_cache_ptr = 4; + pipeline.i2.push_back(0); + + let iq = Vec::new(); + let rob = VecDeque::from([0usize]); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x4000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(0), + data_ready_visible: Some(0), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: 
None, + }]; + + rebase_lsid_on_redirect(7, &mut pipeline, &iq, &rob, &uops); + + assert_eq!(pipeline.lsid_issue_ptr, 4); + assert_eq!(pipeline.lsid_complete_ptr, 4); + assert_eq!(pipeline.lsid_cache_ptr, 4); +} + +#[test] +fn redirect_prunes_younger_memory_owner_state() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let boundary = isa::decode_word(2048).expect("decode c.bstart.std"); + let mut pipeline = StageQueues::default(); + pipeline.stq.extend([0usize, 3]); + pipeline.lhq.extend([1usize, 4]); + pipeline.liq.push_back(LiqEntry { + seq: 0, + refill_ready_cycle: 9, + }); + pipeline.liq.push_back(LiqEntry { + seq: 3, + refill_ready_cycle: 9, + }); + pipeline.mdb.push_back(MdbEntry { + seq: 1, + refill_ready_cycle: 9, + }); + pipeline.mdb.push_back(MdbEntry { + seq: 4, + refill_ready_cycle: 9, + }); + pipeline.scb.push_back(ScbEntry { + seq: 2, + enqueue_cycle: 4, + }); + pipeline.scb.push_back(ScbEntry { + seq: 5, + enqueue_cycle: 4, + }); + pipeline.l1d.push_back(L1dEntry { + seq: 1, + kind: L1dTxnKind::LoadHit, + ready_cycle: 6, + }); + pipeline.l1d.push_back(L1dEntry { + seq: 4, + kind: L1dTxnKind::StoreDrain, + ready_cycle: 6, + }); + + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x1000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: 
decoded.clone(), + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x1008, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(1), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: boundary, + commit: CommitRecord::unsupported(0, 0x1008, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: Some(PhysIq::BruIq), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(5), + done_cycle: Some(5), + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x1010, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x100c, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(2), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + 
pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x1018, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1010, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(1), + load_store_id: Some(3), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x1020, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1014, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(4), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + prune_memory_owner_state_on_redirect(5, &mut pipeline, &uops); + + assert_eq!(pipeline.stq.iter().copied().collect::>(), vec![0]); + assert_eq!(pipeline.lhq.iter().copied().collect::>(), vec![1]); + assert_eq!( + pipeline + .liq + .iter() + .map(|entry| entry.seq) + .collect::>(), + vec![0] + ); + assert_eq!( + pipeline + 
.mdb + .iter() + .map(|entry| entry.seq) + .collect::>(), + vec![1] + ); + assert_eq!( + pipeline + .scb + .iter() + .map(|entry| entry.seq) + .collect::>(), + vec![2] + ); + assert_eq!( + pipeline + .l1d + .iter() + .map(|entry| entry.seq) + .collect::>(), + vec![1] + ); +} + +#[test] +fn redirect_prunes_younger_frontend_iq_backend_and_rob_state() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let boundary = isa::decode_word(2048).expect("decode c.bstart.std"); + let mut pipeline = StageQueues::default(); + pipeline.frontend[0].extend([1usize, 4]); + pipeline.frontend[7].extend([2usize, 5]); + pipeline.p1.extend([1usize, 4]); + pipeline.i1.extend([2usize, 5]); + pipeline.i2.extend([1usize, 6]); + pipeline.e1.extend([0usize, 4]); + pipeline.e2.extend([1usize, 5]); + pipeline.e3.extend([2usize, 6]); + pipeline.e4.extend([1usize, 7]); + pipeline.w1.extend([2usize, 8]); + pipeline.w2.extend([0usize, 9]); + let mut iq = vec![ + IqEntry { + seq: 1, + phys_iq: PhysIq::AluIq0, + inflight: false, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + IqEntry { + seq: 6, + phys_iq: PhysIq::AguIq0, + inflight: true, + src_valid: [false; 2], + src_ready_nonspec: [false; 2], + src_ready_spec: [false; 2], + src_wait_qtag: [false; 2], + }, + ]; + let mut rob = VecDeque::from([0usize, 1, 2, 6]); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + 
miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: boundary, + commit: CommitRecord::unsupported(0, 0x1008, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: Some(0x2000), + phys_iq: Some(PhysIq::BruIq), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(5), + done_cycle: Some(5), + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x100c, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + 
miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1010, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1014, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AluIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1018, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: 
None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x101c, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1020, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1024, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: false, + load_ordinal: None, + load_store_id: None, + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(4), + data_ready_visible: Some(4), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: 
None, + w1_cycle: None, + done_cycle: None, + }, + ]; + + prune_speculative_state_on_redirect(5, &mut pipeline, &mut iq, &mut rob, &uops); + + assert_eq!( + pipeline.frontend[0].iter().copied().collect::>(), + vec![1] + ); + assert_eq!( + pipeline.frontend[7].iter().copied().collect::>(), + vec![2] + ); + assert_eq!(pipeline.p1.iter().copied().collect::>(), vec![1]); + assert_eq!(pipeline.i1.iter().copied().collect::>(), vec![2]); + assert_eq!(pipeline.i2.iter().copied().collect::>(), vec![1]); + assert_eq!(pipeline.e1.iter().copied().collect::>(), vec![0]); + assert_eq!(pipeline.e2.iter().copied().collect::>(), vec![1]); + assert_eq!(pipeline.e3.iter().copied().collect::>(), vec![2]); + assert_eq!(pipeline.e4.iter().copied().collect::>(), vec![1]); + assert_eq!(pipeline.w1.iter().copied().collect::>(), vec![2]); + assert_eq!(pipeline.w2.iter().copied().collect::>(), vec![0]); + assert_eq!( + iq.iter().map(|entry| entry.seq).collect::>(), + vec![1] + ); + assert_eq!(rob.iter().copied().collect::>(), vec![0, 1, 2]); +} + +#[test] +fn i2_waits_for_matching_lsid_before_memory_issue() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x5000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(1), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(5), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }]; + let mut i2 = VecDeque::from([0usize]); + let mut e1 = 
VecDeque::new(); + let mut lhq = VecDeque::new(); + let mut stq = VecDeque::new(); + let mut lsid_issue_ptr = 0usize; + let mut lsid_complete_ptr = 0usize; + + assert!(i2_waits_on_lsid(0, 5, lsid_issue_ptr, &uops)); + advance_i2( + 5, + &mut i2, + &mut e1, + &mut lhq, + &mut stq, + &mut lsid_issue_ptr, + &mut lsid_complete_ptr, + &uops, + ); + + assert_eq!(i2.iter().copied().collect::>(), vec![0]); + assert!(e1.is_empty()); + assert_eq!(lsid_issue_ptr, 0); + assert_eq!(lsid_complete_ptr, 0); + + lsid_issue_ptr = 1; + lsid_complete_ptr = 1; + advance_i2( + 5, + &mut i2, + &mut e1, + &mut lhq, + &mut stq, + &mut lsid_issue_ptr, + &mut lsid_complete_ptr, + &uops, + ); + + assert!(i2.is_empty()); + assert_eq!(e1.iter().copied().collect::>(), vec![0]); + assert_eq!(lhq.iter().copied().collect::>(), vec![0]); + assert_eq!(lsid_issue_ptr, 2); + assert_eq!(lsid_complete_ptr, 2); +} + +#[test] +fn store_enters_stq_at_i2_confirmation() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x2000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(5), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }]; + let mut i2 = VecDeque::from([0usize]); + let mut e1 = VecDeque::new(); + let mut lhq = VecDeque::new(); + let mut stq = VecDeque::new(); + + let mut lsid_issue_ptr = 0usize; + let mut lsid_complete_ptr = 0usize; + 
advance_i2( + 5, + &mut i2, + &mut e1, + &mut lhq, + &mut stq, + &mut lsid_issue_ptr, + &mut lsid_complete_ptr, + &uops, + ); + + assert!(i2.is_empty()); + assert_eq!(e1.iter().copied().collect::>(), vec![0]); + assert!(lhq.is_empty()); + assert_eq!(stq.iter().copied().collect::>(), vec![0]); + assert_eq!(lsid_issue_ptr, 1); + assert_eq!(lsid_complete_ptr, 1); +} + +#[test] +fn retired_store_moves_from_stq_to_scb() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let mut rob = VecDeque::from([0usize]); + let mut committed = Vec::new(); + let mut retired_seqs = Vec::new(); + let mut stage_events = Vec::new(); + let mut pipeline = StageQueues::default(); + pipeline.stq.push_back(0); + let mut uops = vec![CycleUop { + decoded: isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"), + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x2000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: Some(7), + done_cycle: Some(8), + }]; + + retire_ready( + 9, + &runtime, + &mut rob, + &mut committed, + &mut retired_seqs, + &mut pipeline, + &mut uops, + &mut stage_events, + ); + + assert!(pipeline.stq.is_empty()); + assert_eq!(pipeline.scb.len(), 1); + assert_eq!(pipeline.scb[0].seq, 0); + assert_eq!(pipeline.scb[0].enqueue_cycle, 9); +} + +#[test] +fn emit_stage_events_marks_store_forwarded_loads() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = 
isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x3000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: None, + data_ready_visible: None, + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x3000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(1), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: None, + data_ready_visible: Some(10), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: Some(9), + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut pipeline = StageQueues::default(); + pipeline.stq.push_back(0); + pipeline.e4.push_back(1); + let mut out = Vec::new(); + + emit_stage_events( + 9, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!( + out.iter() + .any(|event| event.stage_id == "STQ" && event.row_id == "uop0") + ); + assert!(out.iter().any(|event| { + event.stage_id == "E4" && event.row_id == "uop1" 
&& event.cause == "ld_store_forward" + })); +} + +#[test] +fn load_hit_transitions_through_l1d_before_w1() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = vec![CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x4000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(6), + data_ready_visible: Some(9), + miss_pending_until: None, + e1_cycle: Some(6), + e4_cycle: Some(8), + w1_cycle: None, + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.e4.push_back(0); + pipeline.lhq.push_back(0); + + advance_execute(8, &mut pipeline, &mut uops, &CycleRunOptions::default()); + assert!(pipeline.e4.is_empty()); + assert!(pipeline.w1.is_empty()); + assert_eq!(pipeline.lhq.iter().copied().collect::>(), vec![0]); + assert_eq!(pipeline.l1d.len(), 1); + assert_eq!(pipeline.l1d[0].seq, 0); + assert_eq!(pipeline.l1d[0].kind, L1dTxnKind::LoadHit); + assert_eq!(pipeline.l1d[0].ready_cycle, 9); + assert_eq!(pipeline.lsid_cache_ptr, 0); + + advance_l1d(8, &mut pipeline); + assert!(pipeline.w1.is_empty()); + assert_eq!(pipeline.lhq.iter().copied().collect::>(), vec![0]); + assert_eq!(pipeline.lsid_cache_ptr, 0); + + advance_l1d(9, &mut pipeline); + assert_eq!(pipeline.w1.iter().copied().collect::>(), vec![0]); + assert!(pipeline.l1d.is_empty()); + assert!(pipeline.lhq.is_empty()); + assert_eq!(pipeline.lsid_cache_ptr, 1); +} + +#[test] +fn scb_ready_entry_moves_through_l1d_drain() { + let mut pipeline = StageQueues::default(); + 
let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x4000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: Some(8), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: Some(8), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }]; + pipeline.scb.push_back(ScbEntry { + seq: 3, + enqueue_cycle: 9, + }); + pipeline.scb.clear(); + pipeline.scb.push_back(ScbEntry { + seq: 0, + enqueue_cycle: 9, + }); + + advance_scb(10, &mut pipeline, &uops); + assert!(pipeline.scb.is_empty()); + assert_eq!(pipeline.l1d.len(), 1); + assert_eq!(pipeline.l1d[0].seq, 0); + assert_eq!(pipeline.l1d[0].kind, L1dTxnKind::StoreDrain); + assert_eq!(pipeline.l1d[0].ready_cycle, 11); + + advance_l1d(10, &mut pipeline); + assert_eq!(pipeline.l1d.len(), 1); + assert_eq!(pipeline.lsid_cache_ptr, 0); + + advance_l1d(11, &mut pipeline); + assert!(pipeline.l1d.is_empty()); + assert_eq!(pipeline.lsid_cache_ptr, 1); +} + +#[test] +fn load_hit_waits_for_cache_owner_turn() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let mut uops = vec![CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x4100, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + 
dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(1), + load_store_id: Some(1), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(6), + data_ready_visible: Some(9), + miss_pending_until: None, + e1_cycle: Some(6), + e4_cycle: Some(8), + w1_cycle: None, + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.e4.push_back(0); + pipeline.lhq.push_back(0); + pipeline.lsid_cache_ptr = 0; + + advance_execute(8, &mut pipeline, &mut uops, &CycleRunOptions::default()); + assert_eq!(pipeline.e4.iter().copied().collect::>(), vec![0]); + assert!(pipeline.l1d.is_empty()); + + pipeline.lsid_cache_ptr = 1; + advance_execute(8, &mut pipeline, &mut uops, &CycleRunOptions::default()); + assert!(pipeline.e4.is_empty()); + assert_eq!(pipeline.l1d.len(), 1); + assert_eq!(pipeline.l1d[0].seq, 0); +} + +#[test] +fn scb_drain_waits_for_cache_owner_turn() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 1, + mem_addr: 0x4200, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: false, + is_store: true, + load_ordinal: None, + load_store_id: Some(1), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::StdIq0), + pick_wakeup_visible: Some(8), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: Some(8), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.scb.push_back(ScbEntry { + seq: 0, + enqueue_cycle: 9, + }); + pipeline.lsid_cache_ptr = 0; + + 
advance_scb(10, &mut pipeline, &uops); + assert_eq!(pipeline.scb.len(), 1); + assert!(pipeline.l1d.is_empty()); + + pipeline.lsid_cache_ptr = 1; + advance_scb(10, &mut pipeline, &uops); + assert!(pipeline.scb.is_empty()); + assert_eq!(pipeline.l1d.len(), 1); + assert_eq!(pipeline.l1d[0].seq, 0); +} + +#[test] +fn emit_stage_events_includes_l1d_transactions() { + let runtime = sample_runtime(&[enc_addi(2, 0, 1)], &[]); + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![CycleUop { + decoded, + commit: CommitRecord { + mem_valid: 1, + mem_is_store: 0, + mem_addr: 0x4000, + mem_size: 8, + ..CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()) + }, + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(6), + data_ready_visible: Some(9), + miss_pending_until: None, + e1_cycle: Some(6), + e4_cycle: Some(8), + w1_cycle: None, + done_cycle: None, + }]; + let mut pipeline = StageQueues::default(); + pipeline.l1d.push_back(L1dEntry { + seq: 0, + kind: L1dTxnKind::LoadHit, + ready_cycle: 9, + }); + let mut out = Vec::new(); + + emit_stage_events( + 9, + &runtime, + &pipeline, + &[], + &VecDeque::new(), + &uops, + &mut out, + ); + + assert!(out.iter().any(|event| { + event.stage_id == "L1D" && event.row_id == "uop0" && event.cause == "load_hit_resp" + })); +} + +#[test] +fn p1_winners_stay_live_when_i1_is_full() { + let mut i1 = VecDeque::from([10usize, 11, 12, 13]); + let mut admitted_i1 = VecDeque::from([20usize, 21]); + let mut p1 = VecDeque::new(); + + advance_p1_to_i1(&mut i1, &mut admitted_i1, &mut p1); + + assert_eq!(i1.iter().copied().collect::>(), vec![10, 11, 12, 
13]); + assert!(admitted_i1.is_empty()); + assert_eq!(p1.iter().copied().collect::>(), vec![20, 21]); +} + +#[test] +fn advance_i2_respects_single_load_slot() { + let decoded = isa::decode_word(enc_addi(2, 0, 1) as u64).expect("decode addi"); + let uops = vec![ + CycleUop { + decoded: decoded.clone(), + commit: CommitRecord::unsupported(0, 0x1000, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(0), + load_store_id: Some(0), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: Some(4), + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + CycleUop { + decoded, + commit: CommitRecord::unsupported(0, 0x1004, 0, 4, &isa::BlockMeta::default()), + deps: [None, None], + src_queue_kinds: [None, None], + src_logical_tags: [None, None], + src_qtags: [None, None], + dst_queue_kind: None, + dst_logical_tag: None, + dst_qtag: None, + bypass_d2: false, + is_load: true, + is_store: false, + load_ordinal: Some(1), + load_store_id: Some(1), + miss_injected: false, + redirect_target: None, + phys_iq: Some(PhysIq::AguIq0), + pick_wakeup_visible: Some(5), + data_ready_visible: Some(8), + miss_pending_until: None, + e1_cycle: None, + e4_cycle: None, + w1_cycle: None, + done_cycle: None, + }, + ]; + let mut i2 = VecDeque::from([1usize]); + let mut e1 = VecDeque::from([0usize]); + let mut lhq = VecDeque::from([0usize]); + let mut stq = VecDeque::new(); + + let mut lsid_issue_ptr = 0usize; + let mut lsid_complete_ptr = 0usize; + advance_i2( + 8, + &mut i2, + &mut e1, + &mut lhq, + &mut stq, + &mut lsid_issue_ptr, + &mut lsid_complete_ptr, + &uops, + ); + + assert_eq!(e1.iter().copied().collect::>(), 
vec![0]); + assert_eq!(i2.iter().copied().collect::>(), vec![1]); + assert_eq!(lhq.iter().copied().collect::>(), vec![0]); +} + +#[test] +fn d1_stalls_when_rob_group_would_overflow() { + let mut pipeline = StageQueues::default(); + pipeline.frontend[6].push_back(200); + pipeline.frontend[6].push_back(201); + let mut rob = (0..(ROB_CAPACITY - 1)) + .map(|idx| idx + 1000) + .collect::>(); + + advance_frontend(&mut pipeline, &mut rob); + + assert_eq!( + pipeline.frontend[6].iter().copied().collect::>(), + vec![200, 201] + ); + assert_eq!(pipeline.frontend[7].len(), 0); + assert_eq!(rob.len(), ROB_CAPACITY - 1); +} + +fn sample_runtime(words: &[u32], extra_regions: &[MemoryRegion]) -> GuestRuntime { + let text_base = 0x1000u64; + let mut text = Vec::with_capacity(words.len() * 4); + for word in words { + text.extend_from_slice(&word.to_le_bytes()); + } + + let mut regions = vec![MemoryRegion { + base: text_base, + size: 0x1000, + flags: 0b101, + data: { + let mut bytes = vec![0; 0x1000]; + bytes[..text.len()].copy_from_slice(&text); + bytes + }, + }]; + regions.extend_from_slice(extra_regions); + regions.push(MemoryRegion { + base: 0x0000_7FFF_E000, + size: 0x2000, + flags: 0b110, + data: vec![0; 0x2000], + }); + + GuestRuntime { + image: LoadedElf { + path: PathBuf::from("sample.elf"), + entry: text_base, + little_endian: true, + bits: 64, + machine: 0, + segments: vec![SegmentImage { + vaddr: text_base, + mem_size: text.len() as u64, + file_size: text.len() as u64, + flags: 0b101, + data: text, + }], + }, + config: RuntimeConfig::default(), + state: isa::ArchitecturalState::new(text_base), + block: isa::BlockMeta::default(), + memory: GuestMemory { regions }, + boot: BootInfo { + entry_pc: text_base, + stack_top: 0x0000_7FFF_F000, + stack_pointer: 0x0000_7FFF_F000, + argc: 0, + }, + fd_table: HashMap::from([(0, 0), (1, 1), (2, 2)]), + } +} + +fn enc_addi(rd: u32, rs1: u32, imm: u32) -> u32 { + ((imm & 0x0fff) << 20) | (rs1 << 15) | (rd << 7) | 0x15 +} + +fn 
enc_acrc(rst_type: u32) -> u32 { + ((rst_type & 0xf) << 20) | 0x302b +} diff --git a/crates/camodel/src/trace/emit.rs b/crates/camodel/src/trace/emit.rs new file mode 100644 index 0000000..09eb2c6 --- /dev/null +++ b/crates/camodel/src/trace/emit.rs @@ -0,0 +1 @@ +// Trace-emission namespace placeholder. diff --git a/crates/camodel/src/trace/labels.rs b/crates/camodel/src/trace/labels.rs new file mode 100644 index 0000000..d7117d3 --- /dev/null +++ b/crates/camodel/src/trace/labels.rs @@ -0,0 +1 @@ +// Trace-label namespace placeholder. diff --git a/crates/camodel/src/trace/mod.rs b/crates/camodel/src/trace/mod.rs new file mode 100644 index 0000000..894ea8b --- /dev/null +++ b/crates/camodel/src/trace/mod.rs @@ -0,0 +1,389 @@ +pub mod emit; +pub mod labels; + +use std::collections::VecDeque; + +use isa::{ + StageTraceEvent, TRAP_BRU_RECOVERY_NOT_BSTART, TRAP_DYNAMIC_TARGET_MISSING, + TRAP_DYNAMIC_TARGET_NOT_BSTART, TRAP_DYNAMIC_TARGET_STALE, TRAP_SETRET_NOT_ADJACENT, +}; +use runtime::GuestRuntime; + +use crate::{ + CycleUop, FRONTEND_STAGE_NAMES, IqEntry, L1dTxnKind, StageQueues, branch_kind_label, + call_materialization_kind_label, dynamic_target_source_kind_label, i2_issue_eligible, + i2_waits_on_lsid, iq_entry_wait_cause_from_state, live_boundary_epoch_for_seq, + live_branch_kind_for_seq, live_call_materialization_kind_for_seq, + live_control_target_owner_row_id_for_seq, live_dynamic_target_producer_kind_for_seq, + live_dynamic_target_setup_epoch_for_seq, live_dynamic_target_source_epoch_for_seq, + live_dynamic_target_source_kind_for_seq, live_dynamic_target_source_owner_row_id_for_seq, + live_return_consumer_kind_for_seq, live_rob_checkpoint_id_for_seq, load_forward_visible, + ready_iq_winners, redirect_resolve_cycle, resolved_frontend_redirect, + return_consumer_kind_label, +}; + +pub(crate) fn tag_stage_cycles(cycle: u64, pipeline: &StageQueues, uops: &mut [CycleUop]) { + for &seq in &pipeline.e1 { + let uop = &mut uops[seq]; + if uop.e1_cycle.is_none() { + 
uop.e1_cycle = Some(cycle); + if uop.is_load { + uop.pick_wakeup_visible = Some(cycle + 1); + } + } + } + for &seq in &pipeline.e4 { + let uop = &mut uops[seq]; + if uop.e4_cycle.is_none() { + uop.e4_cycle = Some(cycle); + uop.data_ready_visible = Some(cycle + 1); + } + } + for &seq in &pipeline.w1 { + let uop = &mut uops[seq]; + if uop.w1_cycle.is_none() { + uop.w1_cycle = Some(cycle); + if !uop.is_load { + uop.pick_wakeup_visible = Some(cycle + 1); + uop.data_ready_visible = Some(cycle + 1); + } + } + } + for &seq in &pipeline.w2 { + let uop = &mut uops[seq]; + if uop.done_cycle.is_none() { + uop.done_cycle = Some(cycle); + } + } +} + +pub(crate) fn emit_stage_events( + cycle: u64, + runtime: &GuestRuntime, + pipeline: &StageQueues, + iq: &[IqEntry], + rob: &VecDeque, + uops: &[CycleUop], + out: &mut Vec, +) { + for (idx, queue) in pipeline.frontend.iter().enumerate() { + for &seq in queue { + out.push(stage_event( + cycle, + runtime, + uops, + seq, + FRONTEND_STAGE_NAMES[idx], + "resident", + )); + } + } + for entry in &pipeline.liq { + let cause = if entry.refill_ready_cycle <= cycle { + "eligible" + } else { + "wait_refill" + }; + out.push(stage_event(cycle, runtime, uops, entry.seq, "LIQ", cause)); + } + for &seq in &pipeline.lhq { + out.push(stage_event( + cycle, + runtime, + uops, + seq, + "LHQ", + "inflight_load", + )); + } + for entry in &pipeline.mdb { + let cause = if entry.refill_ready_cycle <= cycle { + "refill_ready" + } else { + "wait_refill" + }; + out.push(stage_event(cycle, runtime, uops, entry.seq, "MDB", cause)); + } + for &seq in &pipeline.stq { + out.push(stage_event( + cycle, + runtime, + uops, + seq, + "STQ", + "store_visible", + )); + } + for entry in &pipeline.scb { + let cause = if entry.enqueue_cycle < cycle { + "drain_ready" + } else { + "coalesce" + }; + out.push(stage_event(cycle, runtime, uops, entry.seq, "SCB", cause)); + } + for entry in &pipeline.l1d { + let cause = match entry.kind { + L1dTxnKind::LoadHit if entry.ready_cycle <= 
cycle => "load_hit_resp", + L1dTxnKind::LoadHit => "load_hit_req", + L1dTxnKind::StoreDrain if entry.ready_cycle <= cycle => "store_drain_resp", + L1dTxnKind::StoreDrain => "store_drain_req", + }; + out.push(stage_event(cycle, runtime, uops, entry.seq, "L1D", cause)); + } + let iq_ready_winners = ready_iq_winners(cycle, pipeline.lsid_issue_ptr, iq, uops, rob); + for entry in iq { + let cause = if entry.inflight { + "inflight" + } else if let Some(cause) = + iq_entry_wait_cause_from_state(entry, cycle, pipeline.lsid_issue_ptr, uops) + { + cause + } else if iq_ready_winners + .iter() + .any(|&(_, seq, phys_iq)| seq == entry.seq && phys_iq == entry.phys_iq) + { + "ready" + } else { + "wait_iq_age" + }; + out.push(stage_event(cycle, runtime, uops, entry.seq, "IQ", cause)); + } + for &seq in &pipeline.p1 { + out.push(stage_event(cycle, runtime, uops, seq, "P1", "pick")); + } + for &seq in &pipeline.i1 { + out.push(stage_event(cycle, runtime, uops, seq, "I1", "rf_arbitrate")); + } + for &seq in &pipeline.i2 { + let cause = if i2_issue_eligible(seq, cycle, pipeline.lsid_issue_ptr, uops) { + "issue_confirm" + } else if i2_waits_on_lsid(seq, cycle, pipeline.lsid_issue_ptr, uops) { + "wait_lsid" + } else { + "wait_forward" + }; + out.push(stage_event(cycle, runtime, uops, seq, "I2", cause)); + } + for &seq in &pipeline.e1 { + out.push(stage_event( + cycle, + runtime, + uops, + seq, + "E1", + if uops[seq].is_load { + "ld_spec_wakeup" + } else { + "execute" + }, + )); + } + for &seq in &pipeline.e2 { + out.push(stage_event(cycle, runtime, uops, seq, "E2", "execute")); + } + for &seq in &pipeline.e3 { + out.push(stage_event(cycle, runtime, uops, seq, "E3", "execute")); + } + for &seq in &pipeline.e4 { + let cause = if uops[seq].is_load && load_forward_visible(seq, pipeline, uops) { + "ld_store_forward" + } else { + "ld_data" + }; + out.push(stage_event(cycle, runtime, uops, seq, "E4", cause)); + } + for &seq in &pipeline.w1 { + out.push(stage_event(cycle, runtime, uops, seq, 
"W1", "wakeup")); + } + for &seq in &pipeline.w2 { + out.push(stage_event(cycle, runtime, uops, seq, "W2", "complete")); + } + if let Some(pending_trap) = pipeline + .pending_trap + .filter(|pending_trap| pending_trap.visible_cycle == cycle) + { + let target_source_kind = + live_dynamic_target_source_kind_for_seq(pending_trap.seq, pipeline, uops); + out.push(stage_event_with_meta( + cycle, + runtime, + uops, + pending_trap.seq, + "FLS", + pending_trap_stage_cause(pending_trap.cause, target_source_kind), + Some(pending_trap.checkpoint_id), + Some(pending_trap.cause), + Some(pending_trap.traparg0), + live_dynamic_target_setup_epoch_for_seq(pending_trap.seq, pipeline, uops), + live_boundary_epoch_for_seq(pending_trap.seq, pipeline, uops), + live_dynamic_target_source_owner_row_id_for_seq(pending_trap.seq, pipeline, uops) + .as_deref(), + live_dynamic_target_source_epoch_for_seq(pending_trap.seq, pipeline, uops), + live_control_target_owner_row_id_for_seq(pending_trap.seq, pipeline, uops).as_deref(), + live_dynamic_target_producer_kind_for_seq(pending_trap.seq, pipeline, uops) + .map(return_consumer_kind_label), + live_branch_kind_for_seq(pending_trap.seq, pipeline, uops).and_then(branch_kind_label), + live_return_consumer_kind_for_seq(pending_trap.seq, pipeline, uops) + .map(return_consumer_kind_label), + live_call_materialization_kind_for_seq(pending_trap.seq, pipeline, uops) + .map(call_materialization_kind_label), + target_source_kind.map(dynamic_target_source_kind_label), + )); + } + if let Some(redirect) = resolved_frontend_redirect(cycle, pipeline, uops) { + out.push(stage_event_with_meta( + cycle, + runtime, + uops, + redirect.source_seq, + "FLS", + if redirect.from_correction { + "redirect_br_corr" + } else { + "redirect_boundary" + }, + Some(redirect.checkpoint_id), + None, + None, + live_dynamic_target_setup_epoch_for_seq(redirect.source_seq, pipeline, uops), + live_boundary_epoch_for_seq(redirect.source_seq, pipeline, uops), + 
live_dynamic_target_source_owner_row_id_for_seq(redirect.source_seq, pipeline, uops) + .as_deref(), + live_dynamic_target_source_epoch_for_seq(redirect.source_seq, pipeline, uops), + live_control_target_owner_row_id_for_seq(redirect.source_seq, pipeline, uops) + .as_deref(), + live_dynamic_target_producer_kind_for_seq(redirect.source_seq, pipeline, uops) + .map(return_consumer_kind_label), + live_branch_kind_for_seq(redirect.source_seq, pipeline, uops) + .and_then(branch_kind_label), + live_return_consumer_kind_for_seq(redirect.source_seq, pipeline, uops) + .map(return_consumer_kind_label), + live_call_materialization_kind_for_seq(redirect.source_seq, pipeline, uops) + .map(call_materialization_kind_label), + live_dynamic_target_source_kind_for_seq(redirect.source_seq, pipeline, uops) + .map(dynamic_target_source_kind_label), + )); + } else { + for (seq, uop) in uops.iter().enumerate() { + if redirect_resolve_cycle(uop) == Some(cycle) { + out.push(stage_event_with_meta( + cycle, + runtime, + uops, + seq, + "FLS", + "redirect", + Some(live_rob_checkpoint_id_for_seq(seq, pipeline, uops)), + None, + None, + live_dynamic_target_setup_epoch_for_seq(seq, pipeline, uops), + live_boundary_epoch_for_seq(seq, pipeline, uops), + live_dynamic_target_source_owner_row_id_for_seq(seq, pipeline, uops).as_deref(), + live_dynamic_target_source_epoch_for_seq(seq, pipeline, uops), + live_control_target_owner_row_id_for_seq(seq, pipeline, uops).as_deref(), + live_dynamic_target_producer_kind_for_seq(seq, pipeline, uops) + .map(return_consumer_kind_label), + live_branch_kind_for_seq(seq, pipeline, uops).and_then(branch_kind_label), + live_return_consumer_kind_for_seq(seq, pipeline, uops) + .map(return_consumer_kind_label), + live_call_materialization_kind_for_seq(seq, pipeline, uops) + .map(call_materialization_kind_label), + live_dynamic_target_source_kind_for_seq(seq, pipeline, uops) + .map(dynamic_target_source_kind_label), + )); + } + } + } + for &seq in rob { + let cause = if 
uops[seq].done_cycle.is_some() { + "ready" + } else { + "wait_head" + }; + out.push(stage_event(cycle, runtime, uops, seq, "ROB", cause)); + } +} + +fn pending_trap_stage_cause( + cause: u64, + target_source_kind: Option, +) -> &'static str { + match cause { + TRAP_BRU_RECOVERY_NOT_BSTART => "bru_recovery_fault", + TRAP_DYNAMIC_TARGET_MISSING => "dynamic_target_missing", + TRAP_DYNAMIC_TARGET_STALE => match target_source_kind { + Some(crate::DynamicTargetSourceKind::CallReturnFused) + | Some(crate::DynamicTargetSourceKind::CallReturnAdjacentSetret) => { + "dynamic_target_stale_return" + } + Some(crate::DynamicTargetSourceKind::ArchTargetSetup) | None => { + "dynamic_target_stale_setup" + } + }, + TRAP_DYNAMIC_TARGET_NOT_BSTART => "dynamic_target_not_bstart", + TRAP_SETRET_NOT_ADJACENT => "call_header_fault", + _ => "trap_fault", + } +} + +pub(crate) fn stage_event( + cycle: u64, + runtime: &GuestRuntime, + uops: &[CycleUop], + seq: usize, + stage: &str, + cause: &str, +) -> StageTraceEvent { + stage_event_with_meta( + cycle, runtime, uops, seq, stage, cause, None, None, None, None, None, None, None, None, + None, None, None, None, None, + ) +} + +pub(crate) fn stage_event_with_meta( + cycle: u64, + runtime: &GuestRuntime, + uops: &[CycleUop], + seq: usize, + stage: &str, + cause: &str, + checkpoint_id: Option, + trap_cause: Option, + traparg0: Option, + target_setup_epoch: Option, + boundary_epoch: Option, + target_source_owner_row_id: Option<&str>, + target_source_epoch: Option, + target_owner_row_id: Option<&str>, + target_producer_kind: Option<&str>, + branch_kind: Option<&str>, + return_kind: Option<&str>, + call_materialization_kind: Option<&str>, + target_source_kind: Option<&str>, +) -> StageTraceEvent { + StageTraceEvent { + cycle, + row_id: format!("uop{seq}"), + stage_id: stage.to_string(), + lane_id: uops[seq] + .phys_iq + .map(|phys_iq| phys_iq.lane_id().to_string()) + .unwrap_or_else(|| runtime.block.lane_id.clone()), + stall: false, + cause: 
cause.to_string(), + checkpoint_id, + trap_cause, + traparg0, + target_setup_epoch, + boundary_epoch, + target_source_owner_row_id: target_source_owner_row_id.map(str::to_string), + target_source_epoch, + target_owner_row_id: target_owner_row_id.map(str::to_string), + target_producer_kind: target_producer_kind.map(str::to_string), + branch_kind: branch_kind.map(str::to_string), + return_kind: return_kind.map(str::to_string), + call_materialization_kind: call_materialization_kind.map(str::to_string), + target_source_kind: target_source_kind.map(str::to_string), + } +} diff --git a/crates/cosim/Cargo.toml b/crates/cosim/Cargo.toml new file mode 100644 index 0000000..04906d3 --- /dev/null +++ b/crates/cosim/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "cosim" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow.workspace = true +serde.workspace = true +serde_json.workspace = true +isa = { path = "../isa" } diff --git a/crates/cosim/src/compare/mod.rs b/crates/cosim/src/compare/mod.rs new file mode 100644 index 0000000..0120e38 --- /dev/null +++ b/crates/cosim/src/compare/mod.rs @@ -0,0 +1,146 @@ +use anyhow::{Context, Result, bail}; +use isa::CommitRecord; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::Path; + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum M1Message { + #[serde(rename = "start")] + Start { + boot_pc: u64, + trigger_pc: u64, + terminate_pc: u64, + snapshot_path: String, + seq_base: u64, + }, + #[serde(rename = "commit")] + Commit { + seq: u64, + #[serde(flatten)] + commit: CommitRecord, + }, + #[serde(rename = "end")] + End { reason: String }, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CosimMismatch { + pub index: usize, + pub field: String, + pub expected: String, + pub actual: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CosimReport { + 
pub matched_commits: usize, + pub mismatch: Option, +} + +pub fn load_commit_jsonl(path: impl AsRef) -> Result> { + let text = fs::read_to_string(path.as_ref()) + .with_context(|| format!("failed to read {}", path.as_ref().display()))?; + let mut out = Vec::new(); + for (lineno, line) in text.lines().enumerate() { + let trimmed = line.trim(); + if trimmed.is_empty() { + continue; + } + let rec: CommitRecord = serde_json::from_str(trimmed) + .with_context(|| format!("invalid commit JSON at line {}", lineno + 1))?; + out.push(rec); + } + Ok(out) +} + +pub fn compare_commit_streams(expected: &[CommitRecord], actual: &[CommitRecord]) -> CosimReport { + let count = expected.len().min(actual.len()); + for idx in 0..count { + let lhs = &expected[idx]; + let rhs = &actual[idx]; + if lhs.pc != rhs.pc { + return mismatch(idx, "pc", lhs.pc, rhs.pc); + } + if lhs.insn != rhs.insn { + return mismatch(idx, "insn", lhs.insn, rhs.insn); + } + if lhs.len != rhs.len { + return mismatch(idx, "len", lhs.len, rhs.len); + } + if lhs.wb_valid != rhs.wb_valid { + return mismatch(idx, "wb_valid", lhs.wb_valid, rhs.wb_valid); + } + if lhs.mem_valid != rhs.mem_valid { + return mismatch(idx, "mem_valid", lhs.mem_valid, rhs.mem_valid); + } + if lhs.trap_valid != rhs.trap_valid { + return mismatch(idx, "trap_valid", lhs.trap_valid, rhs.trap_valid); + } + if lhs.next_pc != rhs.next_pc { + return mismatch(idx, "next_pc", lhs.next_pc, rhs.next_pc); + } + } + + if expected.len() != actual.len() { + return CosimReport { + matched_commits: count, + mismatch: Some(CosimMismatch { + index: count, + field: "commit_count".to_string(), + expected: expected.len().to_string(), + actual: actual.len().to_string(), + }), + }; + } + + CosimReport { + matched_commits: count, + mismatch: None, + } +} + +pub fn require_cosim_match(report: &CosimReport) -> Result<()> { + if let Some(mismatch) = &report.mismatch { + bail!( + "cosim mismatch at commit {} field {}: expected={} actual={}", + mismatch.index, + 
mismatch.field, + mismatch.expected, + mismatch.actual + ); + } + Ok(()) +} + +fn mismatch( + index: usize, + field: &str, + expected: T, + actual: U, +) -> CosimReport { + CosimReport { + matched_commits: index, + mismatch: Some(CosimMismatch { + index, + field: field.to_string(), + expected: expected.to_string(), + actual: actual.to_string(), + }), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use isa::{BlockMeta, CommitRecord}; + + #[test] + fn comparison_finds_pc_mismatch() { + let lhs = CommitRecord::unsupported(0, 1, 2, 4, &BlockMeta::default()); + let rhs = CommitRecord::unsupported(0, 3, 2, 4, &BlockMeta::default()); + let report = compare_commit_streams(&[lhs], &[rhs]); + assert_eq!(report.mismatch.unwrap().field, "pc"); + } +} diff --git a/crates/cosim/src/lib.rs b/crates/cosim/src/lib.rs new file mode 100644 index 0000000..c4dadc7 --- /dev/null +++ b/crates/cosim/src/lib.rs @@ -0,0 +1,7 @@ +pub mod compare; +pub mod protocol; +pub mod qemu; + +pub use compare::{CosimMismatch, CosimReport, compare_commit_streams, require_cosim_match}; +pub use protocol::M1Message; +pub use qemu::load_commit_jsonl; diff --git a/crates/cosim/src/protocol/mod.rs b/crates/cosim/src/protocol/mod.rs new file mode 100644 index 0000000..2e7c898 --- /dev/null +++ b/crates/cosim/src/protocol/mod.rs @@ -0,0 +1 @@ +pub use crate::compare::M1Message; diff --git a/crates/cosim/src/qemu/mod.rs b/crates/cosim/src/qemu/mod.rs new file mode 100644 index 0000000..c0cb50e --- /dev/null +++ b/crates/cosim/src/qemu/mod.rs @@ -0,0 +1 @@ +pub use crate::compare::load_commit_jsonl; diff --git a/crates/dse/Cargo.toml b/crates/dse/Cargo.toml new file mode 100644 index 0000000..ca6be96 --- /dev/null +++ b/crates/dse/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "dse" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow.workspace = true +serde.workspace = true +toml.workspace = true +camodel = { path = "../camodel" 
} +elf = { path = "../elf" } +funcmodel = { path = "../funcmodel" } +isa = { path = "../isa" } +runtime = { path = "../runtime" } diff --git a/crates/dse/src/lib.rs b/crates/dse/src/lib.rs new file mode 100644 index 0000000..8906281 --- /dev/null +++ b/crates/dse/src/lib.rs @@ -0,0 +1,104 @@ +use anyhow::{Context, Result}; +use camodel::{CycleEngine, CycleRunOptions}; +use elf::load_static_elf; +use funcmodel::{FuncEngine, FuncRunOptions}; +use isa::EngineKind; +use runtime::{GuestRuntime, RuntimeConfig}; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::path::Path; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SweepSpec { + pub cases: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SweepCase { + pub name: String, + pub engine: EngineKind, + pub elf: String, + pub iterations: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SweepCaseReport { + pub name: String, + pub engine: EngineKind, + pub iterations: usize, + pub cycles: Vec, + pub commits: Vec, + pub exit_reason: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SweepReport { + pub cases: Vec, +} + +pub fn load_sweep_spec(path: impl AsRef) -> Result { + let text = fs::read_to_string(path.as_ref()) + .with_context(|| format!("failed to read {}", path.as_ref().display()))?; + toml::from_str(&text).with_context(|| format!("failed to parse {}", path.as_ref().display())) +} + +pub fn run_sweep(spec: &SweepSpec) -> Result { + let mut reports = Vec::new(); + let func = FuncEngine; + let cycle = CycleEngine; + for case in &spec.cases { + let mut cycles = Vec::new(); + let mut commits = Vec::new(); + let mut exit_reason = String::new(); + for _ in 0..case.iterations { + let image = load_static_elf(&case.elf)?; + let runtime = GuestRuntime::bootstrap(image, RuntimeConfig::default())?; + match case.engine { + EngineKind::Func => { + let bundle = func.run(&runtime, &FuncRunOptions::default())?; + 
cycles.push(bundle.result.metrics.cycles); + commits.push(bundle.result.metrics.commits); + exit_reason = bundle.result.metrics.exit_reason; + } + EngineKind::Cycle => { + let bundle = cycle.run(&runtime, &CycleRunOptions::default())?; + cycles.push(bundle.result.metrics.cycles); + commits.push(bundle.result.metrics.commits); + exit_reason = bundle.result.metrics.exit_reason; + } + } + } + reports.push(SweepCaseReport { + name: case.name.clone(), + engine: case.engine, + iterations: case.iterations, + cycles, + commits, + exit_reason, + }); + } + Ok(SweepReport { cases: reports }) +} + +pub fn render_markdown(report: &SweepReport) -> String { + let mut text = + String::from("| Case | Engine | Iterations | Avg cycles | Avg commits | Exit |\n"); + text.push_str("|---|---|---:|---:|---:|---|\n"); + for case in &report.cases { + let avg_cycles = if case.cycles.is_empty() { + 0.0 + } else { + case.cycles.iter().sum::() as f64 / case.cycles.len() as f64 + }; + let avg_commits = if case.commits.is_empty() { + 0.0 + } else { + case.commits.iter().sum::() as f64 / case.commits.len() as f64 + }; + text.push_str(&format!( + "| {} | {:?} | {} | {:.1} | {:.1} | {} |\n", + case.name, case.engine, case.iterations, avg_cycles, avg_commits, case.exit_reason + )); + } + text +} diff --git a/crates/elf/Cargo.toml b/crates/elf/Cargo.toml new file mode 100644 index 0000000..97589b9 --- /dev/null +++ b/crates/elf/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "elf" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow.workspace = true +goblin.workspace = true +serde.workspace = true diff --git a/crates/elf/src/lib.rs b/crates/elf/src/lib.rs new file mode 100644 index 0000000..c7b714e --- /dev/null +++ b/crates/elf/src/lib.rs @@ -0,0 +1,87 @@ +use anyhow::{Context, Result, bail}; +use goblin::elf::{Elf, program_header::PT_INTERP, program_header::PT_LOAD}; +use serde::{Deserialize, Serialize}; +use std::fs; +use 
std::path::{Path, PathBuf}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct SegmentImage { + pub vaddr: u64, + pub mem_size: u64, + pub file_size: u64, + pub flags: u32, + pub data: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct LoadedElf { + pub path: PathBuf, + pub entry: u64, + pub little_endian: bool, + pub bits: u8, + pub machine: u16, + pub segments: Vec, +} + +impl LoadedElf { + pub fn image_name(&self) -> String { + self.path + .file_name() + .and_then(|value| value.to_str()) + .unwrap_or("a.out") + .to_string() + } +} + +pub fn load_static_elf(path: impl AsRef) -> Result { + let path = path.as_ref(); + let bytes = fs::read(path).with_context(|| format!("failed to read {}", path.display()))?; + let elf = + Elf::parse(&bytes).with_context(|| format!("failed to parse ELF {}", path.display()))?; + + if elf.is_64 != true { + bail!( + "expected 64-bit ELF, found 32-bit image at {}", + path.display() + ); + } + if !elf.little_endian { + bail!("expected little-endian ELF at {}", path.display()); + } + if elf.program_headers.iter().any(|ph| ph.p_type == PT_INTERP) { + bail!( + "dynamic interpreter segments are not supported yet; expected static user ELF at {}", + path.display() + ); + } + + let mut segments = Vec::new(); + for ph in elf.program_headers.iter().filter(|ph| ph.p_type == PT_LOAD) { + let start = ph.p_offset as usize; + let end = start + ph.p_filesz as usize; + let data = bytes + .get(start..end) + .with_context(|| format!("segment outside ELF image for {}", path.display()))? 
+ .to_vec(); + segments.push(SegmentImage { + vaddr: ph.p_vaddr, + mem_size: ph.p_memsz, + file_size: ph.p_filesz, + flags: ph.p_flags, + data, + }); + } + + if segments.is_empty() { + bail!("ELF contains no PT_LOAD segments at {}", path.display()); + } + + Ok(LoadedElf { + path: path.to_path_buf(), + entry: elf.entry, + little_endian: elf.little_endian, + bits: 64, + machine: elf.header.e_machine, + segments, + }) +} diff --git a/crates/funcmodel/Cargo.toml b/crates/funcmodel/Cargo.toml new file mode 100644 index 0000000..94dcce2 --- /dev/null +++ b/crates/funcmodel/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "funcmodel" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow.workspace = true +libc.workspace = true +serde.workspace = true +elf = { path = "../elf" } +isa = { path = "../isa" } +runtime = { path = "../runtime" } + +[dev-dependencies] +tempfile.workspace = true diff --git a/crates/funcmodel/src/core/mod.rs b/crates/funcmodel/src/core/mod.rs new file mode 100644 index 0000000..3e5d7b1 --- /dev/null +++ b/crates/funcmodel/src/core/mod.rs @@ -0,0 +1 @@ +// Core-domain placeholder for future state/config splits. 
diff --git a/crates/funcmodel/src/exec/mod.rs b/crates/funcmodel/src/exec/mod.rs new file mode 100644 index 0000000..2c0ebf7 --- /dev/null +++ b/crates/funcmodel/src/exec/mod.rs @@ -0,0 +1,5139 @@ +use anyhow::{Context, Result, bail}; +use isa::{ + CommitRecord, DecodedInstruction, EngineKind, RunMetrics, RunResult, StageTraceEvent, + TRAP_ILLEGAL_INST, decode_word, +}; +use libc::{clock_gettime, getpid, timespec}; +use runtime::{GuestMemory, GuestRuntime, MEM_READ, MEM_WRITE, guest_prot_to_region_flags}; +use serde::{Deserialize, Serialize}; +use std::collections::{BTreeMap, HashMap}; +use std::ffi::CString; +use std::path::PathBuf; + +const REG_ZERO: usize = 0; +const REG_SP: usize = 1; +const REG_A0: usize = 2; +const REG_A1: usize = 3; +const REG_A2: usize = 4; +const REG_A3: usize = 5; +const REG_A4: usize = 6; +const REG_A5: usize = 7; +const REG_A7: usize = 9; +const REG_RA: usize = 10; +const REG_T1: usize = 24; +const REG_T4: usize = 27; +const REG_U1: usize = 28; +const REG_U3: usize = 30; +const REG_U4: usize = 31; +const REG_IMPLICIT_T_DST: usize = REG_U4; +const REG_IMPLICIT_U_DST: usize = REG_U3; + +const SYS_EVENTFD2: u64 = 19; +const SYS_EPOLL_CREATE1: u64 = 20; +const SYS_EPOLL_CTL: u64 = 21; +const SYS_EPOLL_PWAIT: u64 = 22; +const SYS_GETCWD: u64 = 17; +const SYS_DUP3: u64 = 24; +const SYS_FCNTL: u64 = 25; +const SYS_IOCTL: u64 = 29; +const SYS_READ: u64 = 63; +const SYS_WRITE: u64 = 64; +const SYS_OPENAT: u64 = 56; +const SYS_CLOSE: u64 = 57; +const SYS_PIPE2: u64 = 59; +const SYS_LSEEK: u64 = 62; +const SYS_PSELECT6: u64 = 72; +const SYS_PPOLL: u64 = 73; +const SYS_READLINKAT: u64 = 78; +const SYS_NEWFSTATAT: u64 = 79; +const SYS_FSTAT: u64 = 80; +const SYS_FUTEX: u64 = 98; +const SYS_SET_TID_ADDRESS: u64 = 96; +const SYS_SET_ROBUST_LIST: u64 = 99; +const SYS_SETGID: u64 = 144; +const SYS_SETUID: u64 = 146; +const SYS_SETRESUID: u64 = 147; +const SYS_GETRESUID: u64 = 148; +const SYS_SETRESGID: u64 = 149; +const SYS_GETRESGID: u64 = 150; +const 
SYS_UNAME: u64 = 160; +const SYS_GETPPID: u64 = 173; +const SYS_BRK: u64 = 214; +const SYS_MUNMAP: u64 = 215; +const SYS_MMAP: u64 = 222; +const SYS_WAIT4: u64 = 260; +const SYS_MPROTECT: u64 = 226; +const SYS_MADVISE: u64 = 233; +const SYS_PRLIMIT64: u64 = 261; +const SYS_MEMBARRIER: u64 = 283; +const SYS_RSEQ: u64 = 293; +const SYS_SIGALTSTACK: u64 = 132; +const SYS_RT_SIGACTION: u64 = 134; +const SYS_RT_SIGPROCMASK: u64 = 135; +const SYS_CLOCK_GETTIME: u64 = 113; +const SYS_GETPID: u64 = 172; +const SYS_PRCTL: u64 = 167; +const SYS_GETUID: u64 = 174; +const SYS_GETEUID: u64 = 175; +const SYS_GETGID: u64 = 176; +const SYS_GETEGID: u64 = 177; +const SYS_GETTID: u64 = 178; +const SYS_SYSINFO: u64 = 179; +const SYS_GETRANDOM: u64 = 278; +const SYS_EXIT: u64 = 93; +const SYS_EXIT_GROUP: u64 = 94; + +const TRAP_SW_BREAKPOINT: u64 = 50; +const PAGE_SIZE: u64 = 4096; +const MAX_C_STRING: usize = 4096; +const GUEST_AT_FDCWD: i32 = -100; +const GUEST_AT_EMPTY_PATH: i32 = 0x1000; +const GUEST_AT_SYMLINK_NOFOLLOW: i32 = 0x100; +const GUEST_F_GETFD: i32 = 1; +const GUEST_F_SETFD: i32 = 2; +const GUEST_F_GETFL: i32 = 3; +const GUEST_F_SETFL: i32 = 4; +const GUEST_F_DUPFD: i32 = 0; +const GUEST_F_DUPFD_CLOEXEC: i32 = 1030; +const GUEST_FD_CLOEXEC: i32 = 1; +const GUEST_EFD_SEMAPHORE: i32 = 1; +const GUEST_O_RDONLY: i32 = 0; +const GUEST_O_WRONLY: i32 = 1; +const GUEST_O_NONBLOCK: i32 = 0o4000; +const GUEST_O_CLOEXEC: i32 = 0o2000000; +const GUEST_EPOLL_CTL_ADD: i32 = 1; +const GUEST_EPOLL_CTL_DEL: i32 = 2; +const GUEST_EPOLL_CTL_MOD: i32 = 3; +const GUEST_EPOLLIN: u32 = 0x001; +const GUEST_EPOLLPRI: u32 = 0x002; +const GUEST_EPOLLOUT: u32 = 0x004; +const GUEST_EPOLLERR: u32 = 0x008; +const GUEST_EPOLLHUP: u32 = 0x010; +const GUEST_EPOLLNVAL: u32 = 0x020; +const GUEST_EPOLLRDNORM: u32 = 0x040; +const GUEST_EPOLLRDBAND: u32 = 0x080; +const GUEST_EPOLLWRNORM: u32 = 0x100; +const GUEST_EPOLLWRBAND: u32 = 0x200; +const GUEST_EPOLLRDHUP: u32 = 0x2000; +const GUEST_EPOLLET: u32 = 1 
<< 31; +const GUEST_EPOLLONESHOT: u32 = 1 << 30; +const GUEST_EPOLL_EVENT_SIZE: u64 = 16; +const GUEST_TIOCGPGRP: u64 = 0x540F; +const GUEST_TIOCSPGRP: u64 = 0x5410; +const GUEST_TIOCGWINSZ: u64 = 0x5413; +const GUEST_POLLNVAL: u16 = 0x020; +const GUEST_SIGALTSTACK_SIZE: u64 = 24; +const GUEST_POLLFD_SIZE: u64 = 8; +const GUEST_FD_SET_SIZE: usize = 128; +const GUEST_SS_ONSTACK: u32 = 1; +const GUEST_SS_DISABLE: u32 = 2; +const GUEST_MINSIGSTKSZ: u64 = 2048; +const GUEST_SIGSET_MAX_BYTES: usize = 128; +const GUEST_PR_SET_NAME: u64 = 15; +const GUEST_PR_GET_NAME: u64 = 16; +const GUEST_PRCTL_NAME_BYTES: usize = 16; +const GUEST_MEMBARRIER_CMD_QUERY: u64 = 0; +const GUEST_MEMBARRIER_CMD_PRIVATE_EXPEDITED: u64 = 8; +const GUEST_MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED: u64 = 16; +const GUEST_RSEQ_FLAG_UNREGISTER: u64 = 1; +const GUEST_RSEQ_SIG: u32 = 0x5305_3053; +const GUEST_RSEQ_MIN_LEN: u32 = 32; +const GUEST_LINUX_STAT_SIZE: usize = 128; +const GUEST_SIGSET_BYTES: usize = 16; +const GUEST_UTS_FIELD_BYTES: usize = 65; +const GUEST_UTSNAME_SIZE: usize = GUEST_UTS_FIELD_BYTES * 6; +const GUEST_SYSINFO_SIZE: usize = 368; +const GUEST_RUSAGE_SIZE: usize = 272; +const FUTEX_WAIT: i32 = 0; +const FUTEX_WAKE: i32 = 1; +const FUTEX_PRIVATE: i32 = 128; +const FUTEX_CLOCK_REALTIME: i32 = 256; +const GUEST_EPERM: i32 = 1; +const GUEST_ENOENT: i32 = 2; +const GUEST_EEXIST: i32 = 17; +const GUEST_ECHILD: i32 = 10; +const GUEST_EAGAIN: i32 = 11; +const GUEST_EBADF: i32 = 9; +const GUEST_EFAULT: i32 = 14; +const GUEST_EINVAL: i32 = 22; +const GUEST_ENOTTY: i32 = 25; +const GUEST_ENOMEM: i32 = 12; +const GUEST_ENOSYS: i32 = 38; +const GUEST_ERANGE: i32 = 34; +const GUEST_ETIMEDOUT: i32 = 110; +const GUEST_ESRCH: i32 = 3; +const RLIMIT_DATA: u32 = 2; +const RLIMIT_STACK: u32 = 3; +const RLIMIT_NOFILE: u32 = 7; +const RLIMIT_AS: u32 = 9; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FuncRunOptions { + pub max_steps: u64, +} + +impl Default for FuncRunOptions { + fn 
default() -> Self { + Self { max_steps: 100_000 } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FuncRunBundle { + pub result: RunResult, + pub stage_events: Vec<StageTraceEvent>, +} + +#[derive(Debug, Default)] +pub struct FuncEngine; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum ExitSignal { + GuestExit(i32), + Breakpoint, + Fault, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum BlockKind { + Fall, + Direct, + Cond, + Call, + Ind, + ICall, + Ret, +} + +#[derive(Debug, Clone)] +struct BlockContext { + kind: BlockKind, + target: Option<u64>, + return_target: Option<u64>, +} + +#[derive(Debug, Clone)] +enum SpecialFdKind { + EventFd(EventFdState), + Epoll(EpollState), +} + +#[derive(Debug, Clone)] +struct EventFdState { + write_fd: i32, + counter: u64, + semaphore: bool, +} + +#[derive(Debug, Clone)] +struct GuestEpollRegistration { + guest_fd: u64, + events: u32, + data: u64, +} + +#[derive(Debug, Clone)] +struct EpollState { + wake_write_fd: i32, + registrations: BTreeMap<u64, GuestEpollRegistration>, +} + +#[derive(Debug, Clone)] +struct ExecState { + pc: u64, + regs: [u64; 32], + memory: GuestMemory, + ssr: BTreeMap<u64, u64>, + fd_table: HashMap<u64, i32>, + fd_status_flags: HashMap<u64, i32>, + fd_fd_flags: HashMap<u64, i32>, + special_fds: HashMap<u64, SpecialFdKind>, + block: Option<BlockContext>, + cond: bool, + carg: bool, + target: u64, + brk_base: u64, + brk_current: u64, + mmap_cursor: u64, + clear_child_tid: u64, + robust_list_head: u64, + robust_list_len: u64, + current_pid: u64, + current_ppid: u64, + current_pgrp: u32, + uid: u32, + euid: u32, + suid: u32, + gid: u32, + egid: u32, + sgid: u32, + random_state: u64, + thread_name: [u8; GUEST_PRCTL_NAME_BYTES], + membarrier_private_expedited: bool, + rseq_addr: u64, + rseq_len: u32, + rseq_sig: u32, + alt_stack_sp: u64, + alt_stack_size: u64, + alt_stack_flags: u32, + rlimits: BTreeMap<u32, GuestRlimit>, +} + +#[derive(Debug, Clone)] +struct StepOutcome { + next_pc: u64, + exit: Option<ExitSignal>, + retire_cause: String, +} + +#[derive(Debug, Clone, Copy)] +struct GuestLinuxStat { + dev: u64, + ino: u64, + mode: u32, +
nlink: u32, + uid: u32, + gid: u32, + rdev: u64, + size: i64, + blksize: i32, + blocks: i64, + atime_sec: i64, + atime_nsec: u64, + mtime_sec: i64, + mtime_nsec: u64, + ctime_sec: i64, + ctime_nsec: u64, +} + +#[derive(Debug, Clone, Copy)] +struct GuestRlimit { + cur: u64, + max: u64, +} + +impl FuncEngine { + pub fn run(&self, runtime: &GuestRuntime, options: &FuncRunOptions) -> Result<FuncRunBundle> { + let mut state = ExecState::from_runtime(runtime); + let mut commits = Vec::new(); + let mut decoded = Vec::<DecodedInstruction>::new(); + let mut stage_events = Vec::<StageTraceEvent>::new(); + let mut exit_reason = "step_limit".to_string(); + let mut step = 0u64; + + while step < options.max_steps { + state.ssr.insert(0x0C00, step); + let pc = state.pc; + let bundle = state + .memory + .fetch_u64_bundle(pc) + .with_context(|| format!("no mapped instruction bundle at pc=0x{pc:016x}"))?; + + let Some(decoded_insn) = decode_word(bundle) else { + let mut commit = + CommitRecord::unsupported(step, pc, bundle, TRAP_ILLEGAL_INST, &runtime.block); + commit.len = 8; + commit.next_pc = pc; + commits.push(commit); + stage_events.push(stage_event(step, runtime, "D1", "decode_miss")); + exit_reason = "decode_fault".to_string(); + break; + }; + + if state.block.is_some() && starts_new_block(&decoded_insn) { + let next_pc = resolve_block_end(&mut state, pc); + if next_pc != pc { + state.pc = next_pc; + continue; + } + } + + let mut commit = empty_commit(step, pc, &decoded_insn, runtime); + let outcome = match execute_step(&mut state, runtime, &decoded_insn, &mut commit) { + Ok(outcome) => outcome, + Err(_) => { + commit.trap_valid = 1; + commit.trap_cause = TRAP_ILLEGAL_INST; + commit.traparg0 = decoded_insn.instruction_bits; + commit.next_pc = pc; + StepOutcome { + next_pc: pc, + exit: Some(ExitSignal::Fault), + retire_cause: format!("unsupported:{}", decoded_insn.mnemonic), + } + } + }; + + decoded.push(decoded_insn.clone()); + stage_events.push(stage_event(step * 4, runtime, "F0", "fetch")); + stage_events.push(stage_event(
step * 4 + 1, + runtime, + "D1", + &format!("decode:{}", decoded_insn.mnemonic), + )); + stage_events.push(stage_event( + step * 4 + 2, + runtime, + "E1", + &outcome.retire_cause, + )); + stage_events.push(stage_event(step * 4 + 3, runtime, "CMT", "retire")); + + state.pc = outcome.next_pc; + commits.push(commit); + step += 1; + + if let Some(exit) = outcome.exit { + exit_reason = match exit { + ExitSignal::GuestExit(code) => format!("guest_exit({code})"), + ExitSignal::Breakpoint => "breakpoint".to_string(), + ExitSignal::Fault => "unsupported_instruction".to_string(), + }; + break; + } + } + + let cycles = (commits.len() as u64).saturating_mul(4); + let result = RunResult { + image_name: runtime.image.image_name(), + entry_pc: runtime.state.pc, + metrics: RunMetrics { + engine: EngineKind::Func, + cycles, + commits: commits.len() as u64, + exit_reason, + }, + commits, + decoded, + }; + Ok(FuncRunBundle { + result, + stage_events, + }) + } +} + +fn starts_new_block(decoded: &DecodedInstruction) -> bool { + matches!( + decoded.mnemonic.as_str(), + "BSTART.STD" + | "BSTART CALL" + | "C.BSTART" + | "C.BSTART.STD" + | "HL.BSTART.STD" + | "HL.BSTART.CALL" + | "HL.BSTART.FP" + | "HL.BSTART.SYS" + ) +} + +impl ExecState { + fn from_runtime(runtime: &GuestRuntime) -> Self { + let mut ssr = BTreeMap::new(); + ssr.insert(0x0000, 0); + ssr.insert(0x0001, 0); + ssr.insert(0x0C00, 0); + let current_pid = unsafe { getpid() }.max(0) as u64; + let current_ppid = unsafe { libc::getppid() }.max(0) as u64; + let default_nofile = GuestRlimit { + cur: 1024, + max: 4096, + }; + let default_stack = GuestRlimit { + cur: runtime.config.stack_size, + max: runtime.config.stack_size, + }; + let default_data = GuestRlimit { + cur: runtime.config.mem_bytes, + max: runtime.config.mem_bytes, + }; + let default_as = GuestRlimit { + cur: runtime.config.mem_bytes, + max: runtime.config.mem_bytes, + }; + + let heap_base = align_up(runtime.memory.highest_mapped_address(), PAGE_SIZE); + let 
mmap_cursor = heap_base.saturating_add(PAGE_SIZE); + + Self { + pc: runtime.state.pc, + regs: runtime.state.regs, + memory: runtime.memory.clone(), + ssr, + fd_table: runtime.fd_table.clone(), + fd_status_flags: HashMap::from([(0, 0), (1, 0), (2, 0)]), + fd_fd_flags: HashMap::from([(0, 0), (1, 0), (2, 0)]), + special_fds: HashMap::new(), + block: None, + cond: false, + carg: false, + target: 0, + brk_base: heap_base, + brk_current: heap_base, + mmap_cursor, + clear_child_tid: 0, + robust_list_head: 0, + robust_list_len: 0, + current_pid, + current_ppid, + current_pgrp: current_pid.min(u32::MAX as u64) as u32, + uid: 0, + euid: 0, + suid: 0, + gid: 0, + egid: 0, + sgid: 0, + random_state: 0x4c69_6e78_434f_5245u64 + ^ runtime.state.pc + ^ runtime.config.mem_bytes + ^ runtime.config.stack_size, + thread_name: { + let mut name = [0u8; GUEST_PRCTL_NAME_BYTES]; + name[..4].copy_from_slice(b"lx-f"); + name + }, + membarrier_private_expedited: false, + rseq_addr: 0, + rseq_len: 0, + rseq_sig: 0, + alt_stack_sp: 0, + alt_stack_size: 0, + alt_stack_flags: GUEST_SS_DISABLE, + rlimits: BTreeMap::from([ + (RLIMIT_DATA, default_data), + (RLIMIT_STACK, default_stack), + (RLIMIT_NOFILE, default_nofile), + (RLIMIT_AS, default_as), + ]), + } + } + + fn read_reg(&self, reg: usize) -> u64 { + self.regs.get(reg).copied().unwrap_or(0) + } + + fn write_reg(&mut self, reg: usize, value: u64) { + if reg != REG_ZERO { + self.regs[reg] = value; + } + self.regs[REG_ZERO] = 0; + } + + fn alloc_guest_fd(&self) -> u64 { + self.alloc_guest_fd_from(3) + } + + fn alloc_guest_fd_from(&self, min_fd: u64) -> u64 { + let mut fd = min_fd.max(3); + while self.fd_table.contains_key(&fd) { + fd += 1; + } + fd + } + + fn insert_guest_fd(&mut self, guest_fd: u64, host_fd: i32, status_flags: i32, fd_flags: i32) { + self.release_guest_fd(guest_fd); + self.fd_table.insert(guest_fd, host_fd); + self.fd_status_flags.insert(guest_fd, status_flags); + self.fd_fd_flags + .insert(guest_fd, fd_flags & 
GUEST_FD_CLOEXEC); + } + + fn duplicate_guest_fd( + &mut self, + guest_fd: u64, + min_fd: u64, + cloexec: bool, + ) -> std::result::Result<u64, i32> { + if self.special_fds.contains_key(&guest_fd) { + return Err(GUEST_EINVAL); + } + let host_fd = self.host_fd(guest_fd)?; + let duplicated = unsafe { libc::dup(host_fd) }; + if duplicated < 0 { + return Err(last_errno()); + } + let new_guest_fd = self.alloc_guest_fd_from(min_fd); + let status_flags = self.fd_status_flags.get(&guest_fd).copied().unwrap_or(0); + let fd_flags = if cloexec { GUEST_FD_CLOEXEC } else { 0 }; + self.insert_guest_fd(new_guest_fd, duplicated, status_flags, fd_flags); + Ok(new_guest_fd) + } + + fn duplicate_guest_fd_to( + &mut self, + guest_fd: u64, + target_guest_fd: u64, + cloexec: bool, + ) -> std::result::Result<u64, i32> { + if guest_fd == target_guest_fd { + return Err(GUEST_EINVAL); + } + if self.special_fds.contains_key(&guest_fd) { + return Err(GUEST_EINVAL); + } + let host_fd = self.host_fd(guest_fd)?; + let duplicated = unsafe { libc::dup(host_fd) }; + if duplicated < 0 { + return Err(last_errno()); + } + let status_flags = self.fd_status_flags.get(&guest_fd).copied().unwrap_or(0); + let fd_flags = if cloexec { GUEST_FD_CLOEXEC } else { 0 }; + self.insert_guest_fd(target_guest_fd, duplicated, status_flags, fd_flags); + Ok(target_guest_fd) + } + + fn push_t(&mut self, value: u64) { + for reg in (REG_T1 + 1..=REG_T4).rev() { + self.regs[reg] = self.regs[reg - 1]; + } + self.regs[REG_T1] = value; + } + + fn push_u(&mut self, value: u64) { + for reg in (REG_U1 + 1..=REG_U4).rev() { + self.regs[reg] = self.regs[reg - 1]; + } + self.regs[REG_U1] = value; + } + + fn host_fd(&self, guest_fd: u64) -> std::result::Result<i32, i32> { + self.fd_table.get(&guest_fd).copied().ok_or(GUEST_EBADF) + } + + fn release_guest_fd(&mut self, guest_fd: u64) { + self.unregister_from_epolls(guest_fd); + let host_fd = self.fd_table.remove(&guest_fd); + self.fd_status_flags.remove(&guest_fd); + self.fd_fd_flags.remove(&guest_fd); + match
self.special_fds.remove(&guest_fd) { + Some(SpecialFdKind::EventFd(eventfd)) => { + if let Some(read_fd) = host_fd { + close_host_fd(read_fd); + } + close_host_fd(eventfd.write_fd); + } + Some(SpecialFdKind::Epoll(epoll)) => { + if let Some(read_fd) = host_fd { + close_host_fd(read_fd); + } + close_host_fd(epoll.wake_write_fd); + } + None => { + if let Some(fd) = host_fd { + close_host_fd(fd); + } + } + } + } + + fn close_guest_fd(&mut self, guest_fd: u64) -> std::result::Result<u64, i32> { + if !self.fd_table.contains_key(&guest_fd) { + return Err(GUEST_EBADF); + } + self.release_guest_fd(guest_fd); + Ok(0) + } + + fn unregister_from_epolls(&mut self, guest_fd: u64) { + for special in self.special_fds.values_mut() { + if let SpecialFdKind::Epoll(epoll) = special { + epoll.registrations.remove(&guest_fd); + } + } + } + + fn set_block(&mut self, kind: BlockKind, _start_pc: u64, target: Option<u64>) { + self.block = Some(BlockContext { + kind, + target, + return_target: None, + }); + self.cond = false; + self.carg = false; + self.target = 0; + } + + fn grow_brk(&mut self, target: u64) { + let target = align_up(target.max(self.brk_base), PAGE_SIZE); + let desired_size = target - self.brk_base; + if let Some(region) = self + .memory + .regions + .iter_mut() + .find(|region| region.base == self.brk_base) + { + if desired_size == 0 { + region.size = 0; + region.data.clear(); + } else { + region.size = desired_size; + region.data.resize(desired_size as usize, 0); + } + } else if desired_size != 0 { + self.memory.regions.push(runtime::MemoryRegion { + base: self.brk_base, + size: desired_size, + flags: MEM_READ | MEM_WRITE, + data: vec![0; desired_size as usize], + }); + } + self.memory.regions.retain(|region| region.size != 0); + self.brk_current = target; + } + + fn alloc_mmap(&mut self, requested: u64, size: u64, prot: u32) -> u64 { + let base = if requested != 0 { + align_down(requested, PAGE_SIZE) + } else { + let next = align_up(self.mmap_cursor, PAGE_SIZE); + self.mmap_cursor =
next + align_up(size, PAGE_SIZE); + next + }; + let size = align_up(size.max(PAGE_SIZE), PAGE_SIZE); + self.memory.regions.push(runtime::MemoryRegion { + base, + size, + flags: guest_prot_to_region_flags(prot), + data: vec![0; size as usize], + }); + base + } + + fn rlimit_for(&self, resource: u32) -> Option { + self.rlimits.get(&resource).copied() + } + + fn set_rlimit(&mut self, resource: u32, limit: GuestRlimit) -> std::result::Result<(), i32> { + let Some(current) = self.rlimits.get_mut(&resource) else { + return Err(GUEST_EINVAL); + }; + if limit.cur > limit.max || limit.max > current.max { + return Err(GUEST_EINVAL); + } + *current = limit; + Ok(()) + } +} + +fn execute_step( + state: &mut ExecState, + runtime: &GuestRuntime, + decoded: &DecodedInstruction, + commit: &mut CommitRecord, +) -> Result { + let pc = state.pc; + let fallthrough = pc + decoded.length_bytes() as u64; + + match decoded.mnemonic.as_str() { + "LUI" => { + let rd = reg_field(decoded, &["RegDst"])?; + let imm = sign_extend(need_u(decoded, &["imm20"])? as u64, 20) << 12; + writeback(state, commit, rd, imm as u64); + } + "HL.LUI" | "HL.LIU" => { + let rd = reg_field(decoded, &["RegDst"])?; + let imm = need_u(decoded, &["imm", "uimm"])?; + writeback(state, commit, rd, imm); + } + "HL.LIS" => { + let rd = reg_field(decoded, &["RegDst"])?; + let imm = field_i(decoded, &["imm", "simm"])? as u64; + writeback(state, commit, rd, imm); + } + "ADDTPC" => { + let rd = reg_field(decoded, &["RegDst"])?; + let imm = sign_extend(need_u(decoded, &["imm20"])? 
as u64, 20) << 12; + let value = (pc & !0xfff).wrapping_add(imm as u64); + writeback(state, commit, rd, value); + } + "ADD" | "SUB" | "AND" | "OR" | "XOR" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let shamt = field_u(decoded, &["shamt"]).unwrap_or(0) as u32; + let lhs = state.read_reg(lhs_reg); + let rhs = if decoded.mnemonic == "AND" + || decoded.mnemonic == "OR" + || decoded.mnemonic == "XOR" + { + apply_src_r_logic( + state.read_reg(rhs_reg), + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + shamt, + ) + } else { + apply_src_r_addsub( + state.read_reg(rhs_reg), + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + shamt, + ) + }; + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, state.read_reg(rhs_reg)); + let value = match decoded.mnemonic.as_str() { + "ADD" => lhs.wrapping_add(rhs), + "SUB" => lhs.wrapping_sub(rhs), + "AND" => lhs & rhs, + "OR" => lhs | rhs, + _ => lhs ^ rhs, + }; + writeback(state, commit, rd, value); + } + "ADDW" | "SUBW" | "ANDW" | "ORW" | "XORW" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let shamt = field_u(decoded, &["shamt"]).unwrap_or(0) as u32; + let lhs = state.read_reg(lhs_reg); + let rhs = if decoded.mnemonic == "ANDW" + || decoded.mnemonic == "ORW" + || decoded.mnemonic == "XORW" + { + apply_src_r_logic( + state.read_reg(rhs_reg), + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + shamt, + ) + } else { + apply_src_r_addsub( + state.read_reg(rhs_reg), + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + shamt, + ) + }; + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, state.read_reg(rhs_reg)); + let value = match decoded.mnemonic.as_str() { + "ADDW" => lhs.wrapping_add(rhs), + "SUBW" => lhs.wrapping_sub(rhs), + "ANDW" => lhs & rhs, + "ORW" => lhs | rhs, + _ => lhs ^ rhs, + 
}; + writeback(state, commit, rd, sign_extend32(value)); + } + "ADDI" | "SUBI" | "ADDIW" | "SUBIW" | "HL.ADDI" | "HL.SUBI" | "HL.ADDIW" | "HL.SUBIW" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let lhs = state.read_reg(lhs_reg); + let imm = need_u(decoded, &["uimm12", "uimm24", "uimm"])?; + record_src0(commit, lhs_reg, lhs); + let value = match decoded.mnemonic.as_str() { + "ADDI" | "ADDIW" | "HL.ADDI" | "HL.ADDIW" => lhs.wrapping_add(imm), + _ => lhs.wrapping_sub(imm), + }; + if decoded.mnemonic.ends_with('W') { + writeback(state, commit, rd, sign_extend32(value)); + } else { + writeback(state, commit, rd, value); + } + } + "ANDI" | "ORI" | "XORI" | "ANDIW" | "ORIW" | "XORIW" | "HL.ANDI" | "HL.ORI" | "HL.XORI" + | "HL.ANDIW" | "HL.ORIW" | "HL.XORIW" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let lhs = state.read_reg(lhs_reg); + let imm = field_i(decoded, &["simm12", "simm24", "simm"])? 
as u64; + record_src0(commit, lhs_reg, lhs); + let value = match decoded.mnemonic.as_str() { + "ANDI" | "ANDIW" | "HL.ANDI" | "HL.ANDIW" => lhs & imm, + "ORI" | "ORIW" | "HL.ORI" | "HL.ORIW" => lhs | imm, + _ => lhs ^ imm, + }; + if decoded.mnemonic.ends_with('W') { + writeback(state, commit, rd, sign_extend32(value)); + } else { + writeback(state, commit, rd, value); + } + } + "SLL" | "SRL" | "SRA" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let lhs = state.read_reg(lhs_reg); + let shamt = (state.read_reg(rhs_reg) & 0x3f) as u32; + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, state.read_reg(rhs_reg)); + let value = match decoded.mnemonic.as_str() { + "SLL" => lhs << shamt, + "SRL" => lhs >> shamt, + _ => ((lhs as i64) >> shamt) as u64, + }; + writeback(state, commit, rd, value); + } + "SLLW" | "SRLW" | "SRAW" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let lhs = state.read_reg(lhs_reg); + let shamt = (state.read_reg(rhs_reg) & 0x1f) as u32; + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, state.read_reg(rhs_reg)); + let value = match decoded.mnemonic.as_str() { + "SLLW" => (lhs as u32).wrapping_shl(shamt) as u64, + "SRLW" => ((lhs as u32) >> shamt) as u64, + _ => (((lhs as u32) as i32) >> shamt) as u64, + }; + writeback(state, commit, rd, sign_extend32(value)); + } + "SLLI" | "SRLI" | "SRAI" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let lhs = state.read_reg(lhs_reg); + let shamt = (need_u(decoded, &["shamt"])? 
& 0x3f) as u32; + record_src0(commit, lhs_reg, lhs); + let value = match decoded.mnemonic.as_str() { + "SLLI" => lhs << shamt, + "SRLI" => lhs >> shamt, + _ => ((lhs as i64) >> shamt) as u64, + }; + writeback(state, commit, rd, value); + } + "SLLIW" | "SRLIW" | "SRAIW" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let lhs = state.read_reg(lhs_reg); + let shamt = (need_u(decoded, &["shamt"])? & 0x1f) as u32; + record_src0(commit, lhs_reg, lhs); + let value = match decoded.mnemonic.as_str() { + "SLLIW" => (lhs as u32).wrapping_shl(shamt) as u64, + "SRLIW" => ((lhs as u32) >> shamt) as u64, + _ => (((lhs as u32) as i32) >> shamt) as u64, + }; + writeback(state, commit, rd, sign_extend32(value)); + } + "MUL" | "DIV" | "DIVU" | "REM" | "REMU" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let lhs = state.read_reg(lhs_reg); + let rhs = state.read_reg(rhs_reg); + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, rhs); + let value = match decoded.mnemonic.as_str() { + "MUL" => lhs.wrapping_mul(rhs), + "DIV" => signed_div(lhs, rhs), + "DIVU" => unsigned_div(lhs, rhs), + "REM" => signed_rem(lhs, rhs), + _ => unsigned_rem(lhs, rhs), + }; + writeback(state, commit, rd, value); + } + "MULW" | "DIVW" | "DIVUW" | "REMW" | "REMUW" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let lhs = state.read_reg(lhs_reg); + let rhs = state.read_reg(rhs_reg); + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, rhs); + let lhs32 = lhs as u32; + let rhs32 = rhs as u32; + let value = match decoded.mnemonic.as_str() { + "MULW" => lhs32.wrapping_mul(rhs32) as u64, + "DIVW" => signed_div(lhs32 as i32 as u64, rhs32 as i32 as u64), + "DIVUW" => unsigned_div(lhs32 as u64, rhs32 as u64), + "REMW" => signed_rem(lhs32 as 
i32 as u64, rhs32 as i32 as u64), + _ => unsigned_rem(lhs32 as u64, rhs32 as u64), + }; + writeback(state, commit, rd, sign_extend32(value)); + } + "CMP.EQ" | "CMP.NE" | "CMP.AND" | "CMP.OR" | "CMP.LT" | "CMP.LTU" | "CMP.GE" + | "CMP.GEU" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let lhs = state.read_reg(lhs_reg); + let rhs_raw = state.read_reg(rhs_reg); + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, rhs_raw); + let rhs = if decoded.mnemonic == "CMP.AND" || decoded.mnemonic == "CMP.OR" { + apply_src_r_logic( + rhs_raw, + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + 0, + ) + } else { + apply_src_r_addsub( + rhs_raw, + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + 0, + ) + }; + let value = match decoded.mnemonic.as_str() { + "CMP.EQ" => (lhs == rhs) as u64, + "CMP.NE" => (lhs != rhs) as u64, + "CMP.AND" => ((lhs & rhs) != 0) as u64, + "CMP.OR" => ((lhs | rhs) != 0) as u64, + "CMP.LT" => ((lhs as i64) < (rhs as i64)) as u64, + "CMP.LTU" => (lhs < rhs) as u64, + "CMP.GE" => ((lhs as i64) >= (rhs as i64)) as u64, + _ => (lhs >= rhs) as u64, + }; + writeback(state, commit, rd, value); + } + "CMP.EQI" | "CMP.NEI" | "CMP.ANDI" | "CMP.ORI" | "CMP.LTI" | "CMP.GEI" | "CMP.LTUI" + | "CMP.GEUI" => { + let rd = reg_field(decoded, &["RegDst"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let lhs = state.read_reg(lhs_reg); + record_src0(commit, lhs_reg, lhs); + let value = match decoded.mnemonic.as_str() { + "CMP.EQI" => (lhs == field_i(decoded, &["simm12"])? as u64) as u64, + "CMP.NEI" => (lhs != field_i(decoded, &["simm12"])? as u64) as u64, + "CMP.ANDI" => ((lhs & field_i(decoded, &["simm12"])? as u64) != 0) as u64, + "CMP.ORI" => ((lhs | field_i(decoded, &["simm12"])? as u64) != 0) as u64, + "CMP.LTI" => ((lhs as i64) < field_i(decoded, &["simm12"])? as i64) as u64, + "CMP.GEI" => ((lhs as i64) >= field_i(decoded, &["simm12"])? 
as i64) as u64, + "CMP.LTUI" => (lhs < need_u(decoded, &["uimm12"])? as u64) as u64, + _ => (lhs >= need_u(decoded, &["uimm12"])? as u64) as u64, + }; + writeback(state, commit, rd, value); + } + "SETC.EQ" | "SETC.NE" | "SETC.AND" | "SETC.OR" | "SETC.LT" | "SETC.LTU" | "SETC.GE" + | "SETC.GEU" => { + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let lhs = state.read_reg(lhs_reg); + let rhs_raw = state.read_reg(rhs_reg); + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, rhs_raw); + let rhs = if decoded.mnemonic == "SETC.AND" || decoded.mnemonic == "SETC.OR" { + apply_src_r_logic( + rhs_raw, + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + 0, + ) + } else { + apply_src_r_addsub( + rhs_raw, + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + 0, + ) + }; + state.cond = match decoded.mnemonic.as_str() { + "SETC.EQ" => lhs == rhs, + "SETC.NE" => lhs != rhs, + "SETC.AND" => (lhs & rhs) != 0, + "SETC.OR" => (lhs | rhs) != 0, + "SETC.LT" => (lhs as i64) < (rhs as i64), + "SETC.LTU" => lhs < rhs, + "SETC.GE" => (lhs as i64) >= (rhs as i64), + _ => lhs >= rhs, + }; + state.carg = state.cond; + } + "SETC.EQI" | "SETC.NEI" | "SETC.ANDI" | "SETC.ORI" | "SETC.LTI" | "SETC.GEI" + | "SETC.LTUI" | "SETC.GEUI" => { + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let lhs = state.read_reg(lhs_reg); + record_src0(commit, lhs_reg, lhs); + state.cond = match decoded.mnemonic.as_str() { + "SETC.EQI" => lhs == field_i(decoded, &["simm12"])? as u64, + "SETC.NEI" => lhs != field_i(decoded, &["simm12"])? as u64, + "SETC.ANDI" => (lhs & field_i(decoded, &["simm12"])? as u64) != 0, + "SETC.ORI" => (lhs | field_i(decoded, &["simm12"])? as u64) != 0, + "SETC.LTI" => (lhs as i64) < field_i(decoded, &["simm12"])? as i64, + "SETC.GEI" => (lhs as i64) >= field_i(decoded, &["simm12"])? as i64, + "SETC.LTUI" => lhs < need_u(decoded, &["uimm12"])? as u64, + _ => lhs >= need_u(decoded, &["uimm12"])? 
as u64, + }; + state.carg = state.cond; + } + "C.SETC.EQ" | "C.SETC.NE" | "C.SETC.LT" | "C.SETC.LTU" | "C.SETC.GE" | "C.SETC.GEU" => { + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let lhs = state.read_reg(lhs_reg); + let rhs = state.read_reg(rhs_reg); + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, rhs); + state.cond = match decoded.mnemonic.as_str() { + "C.SETC.EQ" => lhs == rhs, + "C.SETC.NE" => lhs != rhs, + "C.SETC.LT" => (lhs as i64) < (rhs as i64), + "C.SETC.LTU" => lhs < rhs, + "C.SETC.GE" => (lhs as i64) >= (rhs as i64), + _ => lhs >= rhs, + }; + state.carg = state.cond; + } + "SETC.TGT" | "C.SETC.TGT" => { + let src_reg = reg_field(decoded, &["SrcL"])?; + let value = state.read_reg(src_reg); + record_src0(commit, src_reg, value); + state.target = value; + state.cond = true; + state.carg = true; + } + "CSEL" => { + let rd = reg_field(decoded, &["RegDst"])?; + let pred_reg = reg_field(decoded, &["SrcP"])?; + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let pred = state.read_reg(pred_reg); + let lhs = state.read_reg(lhs_reg); + let rhs = apply_src_r_addsub( + state.read_reg(rhs_reg), + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + 0, + ); + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, state.read_reg(rhs_reg)); + let value = if pred != 0 { rhs } else { lhs }; + writeback(state, commit, rd, value); + } + "LBI" | "LBUI" | "LHI" | "LHUI" | "LWI" | "LWUI" | "LDI" => { + let rd = reg_field(decoded, &["RegDst"])?; + let base_reg = reg_field(decoded, &["SrcL"])?; + let base = state.read_reg(base_reg); + let scale = match decoded.mnemonic.as_str() { + "LHI" | "LHUI" => 2, + "LWI" | "LWUI" => 4, + "LDI" => 8, + _ => 1, + }; + let offset = field_i(decoded, &["simm12"])? 
* scale; + let addr = base.wrapping_add(offset as u64); + record_src0(commit, base_reg, base); + let value = load_value(&state.memory, decoded.mnemonic.as_str(), addr)?; + record_load(commit, addr, value.raw, value.size); + writeback(state, commit, rd, value.value); + } + "LB" | "LBU" | "LH" | "LHU" | "LW" | "LWU" | "LD" => { + let rd = reg_field(decoded, &["RegDst"])?; + let base_reg = reg_field(decoded, &["SrcL"])?; + let idx_reg = reg_field(decoded, &["SrcR"])?; + let base = state.read_reg(base_reg); + let idx_raw = state.read_reg(idx_reg); + let shamt = field_u(decoded, &["shamt"]).unwrap_or(0) as u32; + let idx = apply_src_r_addsub( + idx_raw, + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + shamt, + ); + let addr = base.wrapping_add(idx); + record_src0(commit, base_reg, base); + record_src1(commit, idx_reg, idx_raw); + let value = load_value(&state.memory, decoded.mnemonic.as_str(), addr)?; + record_load(commit, addr, value.raw, value.size); + writeback(state, commit, rd, value.value); + } + "HL.LBIP" | "HL.LBUIP" | "HL.LHIP" | "HL.LHIP.U" | "HL.LHUIP" | "HL.LHUIP.U" + | "HL.LWIP" | "HL.LWIP.U" | "HL.LWUIP" | "HL.LWUIP.U" | "HL.LDIP" | "HL.LDIP.U" => { + let rd0 = reg_field(decoded, &["RegDst0"])?; + let rd1 = reg_field(decoded, &["RegDst1"])?; + let base_reg = reg_field(decoded, &["SrcL"])?; + let base = state.read_reg(base_reg); + let pair = pair_access(decoded.mnemonic.as_str())?; + let simm = field_i(decoded, &["simm17"])?; + let offset = if pair.unscaled { + simm + } else { + simm * i64::from(pair.elem_size) + }; + let addr0 = base.wrapping_add(offset as u64); + let addr1 = addr0.wrapping_add(u64::from(pair.elem_size)); + let value0 = load_value(&state.memory, pair.load_mnemonic, addr0)?; + let value1 = load_value(&state.memory, pair.load_mnemonic, addr1)?; + record_src0(commit, base_reg, base); + record_load(commit, addr0, value0.raw, pair.elem_size.saturating_mul(2)); + state.write_reg(rd1, value1.value); + writeback(state, commit, rd0, 
value0.value); + } + "SBI" | "SHI" | "SWI" | "SDI" => { + let src_reg = reg_field(decoded, &["SrcL"])?; + let base_reg = reg_field(decoded, &["SrcR"])?; + let src_value = state.read_reg(src_reg); + let base = state.read_reg(base_reg); + let scale = match decoded.mnemonic.as_str() { + "SHI" => 2, + "SWI" => 4, + "SDI" => 8, + _ => 1, + }; + let offset = field_i(decoded, &["simm12"])? * scale; + let addr = base.wrapping_add(offset as u64); + record_src0(commit, src_reg, src_value); + record_src1(commit, base_reg, base); + store_value( + &mut state.memory, + decoded.mnemonic.as_str(), + addr, + src_value, + )?; + record_store( + commit, + addr, + src_value, + size_for_store(decoded.mnemonic.as_str()), + ); + } + "SB" | "SH" | "SW" | "SD" => { + let src_reg = reg_field(decoded, &["SrcD", "SrcP"])?; + let base_reg = reg_field(decoded, &["SrcL"])?; + let idx_reg = reg_field(decoded, &["SrcR"])?; + let src_value = state.read_reg(src_reg); + let base = state.read_reg(base_reg); + let idx_raw = state.read_reg(idx_reg); + let shamt = field_u(decoded, &["shamt"]) + .unwrap_or(size_shift_for_store(decoded.mnemonic.as_str()) as u64) + as u32; + let idx = apply_src_r_addsub( + idx_raw, + field_u(decoded, &["SrcRType"]).unwrap_or(3) as u8, + shamt, + ); + let addr = base.wrapping_add(idx); + record_src0(commit, src_reg, src_value); + record_src1(commit, base_reg, base); + store_value( + &mut state.memory, + decoded.mnemonic.as_str(), + addr, + src_value, + )?; + record_store( + commit, + addr, + src_value, + size_for_store(decoded.mnemonic.as_str()), + ); + } + "HL.SBIP" | "HL.SHIP" | "HL.SHIP.U" | "HL.SWIP" | "HL.SWIP.U" | "HL.SDIP" | "HL.SDIP.U" => { + let src0_reg = reg_field(decoded, &["SrcD"])?; + let src1_reg = reg_field(decoded, &["SrcD1"])?; + let base_reg = reg_field(decoded, &["SrcR"])?; + let src0_value = state.read_reg(src0_reg); + let src1_value = state.read_reg(src1_reg); + let base = state.read_reg(base_reg); + let pair = pair_access(decoded.mnemonic.as_str())?; + 
let simm = field_i(decoded, &["simm17"])?; + let offset = if pair.unscaled { + simm + } else { + simm * i64::from(pair.elem_size) + }; + let addr0 = base.wrapping_add(offset as u64); + let addr1 = addr0.wrapping_add(u64::from(pair.elem_size)); + record_src0(commit, src0_reg, src0_value); + record_src1(commit, src1_reg, src1_value); + store_value(&mut state.memory, pair.store_mnemonic, addr0, src0_value)?; + store_value(&mut state.memory, pair.store_mnemonic, addr1, src1_value)?; + record_store(commit, addr0, src0_value, pair.elem_size.saturating_mul(2)); + } + "SSRGET" => { + let rd = reg_field(decoded, &["RegDst"])?; + let ssr = need_u(decoded, &["SSR_ID"])? as u16; + let value = *state.ssr.get(&ssr).unwrap_or(&0); + writeback(state, commit, rd, value); + } + "SSRSET" => { + let src_reg = reg_field(decoded, &["SrcL"])?; + let ssr = need_u(decoded, &["SSR_ID"])? as u16; + let value = state.read_reg(src_reg); + record_src0(commit, src_reg, value); + state.ssr.insert(ssr, value); + } + "C.SSRGET" => { + let ssr = need_u(decoded, &["SrcL"])? as u16; + let value = *state.ssr.get(&ssr).unwrap_or(&0); + writeback(state, commit, REG_IMPLICIT_T_DST, value); + } + "C.MOVR" => { + let rd = reg_field(decoded, &["RegDst"])?; + let src_reg = reg_field(decoded, &["SrcL"])?; + let value = state.read_reg(src_reg); + record_src0(commit, src_reg, value); + writeback(state, commit, rd, value); + } + "C.MOVI" => { + let rd = reg_field(decoded, &["RegDst"])?; + let value = field_i(decoded, &["simm5"])? as u64; + writeback(state, commit, rd, value); + } + "C.ADDI" => { + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let lhs = state.read_reg(lhs_reg); + let value = lhs.wrapping_add(field_i(decoded, &["simm5"])? 
as u64); + record_src0(commit, lhs_reg, lhs); + writeback(state, commit, REG_IMPLICIT_T_DST, value); + } + "C.ADD" | "C.SUB" | "C.AND" | "C.OR" => { + let lhs_reg = reg_field(decoded, &["SrcL"])?; + let rhs_reg = reg_field(decoded, &["SrcR"])?; + let lhs = state.read_reg(lhs_reg); + let rhs = state.read_reg(rhs_reg); + record_src0(commit, lhs_reg, lhs); + record_src1(commit, rhs_reg, rhs); + let value = match decoded.mnemonic.as_str() { + "C.ADD" => lhs.wrapping_add(rhs), + "C.SUB" => lhs.wrapping_sub(rhs), + "C.AND" => lhs & rhs, + _ => lhs | rhs, + }; + writeback(state, commit, REG_IMPLICIT_T_DST, value); + } + "C.SLLI" | "C.SRLI" => { + let value = state.read_reg(REG_T1); + let shamt = (need_u(decoded, &["uimm5"])? & 0x1f) as u32; + record_src0(commit, REG_T1, value); + let out = if decoded.mnemonic == "C.SLLI" { + value << shamt + } else { + value >> shamt + }; + writeback(state, commit, REG_IMPLICIT_T_DST, out); + } + "C.ZEXT.B" | "C.ZEXT.H" | "C.ZEXT.W" | "C.SEXT.B" | "C.SEXT.H" | "C.SEXT.W" => { + let src_reg = reg_field(decoded, &["SrcL"])?; + let value = state.read_reg(src_reg); + record_src0(commit, src_reg, value); + let out = match decoded.mnemonic.as_str() { + "C.ZEXT.B" => value as u8 as u64, + "C.ZEXT.H" => value as u16 as u64, + "C.ZEXT.W" => value as u32 as u64, + "C.SEXT.B" => (value as u8 as i8 as i64) as u64, + "C.SEXT.H" => (value as u16 as i16 as i64) as u64, + _ => sign_extend32(value), + }; + writeback(state, commit, REG_IMPLICIT_T_DST, out); + } + "C.LWI" | "C.LDI" => { + let base_reg = reg_field(decoded, &["SrcL"])?; + let base = state.read_reg(base_reg); + let scale = if decoded.mnemonic == "C.LDI" { 8 } else { 4 }; + let addr = base.wrapping_add((field_i(decoded, &["simm5"])? 
* scale) as u64); + record_src0(commit, base_reg, base); + let value = load_value( + &state.memory, + if decoded.mnemonic == "C.LDI" { + "LD" + } else { + "LW" + }, + addr, + )?; + record_load(commit, addr, value.raw, value.size); + writeback(state, commit, REG_IMPLICIT_T_DST, value.value); + } + "C.SWI" | "C.SDI" => { + let base_reg = reg_field(decoded, &["SrcL"])?; + let base = state.read_reg(base_reg); + let src_value = state.read_reg(REG_T1); + let scale = if decoded.mnemonic == "C.SDI" { 8 } else { 4 }; + let addr = base.wrapping_add((field_i(decoded, &["simm5"])? * scale) as u64); + record_src0(commit, REG_T1, src_value); + record_src1(commit, base_reg, base); + store_value( + &mut state.memory, + if decoded.mnemonic == "C.SDI" { + "SD" + } else { + "SW" + }, + addr, + src_value, + )?; + record_store( + commit, + addr, + src_value, + if decoded.mnemonic == "C.SDI" { 8 } else { 4 }, + ); + } + "SETRET" => { + let target = pc.wrapping_add(need_u(decoded, &["imm20"])? << 1); + writeback(state, commit, REG_RA, target); + if let Some(block) = &mut state.block { + block.return_target = Some(target); + } + } + "C.SETRET" => { + let target = pc.wrapping_add(need_u(decoded, &["uimm5"])? << 1); + writeback(state, commit, REG_RA, target); + if let Some(block) = &mut state.block { + block.return_target = Some(target); + } + } + "HL.SETRET" => { + let offset = sign_extend(need_u(decoded, &["imm32"])? as u64, 32) as i128; + let target = pc.wrapping_add((offset << 1) as u64); + writeback(state, commit, REG_RA, target); + if let Some(block) = &mut state.block { + block.return_target = Some(target); + } + } + "J" => { + let target = pc.wrapping_add(((field_i(decoded, &["simm22"])? 
as i128) << 1) as u64); + commit.next_pc = target; + return Ok(StepOutcome { + next_pc: target, + exit: None, + retire_cause: "jump".to_string(), + }); + } + "JR" => { + let base_reg = reg_field(decoded, &["SrcL"])?; + let base = state.read_reg(base_reg); + record_src0(commit, base_reg, base); + let target = base.wrapping_add(((field_i(decoded, &["simm12"])? as i128) << 1) as u64); + commit.next_pc = target; + return Ok(StepOutcome { + next_pc: target, + exit: None, + retire_cause: "jump_reg".to_string(), + }); + } + "BSTART.STD" | "HL.BSTART.STD" | "HL.BSTART.FP" | "HL.BSTART.SYS" => { + let kind = match decoded.asm.as_str() { + asm if asm.contains("CALL") => BlockKind::Call, + asm if asm.contains("DIRECT") => BlockKind::Direct, + asm if asm.contains("COND") => BlockKind::Cond, + asm if asm.contains("RET") => BlockKind::Ret, + asm if asm.contains("ICALL") => BlockKind::ICall, + asm if asm.contains("IND") => BlockKind::Ind, + _ => BlockKind::Fall, + }; + let target = if matches!(kind, BlockKind::Call | BlockKind::Cond | BlockKind::Direct) { + Some(match decoded.mnemonic.as_str() { + "BSTART.STD" => { + pc.wrapping_add(((field_i(decoded, &["simm17"])? as i128) << 1) as u64) + } + _ => pc.wrapping_add(field_i(decoded, &["simm"])? as u64), + }) + } else { + None + }; + state.set_block(kind, pc, target); + } + "BSTART CALL" | "HL.BSTART.CALL" => { + let (target, return_target) = match decoded.mnemonic.as_str() { + "BSTART CALL" => ( + pc.wrapping_add(((field_i(decoded, &["simm12"])? as i128) << 1) as u64), + pc.wrapping_add(need_u(decoded, &["uimm5"])? << 1), + ), + _ => ( + pc.wrapping_add(field_i(decoded, &["simm25"])? 
as u64), + fallthrough, + ), + }; + state.set_block(BlockKind::Call, pc, Some(target)); + state.write_reg(REG_RA, return_target); + if let Some(block) = &mut state.block { + block.return_target = Some(return_target); + } + commit.wb_valid = 1; + commit.wb_rd = REG_RA as u8; + commit.wb_data = return_target; + commit.dst_valid = 1; + commit.dst_reg = REG_RA as u8; + commit.dst_data = return_target; + } + "C.BSTART" => { + let kind = if decoded.asm.contains("COND") { + BlockKind::Cond + } else { + BlockKind::Direct + }; + let target = pc.wrapping_add(((field_i(decoded, &["simm12"])? as i128) << 1) as u64); + state.set_block(kind, pc, Some(target)); + } + "C.BSTART.STD" => { + let br_type = need_u(decoded, &["BrType"])? as u8; + let kind = match br_type { + 5 => BlockKind::Ind, + 6 => BlockKind::ICall, + 7 => BlockKind::Ret, + _ => BlockKind::Fall, + }; + state.set_block(kind, pc, None); + } + "C.BSTOP" => { + let next_pc = resolve_block_end(state, fallthrough); + commit.next_pc = next_pc; + return Ok(StepOutcome { + next_pc, + exit: None, + retire_cause: "block_end".to_string(), + }); + } + "FENTRY" => { + apply_fentry( + state, + commit, + need_u(decoded, &["SrcBegin"])? as usize, + need_u(decoded, &["SrcEnd"])? as usize, + need_u(decoded, &["uimm"])?, + )?; + } + "FEXIT" => { + apply_fexit( + state, + commit, + need_u(decoded, &["DstBegin"])? as usize, + need_u(decoded, &["DstEnd"])? as usize, + need_u(decoded, &["uimm"])?, + )?; + } + "FRET.STK" => { + let target = apply_fret_stk( + state, + commit, + need_u(decoded, &["DstBegin"])? as usize, + need_u(decoded, &["DstEnd"])? as usize, + need_u(decoded, &["uimm"])?, + )?; + commit.next_pc = target; + return Ok(StepOutcome { + next_pc: target, + exit: None, + retire_cause: "fret_stk".to_string(), + }); + } + "FRET.RA" => { + let target = apply_fret_ra( + state, + commit, + need_u(decoded, &["DstBegin"])? as usize, + need_u(decoded, &["DstEnd"])? 
as usize, + need_u(decoded, &["uimm"])?, + )?; + commit.next_pc = target; + return Ok(StepOutcome { + next_pc: target, + exit: None, + retire_cause: "fret_ra".to_string(), + }); + } + "ACRC" => { + let rst = need_u(decoded, &["RST_Type"])?; + if rst != 1 { + bail!("unsupported ACRC rst_type {rst}"); + } + let outcome = dispatch_syscall(state, runtime, commit)?; + commit.next_pc = fallthrough; + return Ok(StepOutcome { + next_pc: fallthrough, + exit: outcome, + retire_cause: "syscall".to_string(), + }); + } + "EBREAK" | "C.EBREAK" => { + commit.trap_valid = 1; + commit.trap_cause = TRAP_SW_BREAKPOINT; + commit.traparg0 = decoded.instruction_bits; + commit.next_pc = fallthrough; + return Ok(StepOutcome { + next_pc: fallthrough, + exit: Some(ExitSignal::Breakpoint), + retire_cause: "breakpoint".to_string(), + }); + } + "C.BSTART.STD RET" => unreachable!(), + other => bail!("unsupported mnemonic {other}"), + } + + commit.next_pc = fallthrough; + Ok(StepOutcome { + next_pc: fallthrough, + exit: None, + retire_cause: "execute".to_string(), + }) +} + +fn dispatch_syscall( + state: &mut ExecState, + runtime: &GuestRuntime, + commit: &mut CommitRecord, +) -> Result> { + let number = state.read_reg(REG_A7); + let args = [ + state.read_reg(REG_A0), + state.read_reg(REG_A1), + state.read_reg(REG_A2), + state.read_reg(REG_A3), + state.read_reg(REG_A4), + state.read_reg(REG_A5), + ]; + record_src0(commit, REG_A0, args[0]); + record_src1(commit, REG_A7, number); + + let result = match number { + SYS_EVENTFD2 => dispatch_eventfd2(state, args), + SYS_EPOLL_CREATE1 => dispatch_epoll_create1(state, args), + SYS_EPOLL_CTL => dispatch_epoll_ctl(state, args), + SYS_EPOLL_PWAIT => dispatch_epoll_pwait(state, commit, args), + SYS_GETPID => Ok(state.current_pid), + SYS_GETPPID => Ok(state.current_ppid), + SYS_WAIT4 => dispatch_wait4(state, args), + SYS_GETCWD => dispatch_getcwd(state, runtime, args), + SYS_PSELECT6 => dispatch_pselect6(state, commit, args), + SYS_PPOLL => 
dispatch_ppoll(state, commit, args), + SYS_PRCTL => dispatch_prctl(state, commit, args), + SYS_GETUID => Ok(state.uid as u64), + SYS_GETEUID => Ok(state.euid as u64), + SYS_GETGID => Ok(state.gid as u64), + SYS_GETEGID => Ok(state.egid as u64), + SYS_GETRESUID => match dispatch_getres_ids( + &mut state.memory, + [args[0], args[1], args[2]], + [state.uid, state.euid, state.suid], + ) { + Ok(()) => { + record_store(commit, args[0], state.uid as u64, 12); + Ok(0) + } + Err(errno) => Err(errno), + }, + SYS_GETRESGID => match dispatch_getres_ids( + &mut state.memory, + [args[0], args[1], args[2]], + [state.gid, state.egid, state.sgid], + ) { + Ok(()) => { + record_store(commit, args[0], state.gid as u64, 12); + Ok(0) + } + Err(errno) => Err(errno), + }, + SYS_SETUID => match validate_single_id(args[0], &[state.uid, state.euid, state.suid]) { + Ok(uid) => { + state.uid = uid; + state.euid = uid; + state.suid = uid; + Ok(0) + } + Err(errno) => Err(errno), + }, + SYS_SETGID => match validate_single_id(args[0], &[state.gid, state.egid, state.sgid]) { + Ok(gid) => { + state.gid = gid; + state.egid = gid; + state.sgid = gid; + Ok(0) + } + Err(errno) => Err(errno), + }, + SYS_SETRESUID => match apply_setres_ids( + [state.uid, state.euid, state.suid], + [args[0], args[1], args[2]], + ) { + Ok([uid, euid, suid]) => { + state.uid = uid; + state.euid = euid; + state.suid = suid; + Ok(0) + } + Err(errno) => Err(errno), + }, + SYS_SETRESGID => match apply_setres_ids( + [state.gid, state.egid, state.sgid], + [args[0], args[1], args[2]], + ) { + Ok([gid, egid, sgid]) => { + state.gid = gid; + state.egid = egid; + state.sgid = sgid; + Ok(0) + } + Err(errno) => Err(errno), + }, + SYS_GETTID => Ok(state.current_pid), + SYS_GETRANDOM => dispatch_getrandom(state, commit, args), + SYS_MEMBARRIER => dispatch_membarrier(state, args), + SYS_RSEQ => dispatch_rseq(state, commit, args), + SYS_SIGALTSTACK => dispatch_sigaltstack(state, commit, args), + SYS_SET_TID_ADDRESS => { + 
state.clear_child_tid = args[0]; + Ok(state.current_pid) + } + SYS_SET_ROBUST_LIST => { + state.robust_list_head = args[0]; + state.robust_list_len = args[1]; + Ok(0) + } + SYS_FUTEX => dispatch_futex(state, args), + SYS_UNAME => { + if write_guest_utsname(&mut state.memory, args[0]).is_err() { + Err(GUEST_EFAULT) + } else { + record_store(commit, args[0], 0, trace_size(GUEST_UTSNAME_SIZE)); + Ok(0) + } + } + SYS_SYSINFO => { + if write_guest_sysinfo(&mut state.memory, args[0], runtime).is_err() { + Err(GUEST_EFAULT) + } else { + record_store(commit, args[0], 0, trace_size(GUEST_SYSINFO_SIZE)); + Ok(0) + } + } + SYS_PRLIMIT64 => dispatch_prlimit64(state, args), + SYS_CLOCK_GETTIME => { + let clk_id: libc::clockid_t = args[0].try_into().unwrap_or(libc::CLOCK_REALTIME); + let mut ts = timespec { + tv_sec: 0, + tv_nsec: 0, + }; + let rc = unsafe { clock_gettime(clk_id, &mut ts as *mut timespec) }; + if rc != 0 { + Err(last_errno()) + } else { + if state + .memory + .write_u64_checked(args[1], ts.tv_sec as u64) + .is_none() + || state + .memory + .write_u64_checked(args[1] + 8, ts.tv_nsec as u64) + .is_none() + { + Err(GUEST_EFAULT) + } else { + record_store(commit, args[1], ts.tv_sec as u64, 16); + Ok(0) + } + } + } + SYS_FCNTL => dispatch_fcntl(state, args), + SYS_IOCTL => dispatch_ioctl(state, commit, args), + SYS_WRITE => match state.special_fds.get_mut(&args[0]) { + Some(SpecialFdKind::EventFd(eventfd)) => { + dispatch_eventfd_write(&mut state.memory, eventfd, args) + } + Some(SpecialFdKind::Epoll(_)) => Err(GUEST_EINVAL), + None => match state.host_fd(args[0]) { + Ok(host_fd) => { + let Some(bytes) = state.memory.read_bytes_checked(args[1], args[2] as usize) + else { + return finalize_syscall(state, commit, Err(GUEST_EFAULT)); + }; + let rc = unsafe { libc::write(host_fd, bytes.as_ptr().cast(), bytes.len()) }; + if rc < 0 { + Err(last_errno()) + } else { + Ok(rc as u64) + } + } + Err(errno) => Err(errno), + }, + }, + SYS_READ => match 
state.special_fds.get(&args[0]) { + Some(SpecialFdKind::EventFd(_)) => { + let read_fd = match state.host_fd(args[0]) { + Ok(read_fd) => read_fd, + Err(errno) => return finalize_syscall(state, commit, Err(errno)), + }; + let Some(SpecialFdKind::EventFd(eventfd)) = state.special_fds.get_mut(&args[0]) + else { + unreachable!(); + }; + dispatch_eventfd_read(&mut state.memory, read_fd, eventfd, commit, args) + } + Some(SpecialFdKind::Epoll(_)) => Err(GUEST_EINVAL), + None => match state.host_fd(args[0]) { + Ok(host_fd) => { + let mut bytes = vec![0u8; args[2] as usize]; + let rc = unsafe { libc::read(host_fd, bytes.as_mut_ptr().cast(), bytes.len()) }; + if rc < 0 { + Err(last_errno()) + } else { + let count = rc as usize; + if state + .memory + .write_bytes_checked(args[1], &bytes[..count]) + .is_none() + { + return finalize_syscall(state, commit, Err(GUEST_EFAULT)); + } + record_load(commit, args[1], 0, trace_size(count)); + Ok(count as u64) + } + } + Err(errno) => Err(errno), + }, + }, + SYS_DUP3 => dispatch_dup3(state, args), + SYS_OPENAT => { + let dirfd = args[0] as i64 as i32; + let Some(path) = state.memory.read_c_string_checked(args[1], MAX_C_STRING) else { + return finalize_syscall(state, commit, Err(GUEST_EFAULT)); + }; + let flags = args[2] as i32; + let mode = args[3] as libc::mode_t; + let resolved = resolve_open_path(runtime, dirfd, &path); + let c_path = CString::new(resolved.as_str()).context("guest open path contains NUL")?; + let host_fd = if dirfd == GUEST_AT_FDCWD || resolved != path { + unsafe { libc::open(c_path.as_ptr(), flags, mode as libc::c_uint) } + } else { + match state.host_fd(args[0]) { + Ok(host_dirfd) => unsafe { + libc::openat(host_dirfd, c_path.as_ptr(), flags, mode as libc::c_uint) + }, + Err(errno) => return finalize_syscall(state, commit, Err(errno)), + } + }; + if host_fd < 0 { + Err(last_errno()) + } else { + let guest_fd = state.alloc_guest_fd(); + state.insert_guest_fd(guest_fd, host_fd, flags & !GUEST_O_CLOEXEC, 0); + 
Ok(guest_fd) + } + } + SYS_CLOSE => state.close_guest_fd(args[0]), + SYS_PIPE2 => dispatch_pipe2(state, commit, args), + SYS_LSEEK => match state.host_fd(args[0]) { + Ok(host_fd) => { + let offset = args[1] as i64; + let whence = args[2] as i32; + let rc = unsafe { libc::lseek(host_fd, offset, whence) }; + if rc < 0 { + Err(last_errno()) + } else { + Ok(rc as u64) + } + } + Err(errno) => Err(errno), + }, + SYS_FSTAT => match state.host_fd(args[0]) { + Ok(host_fd) => { + let stat = match host_fstat(host_fd) { + Ok(stat) => stat, + Err(errno) => return finalize_syscall(state, commit, Err(errno)), + }; + if write_guest_linux_stat(&mut state.memory, args[1], stat).is_err() { + return finalize_syscall(state, commit, Err(GUEST_EFAULT)); + } + record_store( + commit, + args[1], + stat.size as u64, + GUEST_LINUX_STAT_SIZE as u8, + ); + Ok(0) + } + Err(errno) => Err(errno), + }, + SYS_NEWFSTATAT => dispatch_newfstatat(state, runtime, commit, args), + SYS_READLINKAT => dispatch_readlinkat(state, runtime, commit, args), + SYS_BRK => { + if args[0] != 0 { + state.grow_brk(args[0]); + } + Ok(state.brk_current) + } + SYS_MADVISE => dispatch_madvise(state, args), + SYS_MMAP => { + if args[1] == 0 { + Err(GUEST_EINVAL) + } else { + let addr = state.alloc_mmap(args[0], args[1], args[2] as u32); + Ok(addr) + } + } + SYS_MUNMAP => { + let addr = args[0]; + let size = align_up(args[1], PAGE_SIZE); + if size == 0 || addr & (PAGE_SIZE - 1) != 0 { + Err(GUEST_EINVAL) + } else { + state.memory.unmap_range(addr, size); + Ok(0) + } + } + SYS_MPROTECT => { + let addr = args[0]; + let size = align_up(args[1], PAGE_SIZE); + if size == 0 || addr & (PAGE_SIZE - 1) != 0 { + Err(GUEST_EINVAL) + } else if state.memory.protect_range( + addr, + size, + guest_prot_to_region_flags(args[2] as u32), + ) { + Ok(0) + } else { + Err(GUEST_ENOMEM) + } + } + SYS_RT_SIGACTION => { + if args[2] != 0 { + let bytes = vec![0u8; 32]; + if state.memory.write_bytes_checked(args[2], &bytes).is_none() { + 
Err(GUEST_EFAULT) + } else { + record_store(commit, args[2], 0, trace_size(bytes.len())); + Ok(0) + } + } else { + Ok(0) + } + } + SYS_RT_SIGPROCMASK => { + let size = args[3] as usize; + if size == 0 || size > GUEST_SIGSET_BYTES { + Err(GUEST_EINVAL) + } else if args[2] != 0 { + let bytes = vec![0u8; size]; + if state.memory.write_bytes_checked(args[2], &bytes).is_none() { + Err(GUEST_EFAULT) + } else { + record_store(commit, args[2], 0, trace_size(size)); + Ok(0) + } + } else { + Ok(0) + } + } + SYS_EXIT | SYS_EXIT_GROUP => { + return Ok(Some(ExitSignal::GuestExit(args[0] as i32))); + } + _ => Err(GUEST_ENOSYS), + }; + + finalize_syscall(state, commit, result) +} + +fn resolve_block_end(state: &mut ExecState, fallthrough: u64) -> u64 { + let block = state.block.take(); + let next_pc = match block { + None => fallthrough, + Some(block) => match block.kind { + BlockKind::Fall => fallthrough, + BlockKind::Direct => block.target.unwrap_or(fallthrough), + BlockKind::Cond => { + if state.cond { + block.target.unwrap_or(fallthrough) + } else { + fallthrough + } + } + BlockKind::Call => { + if block.return_target.is_none() { + state.write_reg(REG_RA, fallthrough); + } + block.target.unwrap_or(fallthrough) + } + BlockKind::Ind => state.target, + BlockKind::ICall => { + if block.return_target.is_none() { + state.write_reg(REG_RA, fallthrough); + } + state.target + } + BlockKind::Ret => { + if state.target != 0 { + state.target + } else { + state.read_reg(REG_RA) + } + } + }, + }; + state.cond = false; + state.carg = false; + state.target = 0; + next_pc +} + +fn apply_fentry( + state: &mut ExecState, + commit: &mut CommitRecord, + begin: usize, + end: usize, + stack_size: u64, +) -> Result<()> { + let old_sp = state.read_reg(REG_SP); + let new_sp = old_sp + .checked_sub(stack_size) + .context("stack underflow in FENTRY")?; + let regs = wrapped_reg_sequence(begin, end); + for (idx, reg) in regs.into_iter().enumerate() { + if reg == REG_ZERO { + continue; + } + let offset = 
stack_size + .checked_sub(((idx + 1) as u64) * 8) + .context("invalid FENTRY stack frame size")?; + state + .memory + .write_u64(new_sp + offset, state.read_reg(reg)) + .context("failed to save register during FENTRY")?; + } + writeback(state, commit, REG_SP, new_sp); + Ok(()) +} + +fn apply_fexit( + state: &mut ExecState, + commit: &mut CommitRecord, + begin: usize, + end: usize, + stack_size: u64, +) -> Result<()> { + let new_sp = state.read_reg(REG_SP).wrapping_add(stack_size); + let regs = wrapped_reg_sequence(begin, end); + for (idx, reg) in regs.into_iter().enumerate() { + if reg == REG_ZERO { + continue; + } + let value = state + .memory + .read_u64(new_sp - (((idx + 1) as u64) * 8)) + .context("failed to restore register during FEXIT")?; + state.write_reg(reg, value); + } + writeback(state, commit, REG_SP, new_sp); + Ok(()) +} + +fn apply_fret_stk( + state: &mut ExecState, + commit: &mut CommitRecord, + begin: usize, + end: usize, + stack_size: u64, +) -> Result { + apply_fexit(state, commit, begin, end, stack_size)?; + let target = state.read_reg(REG_RA); + state.block = None; + state.cond = false; + state.carg = false; + state.target = 0; + Ok(target) +} + +fn apply_fret_ra( + state: &mut ExecState, + commit: &mut CommitRecord, + begin: usize, + end: usize, + stack_size: u64, +) -> Result { + let target = state.read_reg(REG_RA); + apply_fexit(state, commit, begin, end, stack_size)?; + state.block = None; + state.cond = false; + state.carg = false; + state.target = 0; + Ok(target) +} + +fn wrapped_reg_sequence(begin: usize, end: usize) -> Vec { + let mut regs = Vec::new(); + let mut current = begin; + loop { + regs.push(current); + if current == end { + break; + } + current += 1; + if current > 23 { + current = 2; + } + } + regs +} + +struct LoadResult { + value: u64, + raw: u64, + size: u8, +} + +struct PairAccess { + load_mnemonic: &'static str, + store_mnemonic: &'static str, + elem_size: u8, + unscaled: bool, +} + +fn load_value(memory: &GuestMemory, 
mnemonic: &str, addr: u64) -> Result { + let result = match mnemonic { + "LB" | "LBI" => { + let raw = memory.read_u8_checked(addr).context("faulting LB")?; + LoadResult { + value: sign_extend(raw as u64, 8) as u64, + raw: raw as u64, + size: 1, + } + } + "LBU" | "LBUI" => { + let raw = memory.read_u8_checked(addr).context("faulting LBU")?; + LoadResult { + value: raw as u64, + raw: raw as u64, + size: 1, + } + } + "LH" | "LHI" => { + let raw = memory.read_u16_checked(addr).context("faulting LH")?; + LoadResult { + value: sign_extend(raw as u64, 16) as u64, + raw: raw as u64, + size: 2, + } + } + "LHU" | "LHUI" => { + let raw = memory.read_u16_checked(addr).context("faulting LHU")?; + LoadResult { + value: raw as u64, + raw: raw as u64, + size: 2, + } + } + "LW" | "LWI" => { + let raw = memory.read_u32_checked(addr).context("faulting LW")?; + LoadResult { + value: sign_extend(raw as u64, 32) as u64, + raw: raw as u64, + size: 4, + } + } + "LWU" | "LWUI" => { + let raw = memory.read_u32_checked(addr).context("faulting LWU")?; + LoadResult { + value: raw as u64, + raw: raw as u64, + size: 4, + } + } + "LD" | "LDI" => { + let raw = memory.read_u64_checked(addr).context("faulting LD")?; + LoadResult { + value: raw, + raw, + size: 8, + } + } + other => bail!("unsupported load mnemonic {other}"), + }; + Ok(result) +} + +fn store_value(memory: &mut GuestMemory, mnemonic: &str, addr: u64, value: u64) -> Result<()> { + match mnemonic { + "SB" | "SBI" => memory + .write_bytes_checked(addr, &[value as u8]) + .context("failed SB guest store")?, + "SH" | "SHI" => memory + .write_u16_checked(addr, value as u16) + .context("failed SH guest store")?, + "SW" | "SWI" => memory + .write_u32_checked(addr, value as u32) + .context("failed SW guest store")?, + _ => memory + .write_u64_checked(addr, value) + .context("failed SD guest store")?, + } + Ok(()) +} + +fn pair_access(mnemonic: &str) -> Result { + let (load_mnemonic, store_mnemonic, elem_size) = match mnemonic { + "HL.LBIP" | 
"HL.SBIP" => ("LB", "SB", 1), + "HL.LBUIP" => ("LBU", "SB", 1), + "HL.LHIP" | "HL.LHIP.U" | "HL.SHIP" | "HL.SHIP.U" => ("LH", "SH", 2), + "HL.LHUIP" | "HL.LHUIP.U" => ("LHU", "SH", 2), + "HL.LWIP" | "HL.LWIP.U" | "HL.SWIP" | "HL.SWIP.U" => ("LW", "SW", 4), + "HL.LWUIP" | "HL.LWUIP.U" => ("LWU", "SW", 4), + "HL.LDIP" | "HL.LDIP.U" | "HL.SDIP" | "HL.SDIP.U" => ("LD", "SD", 8), + other => bail!("unsupported pair mnemonic {other}"), + }; + Ok(PairAccess { + load_mnemonic, + store_mnemonic, + elem_size, + unscaled: mnemonic.ends_with(".U"), + }) +} + +fn size_for_store(mnemonic: &str) -> u8 { + match mnemonic { + "SB" | "SBI" => 1, + "SH" | "SHI" => 2, + "SW" | "SWI" => 4, + _ => 8, + } +} + +fn size_shift_for_store(mnemonic: &str) -> u8 { + match mnemonic { + "SB" => 0, + "SH" => 1, + "SW" => 2, + _ => 3, + } +} + +fn apply_src_r_addsub(value: u64, src_type: u8, shamt: u32) -> u64 { + let mut out = match src_type & 0x3 { + 0 => sign_extend32(value), + 1 => value as u32 as u64, + 2 => value.wrapping_neg(), + _ => value, + }; + if shamt != 0 { + out = out.wrapping_shl(shamt & 0x3f); + } + out +} + +fn apply_src_r_logic(value: u64, src_type: u8, shamt: u32) -> u64 { + let mut out = match src_type & 0x3 { + 0 => sign_extend32(value), + 1 => value as u32 as u64, + 2 => !value, + _ => value, + }; + if shamt != 0 { + out = out.wrapping_shl(shamt & 0x3f); + } + out +} + +fn sign_extend(value: u64, width: u8) -> i64 { + if width == 0 { + return 0; + } + let shift = 64 - width; + ((value << shift) as i64) >> shift +} + +fn sign_extend32(value: u64) -> u64 { + (value as u32 as i32 as i64) as u64 +} + +fn unsigned_div(lhs: u64, rhs: u64) -> u64 { + if rhs == 0 { u64::MAX } else { lhs / rhs } +} + +fn unsigned_rem(lhs: u64, rhs: u64) -> u64 { + if rhs == 0 { lhs } else { lhs % rhs } +} + +fn signed_div(lhs: u64, rhs: u64) -> u64 { + let lhs = lhs as i64; + let rhs = rhs as i64; + if rhs == 0 { + (-1i64) as u64 + } else if lhs == i64::MIN && rhs == -1 { + lhs as u64 + } else { + 
(lhs / rhs) as u64 + } +} + +fn signed_rem(lhs: u64, rhs: u64) -> u64 { + let lhs = lhs as i64; + let rhs = rhs as i64; + if rhs == 0 { + lhs as u64 + } else if lhs == i64::MIN && rhs == -1 { + 0 + } else { + (lhs % rhs) as u64 + } +} + +fn resolve_open_path(runtime: &GuestRuntime, dirfd: i32, path: &str) -> String { + if PathBuf::from(path).is_absolute() { + return path.to_string(); + } + if dirfd == GUEST_AT_FDCWD { + if let Some(workdir) = &runtime.config.workdir { + return workdir.join(path).display().to_string(); + } + } + path.to_string() +} + +fn host_fstat(host_fd: i32) -> std::result::Result { + let mut stat = unsafe { std::mem::zeroed::() }; + let rc = unsafe { libc::fstat(host_fd, &mut stat as *mut libc::stat) }; + if rc != 0 { + return Err(last_errno()); + } + Ok(guest_linux_stat_from_host(&stat)) +} + +fn host_fstatat( + state: &ExecState, + runtime: &GuestRuntime, + dirfd: i32, + path: &str, + flags: i32, +) -> std::result::Result { + let resolved = resolve_open_path(runtime, dirfd, path); + let c_path = CString::new(resolved.as_str()).map_err(|_| GUEST_EINVAL)?; + let host_flags = if flags & GUEST_AT_SYMLINK_NOFOLLOW != 0 { + libc::AT_SYMLINK_NOFOLLOW + } else { + 0 + }; + let mut stat = unsafe { std::mem::zeroed::() }; + let rc = if dirfd == GUEST_AT_FDCWD || resolved != path { + unsafe { + libc::fstatat( + libc::AT_FDCWD, + c_path.as_ptr(), + &mut stat as *mut libc::stat, + host_flags, + ) + } + } else { + let host_dirfd = state.host_fd(dirfd as u64)?; + unsafe { + libc::fstatat( + host_dirfd, + c_path.as_ptr(), + &mut stat as *mut libc::stat, + host_flags, + ) + } + }; + if rc != 0 { + return Err(last_errno()); + } + Ok(guest_linux_stat_from_host(&stat)) +} + +fn guest_linux_stat_from_host(stat: &libc::stat) -> GuestLinuxStat { + GuestLinuxStat { + dev: stat.st_dev as u64, + ino: stat.st_ino as u64, + mode: stat.st_mode as u32, + nlink: stat.st_nlink as u32, + uid: stat.st_uid, + gid: stat.st_gid, + rdev: stat.st_rdev as u64, + size: stat.st_size 
as i64, + blksize: stat.st_blksize as i32, + blocks: stat.st_blocks as i64, + atime_sec: stat.st_atime as i64, + atime_nsec: stat.st_atime_nsec as u64, + mtime_sec: stat.st_mtime as i64, + mtime_nsec: stat.st_mtime_nsec as u64, + ctime_sec: stat.st_ctime as i64, + ctime_nsec: stat.st_ctime_nsec as u64, + } +} + +fn write_guest_linux_stat(memory: &mut GuestMemory, addr: u64, stat: GuestLinuxStat) -> Result<()> { + let mut bytes = [0u8; GUEST_LINUX_STAT_SIZE]; + write_u64_field(&mut bytes, 0, stat.dev); + write_u64_field(&mut bytes, 8, stat.ino); + write_u32_field(&mut bytes, 16, stat.mode); + write_u32_field(&mut bytes, 20, stat.nlink); + write_u32_field(&mut bytes, 24, stat.uid); + write_u32_field(&mut bytes, 28, stat.gid); + write_u64_field(&mut bytes, 32, stat.rdev); + write_i64_field(&mut bytes, 48, stat.size); + write_i32_field(&mut bytes, 56, stat.blksize); + write_i64_field(&mut bytes, 64, stat.blocks); + write_i64_field(&mut bytes, 72, stat.atime_sec); + write_u64_field(&mut bytes, 80, stat.atime_nsec); + write_i64_field(&mut bytes, 88, stat.mtime_sec); + write_u64_field(&mut bytes, 96, stat.mtime_nsec); + write_i64_field(&mut bytes, 104, stat.ctime_sec); + write_u64_field(&mut bytes, 112, stat.ctime_nsec); + memory + .write_bytes_checked(addr, &bytes) + .context("failed to write guest fstat buffer") +} + +fn write_guest_utsname(memory: &mut GuestMemory, addr: u64) -> Result<()> { + let mut bytes = [0u8; GUEST_UTSNAME_SIZE]; + write_guest_uts_field(&mut bytes, 0, "Linux")?; + write_guest_uts_field(&mut bytes, 1, "linxcoremodel")?; + write_guest_uts_field(&mut bytes, 2, "6.8.0-linx")?; + write_guest_uts_field(&mut bytes, 3, "LinxCoreModel")?; + write_guest_uts_field(&mut bytes, 4, "linx64")?; + write_guest_uts_field(&mut bytes, 5, "localdomain")?; + memory + .write_bytes_checked(addr, &bytes) + .context("failed to write guest utsname buffer") +} + +fn write_guest_uts_field(buf: &mut [u8], index: usize, text: &str) -> Result<()> { + let field = buf + 
.get_mut(index * GUEST_UTS_FIELD_BYTES..(index + 1) * GUEST_UTS_FIELD_BYTES) + .context("invalid utsname field index")?; + if text.len() >= GUEST_UTS_FIELD_BYTES { + bail!("utsname field is too long"); + } + field[..text.len()].copy_from_slice(text.as_bytes()); + Ok(()) +} + +fn write_guest_sysinfo(memory: &mut GuestMemory, addr: u64, runtime: &GuestRuntime) -> Result<()> { + let totalram = runtime.config.mem_bytes; + let mapped = runtime + .memory + .regions + .iter() + .map(|region| region.size) + .sum::(); + let bufferram = totalram / 16; + let freeram = totalram.saturating_sub(mapped.min(totalram)); + let mut bytes = [0u8; GUEST_SYSINFO_SIZE]; + + write_u64_field(&mut bytes, 0, 1); + write_u64_field(&mut bytes, 8, 0); + write_u64_field(&mut bytes, 16, 0); + write_u64_field(&mut bytes, 24, 0); + write_u64_field(&mut bytes, 32, totalram); + write_u64_field(&mut bytes, 40, freeram); + write_u64_field(&mut bytes, 48, 0); + write_u64_field(&mut bytes, 56, bufferram); + write_u64_field(&mut bytes, 64, 0); + write_u64_field(&mut bytes, 72, 0); + write_u16_field(&mut bytes, 80, 1); + write_u16_field(&mut bytes, 82, 0); + write_u64_field(&mut bytes, 88, 0); + write_u64_field(&mut bytes, 96, 0); + write_u32_field(&mut bytes, 104, 1); + + memory + .write_bytes_checked(addr, &bytes) + .context("failed to write guest sysinfo buffer") +} + +fn dispatch_futex(state: &mut ExecState, args: [u64; 6]) -> std::result::Result { + let addr = args[0]; + let op = args[1] as i32; + let val = args[2] as u32; + let cmd = op & !(FUTEX_PRIVATE | FUTEX_CLOCK_REALTIME); + + match cmd { + FUTEX_WAIT => { + let Some(current) = state.memory.read_u32_checked(addr) else { + return Err(GUEST_EFAULT); + }; + if current != val { + Err(GUEST_EAGAIN) + } else if args[3] != 0 { + Err(GUEST_ETIMEDOUT) + } else { + // Single-process mode cannot block on a real waiter queue yet. 
+ Err(GUEST_EAGAIN) + } + } + FUTEX_WAKE => Ok(0), + _ => Err(GUEST_ENOSYS), + } +} + +fn dispatch_getcwd( + state: &mut ExecState, + runtime: &GuestRuntime, + args: [u64; 6], +) -> std::result::Result { + let buf = args[0]; + let size = args[1] as usize; + if size == 0 { + return Err(GUEST_EINVAL); + } + let cwd = runtime + .config + .workdir + .clone() + .or_else(|| std::env::current_dir().ok()) + .ok_or(GUEST_ENOENT)?; + let cwd = cwd.to_string_lossy(); + if cwd.len() + 1 > size { + return Err(GUEST_ERANGE); + } + let mut bytes = cwd.as_bytes().to_vec(); + bytes.push(0); + if state.memory.write_bytes_checked(buf, &bytes).is_none() { + return Err(GUEST_EFAULT); + } + Ok(buf) +} + +fn dispatch_wait4(state: &mut ExecState, args: [u64; 6]) -> std::result::Result { + let _pid = args[0] as i64; + let _options = args[2] as i32; + if args[1] != 0 && state.memory.read_u32_checked(args[1]).is_none() { + return Err(GUEST_EFAULT); + } + if args[3] != 0 + && state + .memory + .read_bytes_checked(args[3], GUEST_RUSAGE_SIZE) + .is_none() + { + return Err(GUEST_EFAULT); + } + Err(GUEST_ECHILD) +} + +fn set_nonblocking(fd: i32) -> std::result::Result<(), i32> { + let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; + if flags < 0 { + return Err(last_errno()); + } + if unsafe { libc::fcntl(fd, libc::F_SETFL, flags | libc::O_NONBLOCK) } < 0 { + return Err(last_errno()); + } + Ok(()) +} + +fn create_pollable_pipe() -> std::result::Result<(i32, i32), i32> { + let mut pipefd = [0i32; 2]; + if unsafe { libc::pipe(pipefd.as_mut_ptr()) } < 0 { + return Err(last_errno()); + } + if let Err(errno) = set_nonblocking(pipefd[0]).and_then(|_| set_nonblocking(pipefd[1])) { + close_host_fd(pipefd[0]); + close_host_fd(pipefd[1]); + return Err(errno); + } + Ok((pipefd[0], pipefd[1])) +} + +fn close_host_fd(fd: i32) { + unsafe { + libc::close(fd); + } +} + +fn guest_epoll_to_poll_events(events: u32) -> i16 { + let mut poll_events = 0i16; + if events & (GUEST_EPOLLIN | GUEST_EPOLLRDNORM) != 0 { + 
poll_events |= libc::POLLIN; + } + if events & (GUEST_EPOLLPRI | GUEST_EPOLLRDBAND) != 0 { + poll_events |= libc::POLLPRI; + } + if events & (GUEST_EPOLLOUT | GUEST_EPOLLWRNORM | GUEST_EPOLLWRBAND) != 0 { + poll_events |= libc::POLLOUT; + } + poll_events +} + +fn poll_revents_to_guest_epoll(revents: i16) -> u32 { + let mut events = 0u32; + if revents & libc::POLLIN != 0 { + events |= GUEST_EPOLLIN | GUEST_EPOLLRDNORM; + } + if revents & libc::POLLPRI != 0 { + events |= GUEST_EPOLLPRI | GUEST_EPOLLRDBAND; + } + if revents & libc::POLLOUT != 0 { + events |= GUEST_EPOLLOUT | GUEST_EPOLLWRNORM; + } + if revents & libc::POLLERR != 0 { + events |= GUEST_EPOLLERR; + } + if revents & libc::POLLHUP != 0 { + events |= GUEST_EPOLLHUP | GUEST_EPOLLRDHUP; + } + if revents & libc::POLLNVAL != 0 { + events |= GUEST_EPOLLNVAL; + } + #[cfg(any(target_os = "linux", target_os = "android"))] + if revents & libc::POLLRDHUP != 0 { + events |= GUEST_EPOLLRDHUP; + } + events +} + +fn read_guest_epoll_event(memory: &GuestMemory, addr: u64) -> std::result::Result<(u32, u64), i32> { + let events = memory.read_u32_checked(addr).ok_or(GUEST_EFAULT)?; + let data = memory.read_u64_checked(addr + 8).ok_or(GUEST_EFAULT)?; + Ok((events, data)) +} + +fn write_guest_epoll_event( + memory: &mut GuestMemory, + addr: u64, + events: u32, + data: u64, +) -> std::result::Result<(), i32> { + if memory.write_u32_checked(addr, events).is_none() + || memory.write_u32_checked(addr + 4, 0).is_none() + || memory.write_u64_checked(addr + 8, data).is_none() + { + return Err(GUEST_EFAULT); + } + Ok(()) +} + +fn dispatch_eventfd2(state: &mut ExecState, args: [u64; 6]) -> std::result::Result { + let init = args[0]; + let flags = args[1] as i32; + if flags & !(GUEST_EFD_SEMAPHORE | GUEST_O_NONBLOCK | GUEST_O_CLOEXEC) != 0 { + return Err(GUEST_EINVAL); + } + let (read_fd, write_fd) = create_pollable_pipe()?; + if init != 0 { + let signal = [1u8; 1]; + if unsafe { libc::write(write_fd, signal.as_ptr().cast(), 
signal.len()) } < 0 { + close_host_fd(read_fd); + close_host_fd(write_fd); + return Err(last_errno()); + } + } + let guest_fd = state.alloc_guest_fd(); + let fd_flags = if flags & GUEST_O_CLOEXEC != 0 { + GUEST_FD_CLOEXEC + } else { + 0 + }; + state.insert_guest_fd(guest_fd, read_fd, flags & !GUEST_O_CLOEXEC, fd_flags); + state.special_fds.insert( + guest_fd, + SpecialFdKind::EventFd(EventFdState { + write_fd, + counter: init, + semaphore: (flags & GUEST_EFD_SEMAPHORE) != 0, + }), + ); + Ok(guest_fd) +} + +fn dispatch_eventfd_write( + memory: &mut GuestMemory, + eventfd: &mut EventFdState, + args: [u64; 6], +) -> std::result::Result { + if args[2] != 8 { + return Err(GUEST_EINVAL); + } + let value = memory.read_u64_checked(args[1]).ok_or(GUEST_EFAULT)?; + if value == u64::MAX { + return Err(GUEST_EINVAL); + } + if eventfd.counter > u64::MAX - 1 - value { + return Err(GUEST_EAGAIN); + } + let was_zero = eventfd.counter == 0; + eventfd.counter = eventfd.counter.saturating_add(value); + if was_zero && eventfd.counter != 0 { + let signal = [1u8; 1]; + if unsafe { libc::write(eventfd.write_fd, signal.as_ptr().cast(), signal.len()) } < 0 { + let errno = last_errno(); + if errno != GUEST_EAGAIN { + return Err(errno); + } + } + } + Ok(8) +} + +fn dispatch_eventfd_read( + memory: &mut GuestMemory, + read_fd: i32, + eventfd: &mut EventFdState, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + if args[2] != 8 { + return Err(GUEST_EINVAL); + } + if eventfd.counter == 0 { + return Err(GUEST_EAGAIN); + } + let value = if eventfd.semaphore { + 1 + } else { + eventfd.counter + }; + eventfd.counter -= value; + if eventfd.counter == 0 { + let mut byte = [0u8; 1]; + unsafe { + libc::read(read_fd, byte.as_mut_ptr().cast(), byte.len()); + }; + } + if memory.write_u64_checked(args[1], value).is_none() { + return Err(GUEST_EFAULT); + } + record_store(commit, args[1], value, 8); + Ok(8) +} + +fn dispatch_epoll_create1(state: &mut ExecState, args: [u64; 6]) -> 
std::result::Result<u64, i32> {
    let flags = args[0] as i32;
    if flags & !(GUEST_O_CLOEXEC | GUEST_O_NONBLOCK) != 0 {
        return Err(GUEST_EINVAL);
    }
    // Back the epoll instance with a host pipe so the guest fd itself is pollable.
    let (read_fd, write_fd) = create_pollable_pipe()?;
    let guest_fd = state.alloc_guest_fd();
    let fd_flags = if flags & GUEST_O_CLOEXEC != 0 {
        GUEST_FD_CLOEXEC
    } else {
        0
    };
    state.insert_guest_fd(guest_fd, read_fd, flags & !GUEST_O_CLOEXEC, fd_flags);
    state.special_fds.insert(
        guest_fd,
        SpecialFdKind::Epoll(EpollState {
            wake_write_fd: write_fd,
            registrations: BTreeMap::new(),
        }),
    );
    Ok(guest_fd)
}

/// Emulates `epoll_ctl(2)`: add, modify, or delete an interest-list
/// registration for `target_fd` on the guest epoll instance `epfd`.
fn dispatch_epoll_ctl(state: &mut ExecState, args: [u64; 6]) -> std::result::Result<u64, i32> {
    let epfd = args[0];
    let op = args[1] as i32;
    let target_fd = args[2];
    let event_ptr = args[3];
    // An epoll fd must not watch itself; nesting epoll instances is unsupported here.
    if target_fd == epfd {
        return Err(GUEST_EINVAL);
    }
    if matches!(
        state.special_fds.get(&target_fd),
        Some(SpecialFdKind::Epoll(_))
    ) {
        return Err(GUEST_EINVAL);
    }
    // Validate that the target guest fd maps to a live host fd.
    state.host_fd(target_fd)?;
    // Read the event payload before taking a mutable borrow on the epoll state.
    let registration = match op {
        GUEST_EPOLL_CTL_ADD | GUEST_EPOLL_CTL_MOD => {
            if event_ptr == 0 {
                return Err(GUEST_EFAULT);
            }
            let (events, data) = read_guest_epoll_event(&state.memory, event_ptr)?;
            Some(GuestEpollRegistration {
                guest_fd: target_fd,
                events,
                data,
            })
        }
        GUEST_EPOLL_CTL_DEL => None,
        _ => return Err(GUEST_EINVAL),
    };
    let Some(SpecialFdKind::Epoll(epoll)) = state.special_fds.get_mut(&epfd) else {
        return Err(GUEST_EBADF);
    };
    match op {
        GUEST_EPOLL_CTL_ADD => {
            if epoll.registrations.contains_key(&target_fd) {
                return Err(GUEST_EEXIST);
            }
            epoll.registrations.insert(target_fd, registration.unwrap());
        }
        GUEST_EPOLL_CTL_MOD => {
            if !epoll.registrations.contains_key(&target_fd) {
                return Err(GUEST_ENOENT);
            }
            epoll.registrations.insert(target_fd, registration.unwrap());
        }
        GUEST_EPOLL_CTL_DEL => {
            if epoll.registrations.remove(&target_fd).is_none() {
                return Err(GUEST_ENOENT);
            }
        }
        _ => unreachable!(),
    }
    Ok(0)
}

/// Emulates `epoll_pwait(2)` by translating the interest list into a single
/// host `poll(2)` call and mapping the results back to guest epoll events.
///
/// Returns the number of events written to `events_ptr` (capped at
/// `maxevents`), or a guest errno.
fn dispatch_epoll_pwait(
    state: &mut ExecState,
    commit: &mut CommitRecord,
    args: [u64; 6],
) -> std::result::Result<u64, i32> {
    let epfd = args[0];
    let events_ptr = args[1];
    let maxevents = usize::try_from(args[2]).map_err(|_| GUEST_EINVAL)?;
    let timeout_ms = i32::try_from(args[3] as i64).map_err(|_| GUEST_EINVAL)?;
    let sigmask_ptr = args[4];
    let sigset_size = usize::try_from(args[5]).map_err(|_| GUEST_EINVAL)?;
    if maxevents == 0 {
        return Err(GUEST_EINVAL);
    }
    validate_guest_sigmask(&state.memory, sigmask_ptr, sigset_size)?;

    let registrations = match state.special_fds.get(&epfd) {
        Some(SpecialFdKind::Epoll(epoll)) => {
            epoll.registrations.values().cloned().collect::<Vec<_>>()
        }
        _ => return Err(GUEST_EBADF),
    };
    let mut pollfds = Vec::with_capacity(registrations.len());
    for registration in &registrations {
        let host_fd = state.host_fd(registration.guest_fd)?;
        pollfds.push(libc::pollfd {
            fd: host_fd,
            events: guest_epoll_to_poll_events(registration.events),
            revents: 0,
        });
    }
    let rc = unsafe {
        libc::poll(
            pollfds.as_mut_ptr(),
            pollfds.len() as libc::nfds_t,
            timeout_ms,
        )
    };
    if rc < 0 {
        return Err(last_errno());
    }

    let mut ready_count = 0usize;
    let mut oneshot_remove = Vec::new();
    for (registration, pollfd) in registrations.iter().zip(pollfds.iter()) {
        // BUG FIX: EPOLLET/EPOLLONESHOT are registration modifiers and are
        // never reported back by the kernel; the old code OR'd them into
        // `revents`, which made every edge-triggered or oneshot registration
        // look permanently ready (and dropped oneshot entries) even when
        // poll(2) reported nothing. Returned events are the poll results
        // masked by the registered interest set plus the always-delivered
        // EPOLLERR|EPOLLHUP. (POLLNVAL is intentionally dropped: the kernel
        // never reports EPOLLNVAL via epoll.)
        let revents = poll_revents_to_guest_epoll(pollfd.revents)
            & (registration.events | GUEST_EPOLLERR | GUEST_EPOLLHUP);
        if revents == 0 {
            continue;
        }
        if ready_count < maxevents {
            let addr = events_ptr + (ready_count as u64) * GUEST_EPOLL_EVENT_SIZE;
            write_guest_epoll_event(&mut state.memory, addr, revents, registration.data)?;
        }
        ready_count += 1;
        if registration.events & GUEST_EPOLLONESHOT != 0 {
            // NOTE(review): the kernel disarms (keeps) a oneshot registration
            // so EPOLL_CTL_MOD can rearm it; this emulation removes it instead.
            oneshot_remove.push(registration.guest_fd);
        }
    }

    if let Some(SpecialFdKind::Epoll(epoll)) = state.special_fds.get_mut(&epfd) {
        for guest_fd in oneshot_remove {
            epoll.registrations.remove(&guest_fd);
        }
    }

    let produced = ready_count.min(maxevents);
    if produced != 0 {
        record_store(
            commit,
            events_ptr,
            produced as u64,
            trace_size(produced * GUEST_EPOLL_EVENT_SIZE as usize),
        );
    }
    Ok(produced as u64)
}

/// Reads a guest `timespec` at `timeout_ptr` and converts it to poll(2)
/// milliseconds, rounding nanoseconds up. A null pointer means "block
/// forever" (-1). Rejects out-of-range seconds/nanoseconds with EINVAL.
fn parse_guest_timeout_ms(memory: &GuestMemory, timeout_ptr: u64) -> std::result::Result<i32, i32> {
    if timeout_ptr == 0 {
        return Ok(-1);
    }
    let sec = memory.read_u64_checked(timeout_ptr).ok_or(GUEST_EFAULT)?;
    let nsec = memory
        .read_u64_checked(timeout_ptr + 8)
        .ok_or(GUEST_EFAULT)?;
    let sec = i64::try_from(sec).map_err(|_| GUEST_EINVAL)?;
    let nsec = i64::try_from(nsec).map_err(|_| GUEST_EINVAL)?;
    // `sec < 0` is unreachable after a successful u64 -> i64 conversion but is
    // kept as a defensive guard mirroring the kernel's EINVAL check.
    if sec < 0 || !(0..1_000_000_000).contains(&nsec) {
        return Err(GUEST_EINVAL);
    }
    let millis = sec
        .saturating_mul(1000)
        .saturating_add((nsec + 999_999) / 1_000_000);
    Ok(millis.min(i32::MAX as i64) as i32)
}

/// Checks that a guest sigmask argument is well-formed and readable; the mask
/// contents are otherwise ignored by this single-threaded emulation.
fn validate_guest_sigmask(
    memory: &GuestMemory,
    sigmask_ptr: u64,
    sigset_size: usize,
) -> std::result::Result<(), i32> {
    if sigmask_ptr == 0 {
        return Ok(());
    }
    if sigset_size == 0 || sigset_size > GUEST_SIGSET_MAX_BYTES {
        return Err(GUEST_EINVAL);
    }
    if memory
        .read_bytes_checked(sigmask_ptr, sigset_size)
        .is_none()
    {
        return Err(GUEST_EFAULT);
    }
    Ok(())
}

/// Decodes a guest `fd_set` bitmap at `addr` into a per-fd boolean vector of
/// length `nfds` (little-endian 64-bit words, one bit per descriptor).
fn read_guest_fd_set(
    memory: &GuestMemory,
    addr: u64,
    nfds: usize,
) -> std::result::Result<Vec<bool>, i32> {
    let bytes = memory
        .read_bytes_checked(addr, GUEST_FD_SET_SIZE)
        .ok_or(GUEST_EFAULT)?;
    let mut set = vec![false; nfds];
    for fd in 0..nfds {
        let word = fd / 64;
        let bit = fd % 64;
        let mut raw = [0u8; 8];
        raw.copy_from_slice(&bytes[word * 8..word * 8 + 8]);
        let bits = u64::from_le_bytes(raw);
        set[fd] = ((bits >> bit) & 1) != 0;
    }
    Ok(set)
}

/// Encodes a per-fd readiness vector back into a guest `fd_set` bitmap,
/// clearing all bits not present in `ready`.
fn write_guest_fd_set(
    memory: &mut GuestMemory,
    addr: u64,
    ready: &[bool],
) -> std::result::Result<(), i32> {
    let mut bytes = [0u8; GUEST_FD_SET_SIZE];
    for (fd, is_set) in ready.iter().copied().enumerate() {
        if !is_set {
            continue;
        }
        let word = fd / 64;
        let bit = fd
% 64;
        let range = word * 8..word * 8 + 8;
        let mut raw = [0u8; 8];
        raw.copy_from_slice(&bytes[range.clone()]);
        let value = u64::from_le_bytes(raw) | (1u64 << bit);
        bytes[range].copy_from_slice(&value.to_le_bytes());
    }
    memory
        .write_bytes_checked(addr, &bytes)
        .ok_or(GUEST_EFAULT)?;
    Ok(())
}

/// Emulates `pselect6(2)` on top of a single host `poll(2)` call.
///
/// Reads the guest read/write/except `fd_set`s, polls the mapped host fds,
/// writes the ready sets back, and returns the POSIX select count: the total
/// number of bits set across all three returned sets (a descriptor ready in
/// two sets counts twice), or a guest errno.
fn dispatch_pselect6(
    state: &mut ExecState,
    commit: &mut CommitRecord,
    args: [u64; 6],
) -> std::result::Result<u64, i32> {
    let nfds = usize::try_from(args[0]).map_err(|_| GUEST_EINVAL)?;
    if nfds > GUEST_FD_SET_SIZE * 8 {
        return Err(GUEST_EINVAL);
    }
    let timeout_ms = parse_guest_timeout_ms(&state.memory, args[4])?;

    // args[5] points at the { sigmask_ptr, sigset_size } pair pselect6 uses.
    if args[5] != 0 {
        let sigmask_ptr = state.memory.read_u64_checked(args[5]).ok_or(GUEST_EFAULT)?;
        let sigset_size = state
            .memory
            .read_u64_checked(args[5] + 8)
            .ok_or(GUEST_EFAULT)?;
        let sigset_size = usize::try_from(sigset_size).map_err(|_| GUEST_EINVAL)?;
        validate_guest_sigmask(&state.memory, sigmask_ptr, sigset_size)?;
    }

    let read_req = if args[1] != 0 {
        Some(read_guest_fd_set(&state.memory, args[1], nfds)?)
    } else {
        None
    };
    let write_req = if args[2] != 0 {
        Some(read_guest_fd_set(&state.memory, args[2], nfds)?)
    } else {
        None
    };
    let except_req = if args[3] != 0 {
        Some(read_guest_fd_set(&state.memory, args[3], nfds)?)
    } else {
        None
    };

    let mut pollfds = Vec::new();
    for fd in 0..nfds {
        let wants_read = read_req.as_ref().map(|set| set[fd]).unwrap_or(false);
        let wants_write = write_req.as_ref().map(|set| set[fd]).unwrap_or(false);
        let wants_except = except_req.as_ref().map(|set| set[fd]).unwrap_or(false);
        if !(wants_read || wants_write || wants_except) {
            continue;
        }
        let host_fd = state.host_fd(fd as u64)?;
        let mut events = 0i16;
        if wants_read {
            events |= libc::POLLIN;
        }
        if wants_write {
            events |= libc::POLLOUT;
        }
        if wants_except {
            events |= libc::POLLPRI;
        }
        pollfds.push((
            fd,
            libc::pollfd {
                fd: host_fd,
                events,
                revents: 0,
            },
        ));
    }

    let mut host_only: Vec<libc::pollfd> = pollfds.iter().map(|(_, pfd)| *pfd).collect();
    let rc = unsafe {
        libc::poll(
            host_only.as_mut_ptr(),
            host_only.len() as libc::nfds_t,
            timeout_ms,
        )
    };
    if rc < 0 {
        return Err(last_errno());
    }

    let mut read_ready = vec![false; nfds];
    let mut write_ready = vec![false; nfds];
    let mut except_ready = vec![false; nfds];
    let mut ready_count = 0u64;
    for ((fd, _), polled) in pollfds.iter().zip(host_only.iter()) {
        // BUG FIX: POSIX select returns the total number of bits set across
        // all three sets, not the number of ready descriptors, and a
        // descriptor may only be marked in a set the caller actually
        // requested it in. The old code counted each fd at most once and
        // marked POLLHUP/POLLERR descriptors readable even when the caller
        // never put them in readfds.
        let wants_read = read_req.as_ref().map(|set| set[*fd]).unwrap_or(false);
        let wants_write = write_req.as_ref().map(|set| set[*fd]).unwrap_or(false);
        let wants_except = except_req.as_ref().map(|set| set[*fd]).unwrap_or(false);
        if wants_read && polled.revents & (libc::POLLIN | libc::POLLHUP | libc::POLLERR) != 0 {
            read_ready[*fd] = true;
            ready_count += 1;
        }
        if wants_write && polled.revents & libc::POLLOUT != 0 {
            write_ready[*fd] = true;
            ready_count += 1;
        }
        if wants_except && polled.revents & (libc::POLLPRI | libc::POLLERR) != 0 {
            except_ready[*fd] = true;
            ready_count += 1;
        }
    }

    // Write back each requested set (cleared sets on timeout are correct).
    if read_req.is_some() {
        write_guest_fd_set(&mut state.memory, args[1], &read_ready)?;
    }
    if write_req.is_some() {
        write_guest_fd_set(&mut state.memory, args[2], &write_ready)?;
    }
    if except_req.is_some() {
        write_guest_fd_set(&mut state.memory, args[3], &except_ready)?;
    }

    let traced_bytes = (args[1] != 0) as usize * GUEST_FD_SET_SIZE
        + (args[2] != 0) as usize * GUEST_FD_SET_SIZE
        + (args[3] != 0) as usize * GUEST_FD_SET_SIZE;
    if traced_bytes != 0 {
        let trace_addr = if args[1] != 0 {
            args[1]
        } else if args[2] != 0 {
            args[2]
        } else {
            args[3]
        };
        record_store(commit, trace_addr, ready_count, trace_size(traced_bytes));
    }
    Ok(ready_count)
}

/// Emulates `ppoll(2)`: reads the guest pollfd array, polls the mapped host
/// fds, writes back per-entry `revents`, and returns the ready count.
fn dispatch_ppoll(
    state: &mut ExecState,
    commit: &mut CommitRecord,
    args: [u64; 6],
) -> std::result::Result<u64, i32> {
    let fds_addr = args[0];
    let nfds = usize::try_from(args[1]).map_err(|_| GUEST_EINVAL)?;
    let timeout_ptr = args[2];
    let sigmask_ptr = args[3];
    let sigset_size = usize::try_from(args[4]).map_err(|_| GUEST_EINVAL)?;

    validate_guest_sigmask(&state.memory, sigmask_ptr, sigset_size)?;
    let timeout_ms = parse_guest_timeout_ms(&state.memory, timeout_ptr)?;

    let mut host_fds = Vec::with_capacity(nfds);
    let mut forced_revents = vec![0u16; nfds];
    let mut forced_ready = 0u64;
    for idx in 0..nfds {
        let base = fds_addr + (idx as u64) * GUEST_POLLFD_SIZE;
        let guest_fd_raw = state.memory.read_u32_checked(base).ok_or(GUEST_EFAULT)?;
        let guest_fd = guest_fd_raw as i32;
        let events = state
            .memory
            .read_u16_checked(base + 4)
            .ok_or(GUEST_EFAULT)?
as i16; + let mut host_fd = -1i32; + if guest_fd >= 0 { + match state.host_fd(guest_fd as u64) { + Ok(mapped) => host_fd = mapped, + Err(_) => { + forced_revents[idx] = GUEST_POLLNVAL; + forced_ready += 1; + } + } + } + host_fds.push(libc::pollfd { + fd: host_fd, + events, + revents: 0, + }); + } + + let rc = unsafe { + libc::poll( + host_fds.as_mut_ptr(), + host_fds.len() as libc::nfds_t, + timeout_ms, + ) + }; + if rc < 0 { + return Err(last_errno()); + } + + let mut ready = forced_ready + rc as u64; + for (idx, host_fd) in host_fds.iter().enumerate() { + let base = fds_addr + (idx as u64) * GUEST_POLLFD_SIZE; + let revents = if forced_revents[idx] != 0 { + forced_revents[idx] + } else { + host_fd.revents as u16 + }; + if forced_revents[idx] == 0 && host_fd.fd < 0 && revents != 0 { + ready += 1; + } + if state.memory.write_u16_checked(base + 6, revents).is_none() { + return Err(GUEST_EFAULT); + } + } + record_store( + commit, + fds_addr, + ready, + trace_size(nfds * GUEST_POLLFD_SIZE as usize), + ); + Ok(ready) +} + +fn dispatch_prctl( + state: &mut ExecState, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + match args[0] { + GUEST_PR_SET_NAME => { + state.thread_name = read_guest_prctl_name(&state.memory, args[1])?; + Ok(0) + } + GUEST_PR_GET_NAME => { + if state + .memory + .write_bytes_checked(args[1], &state.thread_name) + .is_none() + { + return Err(GUEST_EFAULT); + } + record_store(commit, args[1], 0, GUEST_PRCTL_NAME_BYTES as u8); + Ok(0) + } + _ => Err(GUEST_ENOSYS), + } +} + +fn dispatch_getrandom( + state: &mut ExecState, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + let buf = args[0]; + let len = args[1] as usize; + let flags = args[2]; + if flags & !0x3 != 0 { + return Err(GUEST_EINVAL); + } + let mut bytes = vec![0u8; len]; + for byte in &mut bytes { + state.random_state ^= state.random_state << 13; + state.random_state ^= state.random_state >> 7; + state.random_state ^= state.random_state << 
17; + *byte = state.random_state as u8; + } + if state.memory.write_bytes_checked(buf, &bytes).is_none() { + return Err(GUEST_EFAULT); + } + record_store(commit, buf, 0, trace_size(len)); + Ok(len as u64) +} + +fn dispatch_membarrier(state: &mut ExecState, args: [u64; 6]) -> std::result::Result { + if args[1] != 0 { + return Err(GUEST_EINVAL); + } + match args[0] { + GUEST_MEMBARRIER_CMD_QUERY => Ok(GUEST_MEMBARRIER_CMD_PRIVATE_EXPEDITED + | GUEST_MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED), + GUEST_MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED => { + state.membarrier_private_expedited = true; + Ok(0) + } + GUEST_MEMBARRIER_CMD_PRIVATE_EXPEDITED => { + if state.membarrier_private_expedited { + Ok(0) + } else { + Err(GUEST_EPERM) + } + } + _ => Err(GUEST_ENOSYS), + } +} + +fn dispatch_sigaltstack( + state: &mut ExecState, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + let new_addr = args[0]; + let old_addr = args[1]; + + if old_addr != 0 { + write_guest_sigaltstack( + &mut state.memory, + old_addr, + state.alt_stack_sp, + state.alt_stack_flags, + state.alt_stack_size, + )?; + record_store( + commit, + old_addr, + state.alt_stack_sp, + GUEST_SIGALTSTACK_SIZE as u8, + ); + } + + if new_addr != 0 { + let (sp, flags, size) = read_guest_sigaltstack(&state.memory, new_addr)?; + if flags & GUEST_SS_ONSTACK != 0 || flags & !(GUEST_SS_DISABLE | GUEST_SS_ONSTACK) != 0 { + return Err(GUEST_EINVAL); + } + if flags & GUEST_SS_DISABLE != 0 { + state.alt_stack_sp = 0; + state.alt_stack_size = 0; + state.alt_stack_flags = GUEST_SS_DISABLE; + } else { + if sp == 0 { + return Err(GUEST_EINVAL); + } + if size < GUEST_MINSIGSTKSZ { + return Err(GUEST_ENOMEM); + } + state.alt_stack_sp = sp; + state.alt_stack_size = size; + state.alt_stack_flags = 0; + } + } + Ok(0) +} + +fn dispatch_rseq( + state: &mut ExecState, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + let addr = args[0]; + let len = u32::try_from(args[1]).map_err(|_| 
GUEST_EINVAL)?; + let flags = args[2]; + let sig = u32::try_from(args[3]).map_err(|_| GUEST_EINVAL)?; + + if flags == GUEST_RSEQ_FLAG_UNREGISTER { + if state.rseq_addr == 0 || state.rseq_addr != addr { + return Err(GUEST_EINVAL); + } + state.rseq_addr = 0; + state.rseq_len = 0; + state.rseq_sig = 0; + return Ok(0); + } + if flags != 0 { + return Err(GUEST_EINVAL); + } + if len < GUEST_RSEQ_MIN_LEN { + return Err(GUEST_EINVAL); + } + if sig != GUEST_RSEQ_SIG { + return Err(GUEST_EINVAL); + } + if addr == 0 { + return Err(GUEST_EFAULT); + } + initialize_guest_rseq(&mut state.memory, addr, len, 0)?; + state.rseq_addr = addr; + state.rseq_len = len; + state.rseq_sig = sig; + record_store(commit, addr, 0, trace_size(len as usize)); + Ok(0) +} + +fn dispatch_fcntl(state: &mut ExecState, args: [u64; 6]) -> std::result::Result { + let guest_fd = args[0]; + state.host_fd(guest_fd)?; + let cmd = args[1] as i32; + let value = args[2] as i32; + match cmd { + GUEST_F_DUPFD => { + let min_fd = u64::try_from(value.max(0)).map_err(|_| GUEST_EINVAL)?; + state.duplicate_guest_fd(guest_fd, min_fd, false) + } + GUEST_F_DUPFD_CLOEXEC => { + let min_fd = u64::try_from(value.max(0)).map_err(|_| GUEST_EINVAL)?; + state.duplicate_guest_fd(guest_fd, min_fd, true) + } + GUEST_F_GETFD => Ok(state.fd_fd_flags.get(&guest_fd).copied().unwrap_or(0) as u64), + GUEST_F_SETFD => { + state.fd_fd_flags.insert(guest_fd, value & GUEST_FD_CLOEXEC); + Ok(0) + } + GUEST_F_GETFL => Ok(state.fd_status_flags.get(&guest_fd).copied().unwrap_or(0) as u64), + GUEST_F_SETFL => { + let entry = state.fd_status_flags.entry(guest_fd).or_insert(0); + *entry = (*entry & 0b11) | (value & !0b11); + Ok(0) + } + _ => Err(GUEST_ENOSYS), + } +} + +fn dispatch_ioctl( + state: &mut ExecState, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + let guest_fd = args[0]; + state.host_fd(guest_fd)?; + if guest_fd > 2 { + return Err(GUEST_ENOTTY); + } + match args[1] { + GUEST_TIOCGWINSZ => { + let addr = 
args[2]; + if state.memory.write_u16_checked(addr, 24).is_none() + || state.memory.write_u16_checked(addr + 2, 80).is_none() + || state.memory.write_u16_checked(addr + 4, 0).is_none() + || state.memory.write_u16_checked(addr + 6, 0).is_none() + { + return Err(GUEST_EFAULT); + } + record_store(commit, addr, 0, 8); + Ok(0) + } + GUEST_TIOCGPGRP => { + let addr = args[2]; + if state + .memory + .write_u32_checked(addr, state.current_pgrp) + .is_none() + { + return Err(GUEST_EFAULT); + } + record_store(commit, addr, state.current_pgrp as u64, 4); + Ok(0) + } + GUEST_TIOCSPGRP => { + let addr = args[2]; + let Some(pgrp) = state.memory.read_u32_checked(addr) else { + return Err(GUEST_EFAULT); + }; + state.current_pgrp = pgrp; + Ok(0) + } + _ => Err(GUEST_ENOTTY), + } +} + +fn dispatch_dup3(state: &mut ExecState, args: [u64; 6]) -> std::result::Result { + let old_guest_fd = args[0]; + let new_guest_fd = args[1]; + let flags = args[2] as i32; + if flags & !GUEST_O_CLOEXEC != 0 { + return Err(GUEST_EINVAL); + } + state.duplicate_guest_fd_to(old_guest_fd, new_guest_fd, (flags & GUEST_O_CLOEXEC) != 0) +} + +fn dispatch_pipe2( + state: &mut ExecState, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + let guest_pipefd = args[0]; + let flags = args[1] as i32; + if flags & !(GUEST_O_CLOEXEC | GUEST_O_NONBLOCK) != 0 { + return Err(GUEST_EINVAL); + } + + let mut host_pipe = [-1i32; 2]; + let rc = unsafe { libc::pipe(host_pipe.as_mut_ptr()) }; + if rc != 0 { + return Err(last_errno()); + } + + let read_guest_fd = state.alloc_guest_fd(); + let write_guest_fd = state.alloc_guest_fd_from(read_guest_fd + 1); + if state + .memory + .write_u32_checked(guest_pipefd, read_guest_fd as u32) + .is_none() + || state + .memory + .write_u32_checked(guest_pipefd + 4, write_guest_fd as u32) + .is_none() + { + unsafe { + libc::close(host_pipe[0]); + libc::close(host_pipe[1]); + } + return Err(GUEST_EFAULT); + } + + let fd_flags = if flags & GUEST_O_CLOEXEC != 0 { + 
GUEST_FD_CLOEXEC + } else { + 0 + }; + let status_flags = flags & !GUEST_O_CLOEXEC; + let read_status = status_flags & !GUEST_O_WRONLY; + let write_status = (status_flags & !GUEST_O_RDONLY) | GUEST_O_WRONLY; + state.insert_guest_fd(read_guest_fd, host_pipe[0], read_status, fd_flags); + state.insert_guest_fd(write_guest_fd, host_pipe[1], write_status, fd_flags); + record_store(commit, guest_pipefd, read_guest_fd, 8); + Ok(0) +} + +fn dispatch_madvise(state: &mut ExecState, args: [u64; 6]) -> std::result::Result { + let addr = args[0]; + let size = args[1]; + if size == 0 { + return Ok(0); + } + if !state.memory.is_range_mapped(addr, size) { + return Err(GUEST_ENOMEM); + } + match args[2] { + 0 | 1 | 2 | 3 | 4 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 + | 100 | 101 => Ok(0), + _ => Err(GUEST_EINVAL), + } +} + +fn dispatch_prlimit64(state: &mut ExecState, args: [u64; 6]) -> std::result::Result { + let pid = args[0]; + let resource = args[1] as u32; + let new_limit_ptr = args[2]; + let old_limit_ptr = args[3]; + + if pid != 0 && pid != state.current_pid { + return Err(GUEST_ESRCH); + } + + let current = state.rlimit_for(resource).ok_or(GUEST_EINVAL)?; + if old_limit_ptr != 0 { + write_guest_rlimit(&mut state.memory, old_limit_ptr, current)?; + } + if new_limit_ptr != 0 { + let new_limit = read_guest_rlimit(&state.memory, new_limit_ptr)?; + state.set_rlimit(resource, new_limit)?; + } + Ok(0) +} + +fn dispatch_newfstatat( + state: &mut ExecState, + runtime: &GuestRuntime, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + let dirfd = args[0] as i64 as i32; + let stat_ptr = args[2]; + let flags = args[3] as i32; + if flags & !(GUEST_AT_EMPTY_PATH | GUEST_AT_SYMLINK_NOFOLLOW) != 0 { + return Err(GUEST_EINVAL); + } + let path = if args[1] == 0 { + String::new() + } else { + state + .memory + .read_c_string_checked(args[1], MAX_C_STRING) + .ok_or(GUEST_EFAULT)? 
+ }; + let stat = if path.is_empty() && (flags & GUEST_AT_EMPTY_PATH) != 0 { + let host_fd = state.host_fd(args[0])?; + host_fstat(host_fd)? + } else { + host_fstatat(state, runtime, dirfd, &path, flags)? + }; + write_guest_linux_stat(&mut state.memory, stat_ptr, stat).map_err(|_| GUEST_EFAULT)?; + record_store( + commit, + stat_ptr, + stat.size as u64, + GUEST_LINUX_STAT_SIZE as u8, + ); + Ok(0) +} + +fn dispatch_readlinkat( + state: &mut ExecState, + runtime: &GuestRuntime, + commit: &mut CommitRecord, + args: [u64; 6], +) -> std::result::Result { + let dirfd = args[0] as i64 as i32; + let Some(path) = state.memory.read_c_string_checked(args[1], MAX_C_STRING) else { + return Err(GUEST_EFAULT); + }; + let resolved = resolve_open_path(runtime, dirfd, &path); + let c_path = CString::new(resolved.as_str()).map_err(|_| GUEST_EINVAL)?; + let mut bytes = vec![0u8; args[3] as usize]; + let rc = if dirfd == GUEST_AT_FDCWD || resolved != path { + unsafe { libc::readlink(c_path.as_ptr(), bytes.as_mut_ptr().cast(), bytes.len()) } + } else { + let host_dirfd = state.host_fd(args[0])?; + unsafe { + libc::readlinkat( + host_dirfd, + c_path.as_ptr(), + bytes.as_mut_ptr().cast(), + bytes.len(), + ) + } + }; + if rc < 0 { + return Err(last_errno()); + } + let count = rc as usize; + if state + .memory + .write_bytes_checked(args[2], &bytes[..count]) + .is_none() + { + return Err(GUEST_EFAULT); + } + record_store(commit, args[2], 0, trace_size(count)); + Ok(count as u64) +} + +fn read_guest_sigaltstack( + memory: &GuestMemory, + addr: u64, +) -> std::result::Result<(u64, u32, u64), i32> { + let sp = memory.read_u64_checked(addr).ok_or(GUEST_EFAULT)?; + let flags = memory.read_u32_checked(addr + 8).ok_or(GUEST_EFAULT)?; + let size = memory.read_u64_checked(addr + 16).ok_or(GUEST_EFAULT)?; + Ok((sp, flags, size)) +} + +fn write_guest_sigaltstack( + memory: &mut GuestMemory, + addr: u64, + sp: u64, + flags: u32, + size: u64, +) -> std::result::Result<(), i32> { + if 
memory.write_u64_checked(addr, sp).is_none() + || memory.write_u32_checked(addr + 8, flags).is_none() + || memory.write_u32_checked(addr + 12, 0).is_none() + || memory.write_u64_checked(addr + 16, size).is_none() + { + return Err(GUEST_EFAULT); + } + Ok(()) +} + +fn read_guest_prctl_name( + memory: &GuestMemory, + addr: u64, +) -> std::result::Result<[u8; GUEST_PRCTL_NAME_BYTES], i32> { + let mut bytes = [0u8; GUEST_PRCTL_NAME_BYTES]; + for (idx, slot) in bytes.iter_mut().enumerate() { + let value = memory + .read_u8_checked(addr + idx as u64) + .ok_or(GUEST_EFAULT)?; + *slot = value; + if value == 0 { + break; + } + } + if bytes[GUEST_PRCTL_NAME_BYTES - 1] != 0 { + bytes[GUEST_PRCTL_NAME_BYTES - 1] = 0; + } + Ok(bytes) +} + +fn initialize_guest_rseq( + memory: &mut GuestMemory, + addr: u64, + len: u32, + cpu_id: u32, +) -> std::result::Result<(), i32> { + let zero = vec![0u8; len as usize]; + if memory.write_bytes_checked(addr, &zero).is_none() { + return Err(GUEST_EFAULT); + } + if memory.write_u32_checked(addr, cpu_id).is_none() + || memory.write_u32_checked(addr + 4, cpu_id).is_none() + || memory.write_u64_checked(addr + 8, 0).is_none() + || memory.write_u32_checked(addr + 16, 0).is_none() + { + return Err(GUEST_EFAULT); + } + Ok(()) +} + +fn dispatch_getres_ids( + memory: &mut GuestMemory, + addrs: [u64; 3], + ids: [u32; 3], +) -> std::result::Result<(), i32> { + for (addr, id) in addrs.into_iter().zip(ids) { + if addr == 0 { + continue; + } + if memory.write_u32_checked(addr, id).is_none() { + return Err(GUEST_EFAULT); + } + } + Ok(()) +} + +fn validate_single_id(raw: u64, current: &[u32; 3]) -> std::result::Result { + let value = raw.try_into().map_err(|_| GUEST_EINVAL)?; + if value == 0 || current.contains(&value) { + Ok(value) + } else { + Err(GUEST_EPERM) + } +} + +fn apply_setres_ids(current: [u32; 3], requested: [u64; 3]) -> std::result::Result<[u32; 3], i32> { + let mut next = current; + for (idx, raw) in requested.into_iter().enumerate() { + if raw == 
u64::MAX { + continue; + } + let value: u32 = raw.try_into().map_err(|_| GUEST_EINVAL)?; + if value != 0 && !current.contains(&value) { + return Err(GUEST_EPERM); + } + next[idx] = value; + } + Ok(next) +} + +fn read_guest_rlimit(memory: &GuestMemory, addr: u64) -> std::result::Result { + let Some(cur) = memory.read_u64_checked(addr) else { + return Err(GUEST_EFAULT); + }; + let Some(max) = memory.read_u64_checked(addr + 8) else { + return Err(GUEST_EFAULT); + }; + Ok(GuestRlimit { cur, max }) +} + +fn write_guest_rlimit( + memory: &mut GuestMemory, + addr: u64, + limit: GuestRlimit, +) -> std::result::Result<(), i32> { + if memory.write_u64_checked(addr, limit.cur).is_none() + || memory.write_u64_checked(addr + 8, limit.max).is_none() + { + return Err(GUEST_EFAULT); + } + Ok(()) +} + +fn write_u32_field(bytes: &mut [u8], offset: usize, value: u32) { + bytes[offset..offset + 4].copy_from_slice(&value.to_le_bytes()); +} + +fn write_u16_field(bytes: &mut [u8], offset: usize, value: u16) { + bytes[offset..offset + 2].copy_from_slice(&value.to_le_bytes()); +} + +fn write_i32_field(bytes: &mut [u8], offset: usize, value: i32) { + bytes[offset..offset + 4].copy_from_slice(&value.to_le_bytes()); +} + +fn write_u64_field(bytes: &mut [u8], offset: usize, value: u64) { + bytes[offset..offset + 8].copy_from_slice(&value.to_le_bytes()); +} + +fn write_i64_field(bytes: &mut [u8], offset: usize, value: i64) { + bytes[offset..offset + 8].copy_from_slice(&value.to_le_bytes()); +} + +fn empty_commit( + cycle: u64, + pc: u64, + decoded: &DecodedInstruction, + runtime: &GuestRuntime, +) -> CommitRecord { + let mut commit = CommitRecord::unsupported( + cycle, + pc, + decoded.instruction_bits, + TRAP_ILLEGAL_INST, + &runtime.block, + ); + commit.len = decoded.length_bytes(); + commit.trap_valid = 0; + commit.trap_cause = 0; + commit.traparg0 = 0; + commit.next_pc = pc + decoded.length_bytes() as u64; + commit +} + +fn stage_event(cycle: u64, runtime: &GuestRuntime, stage: &str, cause: 
&str) -> StageTraceEvent { + StageTraceEvent { + cycle, + row_id: format!("uop{cycle}"), + stage_id: stage.to_string(), + lane_id: runtime.block.lane_id.clone(), + stall: false, + cause: cause.to_string(), + checkpoint_id: None, + trap_cause: None, + traparg0: None, + target_setup_epoch: None, + boundary_epoch: None, + target_source_owner_row_id: None, + target_source_epoch: None, + target_owner_row_id: None, + target_producer_kind: None, + branch_kind: None, + return_kind: None, + call_materialization_kind: None, + target_source_kind: None, + } +} + +fn reg_field(decoded: &DecodedInstruction, names: &[&str]) -> Result { + field_u(decoded, names) + .map(|value| value as usize) + .with_context(|| format!("missing register field {:?} in {}", names, decoded.mnemonic)) +} + +fn field_u(decoded: &DecodedInstruction, names: &[&str]) -> Option { + names + .iter() + .find_map(|name| decoded.field(name).map(|field| field.value_u64)) +} + +fn need_u(decoded: &DecodedInstruction, names: &[&str]) -> Result { + field_u(decoded, names) + .with_context(|| format!("missing unsigned field {:?} in {}", names, decoded.mnemonic)) +} + +fn field_i(decoded: &DecodedInstruction, names: &[&str]) -> Result { + for name in names { + if let Some(field) = decoded.field(name) { + return Ok(field.value_i64.unwrap_or(field.value_u64 as i64)); + } + } + bail!("missing signed field {:?} in {}", names, decoded.mnemonic) +} + +fn record_src0(commit: &mut CommitRecord, reg: usize, value: u64) { + commit.src0_valid = 1; + commit.src0_reg = reg as u8; + commit.src0_data = value; +} + +fn record_src1(commit: &mut CommitRecord, reg: usize, value: u64) { + commit.src1_valid = 1; + commit.src1_reg = reg as u8; + commit.src1_data = value; +} + +fn writeback(state: &mut ExecState, commit: &mut CommitRecord, reg: usize, value: u64) { + let committed_reg = match reg { + REG_IMPLICIT_T_DST => { + state.push_t(value); + REG_T1 + } + REG_IMPLICIT_U_DST => { + state.push_u(value); + REG_U1 + } + _ => { + 
state.write_reg(reg, value); + reg + } + }; + commit.dst_valid = 1; + commit.dst_reg = committed_reg as u8; + commit.dst_data = value; + commit.wb_valid = 1; + commit.wb_rd = committed_reg as u8; + commit.wb_data = value; +} + +fn record_load(commit: &mut CommitRecord, addr: u64, data: u64, size: u8) { + commit.mem_valid = 1; + commit.mem_is_store = 0; + commit.mem_addr = addr; + commit.mem_rdata = data; + commit.mem_size = size; +} + +fn record_store(commit: &mut CommitRecord, addr: u64, data: u64, size: u8) { + commit.mem_valid = 1; + commit.mem_is_store = 1; + commit.mem_addr = addr; + commit.mem_wdata = data; + commit.mem_size = size; +} + +fn trace_size(size: usize) -> u8 { + size.min(u8::MAX as usize) as u8 +} + +fn align_up(value: u64, align: u64) -> u64 { + debug_assert!(align.is_power_of_two()); + (value + align - 1) & !(align - 1) +} + +fn align_down(value: u64, align: u64) -> u64 { + debug_assert!(align.is_power_of_two()); + value & !(align - 1) +} + +fn last_errno() -> i32 { + let host_errno = std::io::Error::last_os_error() + .raw_os_error() + .unwrap_or(libc::EINVAL); + host_errno_to_guest(host_errno) +} + +fn host_errno_to_guest(host_errno: i32) -> i32 { + match host_errno { + x if x == libc::EPERM => GUEST_EPERM, + x if x == libc::ENOENT => GUEST_ENOENT, + x if x == libc::EAGAIN => GUEST_EAGAIN, + x if x == libc::EBADF => GUEST_EBADF, + x if x == libc::EFAULT => GUEST_EFAULT, + x if x == libc::EINVAL => GUEST_EINVAL, + x if x == libc::ENOTTY => GUEST_ENOTTY, + x if x == libc::ENOMEM => GUEST_ENOMEM, + x if x == libc::ERANGE => GUEST_ERANGE, + x if x == libc::ENOSYS => GUEST_ENOSYS, + x if x == libc::ETIMEDOUT => GUEST_ETIMEDOUT, + _ => host_errno, + } +} + +fn finalize_syscall( + state: &mut ExecState, + commit: &mut CommitRecord, + result: std::result::Result, +) -> Result> { + let a0 = match result { + Ok(value) => value, + Err(errno) => (-(errno as i64)) as u64, + }; + writeback(state, commit, REG_A0, a0); + Ok(None) +} + +#[cfg(test)] +mod tests 
{ + use super::*; + use elf::{LoadedElf, SegmentImage}; + use runtime::{BootInfo, MEM_READ, MEM_WRITE, RuntimeConfig}; + use std::fs; + use std::io::Write; + use std::os::fd::AsRawFd; + use std::os::unix::fs::symlink; + use tempfile::NamedTempFile; + use tempfile::tempdir; + + #[test] + fn executes_addi_and_exit_syscall() { + let program = vec![ + enc_addi(REG_A0 as u32, REG_ZERO as u32, 7), + enc_addi(REG_A7 as u32, REG_ZERO as u32, SYS_EXIT as u32), + enc_acrc(1), + ]; + let runtime = sample_runtime(&program, &[]); + let bundle = FuncEngine + .run(&runtime, &FuncRunOptions { max_steps: 16 }) + .unwrap(); + assert_eq!(bundle.result.metrics.exit_reason, "guest_exit(7)"); + assert_eq!(bundle.result.commits.len(), 3); + } + + #[test] + fn executes_memory_store_and_load() { + let data_addr = 0x0800u32; + let program = vec![ + enc_addi(20, REG_ZERO as u32, data_addr), + enc_addi(21, REG_ZERO as u32, 11), + enc_swi(21, 20, 0), + enc_lwi(2, 20, 0), + enc_addi(REG_A7 as u32, REG_ZERO as u32, SYS_EXIT as u32), + enc_acrc(1), + ]; + let data = vec![runtime::MemoryRegion { + base: data_addr as u64, + size: 0x1000, + flags: 0b110, + data: vec![0; 0x1000], + }]; + let runtime = sample_runtime(&program, &data); + let bundle = FuncEngine + .run(&runtime, &FuncRunOptions { max_steps: 32 }) + .unwrap(); + assert_eq!(bundle.result.metrics.exit_reason, "guest_exit(11)"); + } + + #[test] + fn implicit_t_destination_feeds_t1_consumers() { + let program = vec![ + enc_addi(REG_IMPLICIT_T_DST as u32, REG_ZERO as u32, 5), + enc_addi(REG_A0 as u32, REG_T1 as u32, 6), + enc_addi(REG_A7 as u32, REG_ZERO as u32, SYS_EXIT as u32), + enc_acrc(1), + ]; + let runtime = sample_runtime(&program, &[]); + let bundle = FuncEngine + .run(&runtime, &FuncRunOptions { max_steps: 16 }) + .unwrap(); + assert_eq!(bundle.result.metrics.exit_reason, "guest_exit(11)"); + assert_eq!(bundle.result.commits[0].wb_rd as usize, REG_T1); + } + + #[test] + fn fstat_writes_linux_guest_layout() { + let stat_addr = 
0x4000u64; + let data = vec![runtime::MemoryRegion { + base: stat_addr, + size: 0x1000, + flags: 0b110, + data: vec![0; 0x1000], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut host_file = NamedTempFile::new().unwrap(); + host_file.write_all(b"stat-fixture\n").unwrap(); + host_file.flush().unwrap(); + + let host_fd = unsafe { libc::dup(host_file.as_raw_fd()) }; + assert!(host_fd >= 0); + + let guest_fd = 9u64; + state.fd_table.insert(guest_fd, host_fd); + state.write_reg(REG_A7, SYS_FSTAT); + state.write_reg(REG_A0, guest_fd); + state.write_reg(REG_A1, stat_addr); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!(state.memory.read_u64(stat_addr + 48).unwrap(), 13); + assert_eq!( + state.memory.read_u32(stat_addr + 16).unwrap() & libc::S_IFMT as u32, + libc::S_IFREG as u32 + ); + assert_eq!(state.memory.read_u64(stat_addr + 40).unwrap(), 0); + assert_eq!(state.memory.read_u32(stat_addr + 124).unwrap(), 0); + + unsafe { + libc::close(host_fd); + } + } + + #[test] + fn brk_shrink_removes_unmapped_tail() { + let runtime = sample_runtime(&[enc_acrc(1)], &[]); + let mut state = ExecState::from_runtime(&runtime); + let heap_base = state.brk_base; + + state.grow_brk(heap_base + PAGE_SIZE); + assert_eq!(state.brk_current, heap_base + PAGE_SIZE); + assert!(state.memory.read_u8(heap_base).is_some()); + + state.grow_brk(heap_base); + assert_eq!(state.brk_current, heap_base); + assert!(state.memory.read_u8(heap_base).is_none()); + assert!( + !state + .memory + .regions + .iter() + .any(|region| region.base == heap_base) + ); + } + + #[test] + fn mprotect_enforces_guest_write_buffer_permissions() { + let data_addr = 0x4000u64; + let data = vec![runtime::MemoryRegion { + base: data_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: 
vec![b'X'; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_MPROTECT); + state.write_reg(REG_A0, data_addr); + state.write_reg(REG_A1, PAGE_SIZE); + state.write_reg(REG_A2, libc::PROT_NONE as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + state.write_reg(REG_A7, SYS_WRITE); + state.write_reg(REG_A0, 1); + state.write_reg(REG_A1, data_addr); + state.write_reg(REG_A2, 1); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), (-(GUEST_EFAULT as i64)) as u64); + + state.write_reg(REG_A7, SYS_MPROTECT); + state.write_reg(REG_A0, data_addr); + state.write_reg(REG_A1, PAGE_SIZE); + state.write_reg(REG_A2, libc::PROT_READ as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + state.write_reg(REG_A7, SYS_WRITE); + state.write_reg(REG_A0, 1); + state.write_reg(REG_A1, data_addr); + state.write_reg(REG_A2, 1); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 1); + } + + #[test] + fn rt_sigprocmask_zeroes_old_mask() { + let mask_addr = 0x4000u64; + let data = vec![runtime::MemoryRegion { + base: mask_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0xFF; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_RT_SIGPROCMASK); + state.write_reg(REG_A0, 0); + state.write_reg(REG_A1, 0); + state.write_reg(REG_A2, mask_addr); + state.write_reg(REG_A3, GUEST_SIGSET_BYTES as u64); + dispatch_syscall(&mut state, &runtime, &mut 
commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!( + state + .memory + .read_bytes(mask_addr, GUEST_SIGSET_BYTES) + .unwrap(), + vec![0u8; GUEST_SIGSET_BYTES] + ); + } + + #[test] + fn uname_writes_guest_struct() { + let uts_addr = 0x4000u64; + let data = vec![runtime::MemoryRegion { + base: uts_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_UNAME); + state.write_reg(REG_A0, uts_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!(state.memory.read_c_string(uts_addr, 64).unwrap(), "Linux"); + assert_eq!( + state + .memory + .read_c_string(uts_addr + (GUEST_UTS_FIELD_BYTES * 4) as u64, 64) + .unwrap(), + "linx64" + ); + } + + #[test] + fn set_tid_address_and_identity_syscalls_match_runtime_identity() { + let runtime = sample_runtime(&[enc_acrc(1)], &[]); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + let tid_addr = 0x5000u64; + + state.write_reg(REG_A7, SYS_SET_TID_ADDRESS); + state.write_reg(REG_A0, tid_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let tid = state.read_reg(REG_A0); + + assert!(tid > 0); + assert_eq!(state.clear_child_tid, tid_addr); + + for number in [SYS_GETUID, SYS_GETEUID, SYS_GETGID, SYS_GETEGID] { + state.write_reg(REG_A7, number); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + } + } + + #[test] + fn getresuid_writes_guest_ids() { + let ids_addr = 0x4000u64; + let data = vec![runtime::MemoryRegion { + base: ids_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; 
PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.uid = 11; + state.euid = 22; + state.suid = 33; + state.write_reg(REG_A7, SYS_GETRESUID); + state.write_reg(REG_A0, ids_addr); + state.write_reg(REG_A1, ids_addr + 4); + state.write_reg(REG_A2, ids_addr + 8); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!(state.memory.read_u32(ids_addr).unwrap(), 11); + assert_eq!(state.memory.read_u32(ids_addr + 4).unwrap(), 22); + assert_eq!(state.memory.read_u32(ids_addr + 8).unwrap(), 33); + } + + #[test] + fn setuid_updates_single_process_identity() { + let runtime = sample_runtime(&[enc_acrc(1)], &[]); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.uid = 5; + state.euid = 7; + state.suid = 5; + state.write_reg(REG_A7, SYS_SETUID); + state.write_reg(REG_A0, 5); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!([state.uid, state.euid, state.suid], [5, 5, 5]); + + state.write_reg(REG_A7, SYS_SETUID); + state.write_reg(REG_A0, 9); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), (-(GUEST_EPERM as i64)) as u64); + } + + #[test] + fn setresgid_updates_selected_fields() { + let runtime = sample_runtime(&[enc_acrc(1)], &[]); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.gid = 11; + state.egid = 22; + state.sgid = 33; + state.write_reg(REG_A7, SYS_SETRESGID); + state.write_reg(REG_A0, u64::MAX); + state.write_reg(REG_A1, 11); + state.write_reg(REG_A2, 0); + dispatch_syscall(&mut state, &runtime, &mut 
commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!([state.gid, state.egid, state.sgid], [11, 11, 0]); + } + + #[test] + fn futex_wait_and_wake_follow_single_process_contract() { + let futex_addr = 0x4000u64; + let data = vec![runtime::MemoryRegion { + base: futex_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.memory.write_u32_checked(futex_addr, 7).unwrap(); + + state.write_reg(REG_A7, SYS_FUTEX); + state.write_reg(REG_A0, futex_addr); + state.write_reg(REG_A1, (FUTEX_WAIT | FUTEX_PRIVATE) as u64); + state.write_reg(REG_A2, 3); + state.write_reg(REG_A3, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), (-(GUEST_EAGAIN as i64)) as u64); + + state.write_reg(REG_A7, SYS_FUTEX); + state.write_reg(REG_A0, futex_addr); + state.write_reg(REG_A1, (FUTEX_WAKE | FUTEX_PRIVATE) as u64); + state.write_reg(REG_A2, 1); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + } + + #[test] + fn sysinfo_writes_linux_guest_layout() { + let info_addr = 0x4000u64; + let data = vec![runtime::MemoryRegion { + base: info_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_SYSINFO); + state.write_reg(REG_A0, info_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!( + state.memory.read_u64(info_addr + 32).unwrap(), + runtime.config.mem_bytes + ); + 
assert_eq!(state.memory.read_u32(info_addr + 104).unwrap(), 1); + assert_eq!(state.memory.read_u16(info_addr + 80).unwrap(), 1); + } + + #[test] + fn prlimit64_reads_and_updates_current_process_limits() { + let limit_addr = 0x4000u64; + let data = vec![runtime::MemoryRegion { + base: limit_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_PRLIMIT64); + state.write_reg(REG_A0, 0); + state.write_reg(REG_A1, RLIMIT_STACK as u64); + state.write_reg(REG_A2, 0); + state.write_reg(REG_A3, limit_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + let old_cur = state.memory.read_u64(limit_addr).unwrap(); + let old_max = state.memory.read_u64(limit_addr + 8).unwrap(); + assert_eq!(old_cur, runtime.config.stack_size); + assert_eq!(old_max, runtime.config.stack_size); + + state + .memory + .write_u64_checked(limit_addr, old_cur / 2) + .unwrap(); + state + .memory + .write_u64_checked(limit_addr + 8, old_max) + .unwrap(); + + state.write_reg(REG_A7, SYS_PRLIMIT64); + state.write_reg(REG_A0, 0); + state.write_reg(REG_A1, RLIMIT_STACK as u64); + state.write_reg(REG_A2, limit_addr); + state.write_reg(REG_A3, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!(state.rlimit_for(RLIMIT_STACK).unwrap().cur, old_cur / 2); + } + + #[test] + fn getcwd_and_getrandom_write_guest_buffers() { + let cwd_addr = 0x4000u64; + let rand_addr = 0x5000u64; + let data = vec![ + runtime::MemoryRegion { + base: cwd_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: rand_addr, + size: PAGE_SIZE, + flags: MEM_READ | 
MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let mut runtime = sample_runtime(&[enc_acrc(1)], &data); + runtime.config.workdir = Some(PathBuf::from("/tmp/linxcoremodel")); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_GETCWD); + state.write_reg(REG_A0, cwd_addr); + state.write_reg(REG_A1, 64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), cwd_addr); + assert_eq!( + state.memory.read_c_string(cwd_addr, 63).unwrap(), + "/tmp/linxcoremodel" + ); + + state.write_reg(REG_A7, SYS_GETRANDOM); + state.write_reg(REG_A0, rand_addr); + state.write_reg(REG_A1, 16); + state.write_reg(REG_A2, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 16); + assert_ne!( + state.memory.read_bytes(rand_addr, 16).unwrap(), + vec![0u8; 16] + ); + } + + #[test] + fn prctl_round_trips_thread_name() { + let name_addr = 0x4000u64; + let out_addr = 0x5000u64; + let data = vec![ + runtime::MemoryRegion { + base: name_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: { + let mut bytes = vec![0u8; PAGE_SIZE as usize]; + bytes[..8].copy_from_slice(b"worker-0"); + bytes[8] = 0; + bytes + }, + }, + runtime::MemoryRegion { + base: out_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_PRCTL); + state.write_reg(REG_A0, GUEST_PR_SET_NAME); + state.write_reg(REG_A1, name_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + state.write_reg(REG_A7, SYS_PRCTL); + state.write_reg(REG_A0, 
GUEST_PR_GET_NAME); + state.write_reg(REG_A1, out_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!( + state + .memory + .read_c_string(out_addr, GUEST_PRCTL_NAME_BYTES) + .unwrap(), + "worker-0" + ); + } + + #[test] + fn madvise_and_membarrier_follow_model_contract() { + let map_addr = 0x6000u64; + let data = vec![runtime::MemoryRegion { + base: map_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_MADVISE); + state.write_reg(REG_A0, map_addr); + state.write_reg(REG_A1, PAGE_SIZE); + state.write_reg(REG_A2, 4); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + state.write_reg(REG_A7, SYS_MEMBARRIER); + state.write_reg(REG_A0, GUEST_MEMBARRIER_CMD_QUERY); + state.write_reg(REG_A1, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let query = state.read_reg(REG_A0); + assert_ne!(query & GUEST_MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0); + assert_ne!(query & GUEST_MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0); + + state.write_reg(REG_A7, SYS_MEMBARRIER); + state.write_reg(REG_A0, GUEST_MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED); + state.write_reg(REG_A1, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + state.write_reg(REG_A7, SYS_MEMBARRIER); + state.write_reg(REG_A0, GUEST_MEMBARRIER_CMD_PRIVATE_EXPEDITED); + state.write_reg(REG_A1, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + } + + #[test] + fn rseq_registration_initializes_guest_abi() { + let rseq_addr = 0x7000u64; + let data = vec![runtime::MemoryRegion { + base: 
rseq_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0xAA; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_RSEQ); + state.write_reg(REG_A0, rseq_addr); + state.write_reg(REG_A1, GUEST_RSEQ_MIN_LEN as u64); + state.write_reg(REG_A2, 0); + state.write_reg(REG_A3, GUEST_RSEQ_SIG as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!(state.rseq_addr, rseq_addr); + assert_eq!(state.rseq_len, GUEST_RSEQ_MIN_LEN); + assert_eq!(state.rseq_sig, GUEST_RSEQ_SIG); + assert_eq!(state.memory.read_u32(rseq_addr).unwrap(), 0); + assert_eq!(state.memory.read_u32(rseq_addr + 4).unwrap(), 0); + assert_eq!(state.memory.read_u64(rseq_addr + 8).unwrap(), 0); + + state.write_reg(REG_A7, SYS_RSEQ); + state.write_reg(REG_A0, rseq_addr); + state.write_reg(REG_A1, GUEST_RSEQ_MIN_LEN as u64); + state.write_reg(REG_A2, GUEST_RSEQ_FLAG_UNREGISTER); + state.write_reg(REG_A3, GUEST_RSEQ_SIG as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!(state.rseq_addr, 0); + } + + #[test] + fn pipe2_and_dup3_roundtrip_guest_data() { + let pipefd_addr = 0x4000u64; + let write_addr = 0x5000u64; + let read_addr = 0x6000u64; + let data = vec![ + runtime::MemoryRegion { + base: pipefd_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: write_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: { + let mut bytes = vec![0u8; PAGE_SIZE as usize]; + bytes[..3].copy_from_slice(b"abc"); + bytes + }, + }, + runtime::MemoryRegion { + base: read_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let 
runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_PIPE2); + state.write_reg(REG_A0, pipefd_addr); + state.write_reg(REG_A1, GUEST_O_CLOEXEC as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + let read_guest_fd = state.memory.read_u32(pipefd_addr).unwrap() as u64; + let write_guest_fd = state.memory.read_u32(pipefd_addr + 4).unwrap() as u64; + assert_eq!( + state.fd_fd_flags.get(&read_guest_fd).copied().unwrap_or(0), + GUEST_FD_CLOEXEC + ); + assert_eq!( + state.fd_fd_flags.get(&write_guest_fd).copied().unwrap_or(0), + GUEST_FD_CLOEXEC + ); + + state.write_reg(REG_A7, SYS_DUP3); + state.write_reg(REG_A0, write_guest_fd); + state.write_reg(REG_A1, 20); + state.write_reg(REG_A2, GUEST_O_CLOEXEC as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 20); + assert_eq!( + state.fd_fd_flags.get(&20).copied().unwrap_or(0), + GUEST_FD_CLOEXEC + ); + + state.write_reg(REG_A7, SYS_WRITE); + state.write_reg(REG_A0, 20); + state.write_reg(REG_A1, write_addr); + state.write_reg(REG_A2, 3); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 3); + + state.write_reg(REG_A7, SYS_READ); + state.write_reg(REG_A0, read_guest_fd); + state.write_reg(REG_A1, read_addr); + state.write_reg(REG_A2, 3); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 3); + assert_eq!(state.memory.read_bytes(read_addr, 3).unwrap(), b"abc"); + } + + #[test] + fn ppoll_reports_guest_pipe_readiness() { + let pipefd_addr = 0x4000u64; + let timeout_addr = 0x5000u64; + let read_addr = 0x6000u64; + let data = vec![ + runtime::MemoryRegion { + base: pipefd_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: 
vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: timeout_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: read_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_PIPE2); + state.write_reg(REG_A0, pipefd_addr); + state.write_reg(REG_A1, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let read_guest_fd = state.memory.read_u32(pipefd_addr).unwrap() as u64; + let write_guest_fd = state.memory.read_u32(pipefd_addr + 4).unwrap() as u64; + + state + .memory + .write_u32_checked(pipefd_addr, read_guest_fd as u32) + .unwrap(); + state + .memory + .write_u16_checked(pipefd_addr + 4, 0x001) + .unwrap(); + state.memory.write_u16_checked(pipefd_addr + 6, 0).unwrap(); + + state.write_reg(REG_A7, SYS_WRITE); + state.write_reg(REG_A0, write_guest_fd); + state.write_reg(REG_A1, read_addr); + state.write_reg(REG_A2, 1); + state.memory.write_bytes_checked(read_addr, b"x").unwrap(); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + state.write_reg(REG_A7, SYS_PPOLL); + state.write_reg(REG_A0, pipefd_addr); + state.write_reg(REG_A1, 1); + state.write_reg(REG_A2, timeout_addr); + state.write_reg(REG_A3, 0); + state.write_reg(REG_A4, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 1); + assert_ne!(state.memory.read_u16(pipefd_addr + 6).unwrap() & 0x001, 0); + } + + #[test] + fn pselect6_reports_guest_read_set_readiness() { + let readfds_addr = 0x4000u64; + let timeout_addr = 0x5000u64; + let sigdata_addr = 0x6000u64; + let byte_addr = 0x7000u64; + let data = vec![ + runtime::MemoryRegion { + base: 
readfds_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: timeout_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: sigdata_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: byte_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_PIPE2); + state.write_reg(REG_A0, byte_addr); + state.write_reg(REG_A1, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let read_guest_fd = state.memory.read_u32(byte_addr).unwrap() as u64; + let write_guest_fd = state.memory.read_u32(byte_addr + 4).unwrap() as u64; + + let bit = 1u64 << (read_guest_fd % 64); + state.memory.write_u64_checked(readfds_addr, bit).unwrap(); + state.memory.write_u64_checked(timeout_addr, 0).unwrap(); + state.memory.write_u64_checked(timeout_addr + 8, 0).unwrap(); + state.memory.write_u64_checked(sigdata_addr, 0).unwrap(); + state.memory.write_u64_checked(sigdata_addr + 8, 0).unwrap(); + state + .memory + .write_bytes_checked(byte_addr + 16, b"z") + .unwrap(); + + state.write_reg(REG_A7, SYS_WRITE); + state.write_reg(REG_A0, write_guest_fd); + state.write_reg(REG_A1, byte_addr + 16); + state.write_reg(REG_A2, 1); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + state.write_reg(REG_A7, SYS_PSELECT6); + state.write_reg(REG_A0, read_guest_fd + 1); + state.write_reg(REG_A1, readfds_addr); + state.write_reg(REG_A2, 0); + state.write_reg(REG_A3, 0); + state.write_reg(REG_A4, timeout_addr); + state.write_reg(REG_A5, sigdata_addr); + 
dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 1); + assert_eq!(state.memory.read_u64(readfds_addr).unwrap() & bit, bit); + } + + #[test] + fn epoll_pwait_reports_eventfd_readiness() { + let value_addr = 0x4000u64; + let ctl_addr = 0x5000u64; + let out_addr = 0x6000u64; + let read_addr = 0x7000u64; + let data = vec![ + runtime::MemoryRegion { + base: value_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: ctl_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: out_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: read_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_EVENTFD2); + state.write_reg(REG_A0, 0); + state.write_reg(REG_A1, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let event_fd = state.read_reg(REG_A0); + + state.write_reg(REG_A7, SYS_EPOLL_CREATE1); + state.write_reg(REG_A0, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let epoll_fd = state.read_reg(REG_A0); + + write_guest_epoll_event( + &mut state.memory, + ctl_addr, + GUEST_EPOLLIN, + 0x1122_3344_5566_7788, + ) + .unwrap(); + state.write_reg(REG_A7, SYS_EPOLL_CTL); + state.write_reg(REG_A0, epoll_fd); + state.write_reg(REG_A1, GUEST_EPOLL_CTL_ADD as u64); + state.write_reg(REG_A2, event_fd); + state.write_reg(REG_A3, ctl_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + 
state.memory.write_u64_checked(value_addr, 7).unwrap(); + state.write_reg(REG_A7, SYS_WRITE); + state.write_reg(REG_A0, event_fd); + state.write_reg(REG_A1, value_addr); + state.write_reg(REG_A2, 8); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 8); + + state.write_reg(REG_A7, SYS_EPOLL_PWAIT); + state.write_reg(REG_A0, epoll_fd); + state.write_reg(REG_A1, out_addr); + state.write_reg(REG_A2, 4); + state.write_reg(REG_A3, 0); + state.write_reg(REG_A4, 0); + state.write_reg(REG_A5, 0); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 1); + assert_ne!(state.memory.read_u32(out_addr).unwrap() & GUEST_EPOLLIN, 0); + assert_eq!( + state.memory.read_u64(out_addr + 8).unwrap(), + 0x1122_3344_5566_7788 + ); + + state.write_reg(REG_A7, SYS_READ); + state.write_reg(REG_A0, event_fd); + state.write_reg(REG_A1, read_addr); + state.write_reg(REG_A2, 8); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 8); + assert_eq!(state.memory.read_u64(read_addr).unwrap(), 7); + } + + #[test] + fn wait4_reports_no_child_in_single_process_mode() { + let status_addr = 0x4000u64; + let rusage_addr = 0x5000u64; + let data = vec![ + runtime::MemoryRegion { + base: status_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0xAA; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: rusage_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0xBB; PAGE_SIZE as usize], + }, + ]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_WAIT4); + state.write_reg(REG_A0, u64::MAX); + state.write_reg(REG_A1, status_addr); + state.write_reg(REG_A2, 0); + state.write_reg(REG_A3, rusage_addr); + dispatch_syscall(&mut state, &runtime, &mut 
commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), (-(GUEST_ECHILD as i64)) as u64); + assert_eq!(state.memory.read_u32(status_addr).unwrap(), 0xAAAA_AAAA); + assert_eq!(state.memory.read_u32(rusage_addr).unwrap(), 0xBBBB_BBBB); + } + + #[test] + fn hl_bstart_std_uses_byte_scaled_target_offsets() { + let runtime = sample_runtime(&[enc_acrc(1)], &[]); + let mut state = ExecState::from_runtime(&runtime); + let pc = 0x11158u64; + let fallthrough = pc + 6; + let bundle = u64::from_le_bytes([0xfe, 0xff, 0x01, 0x40, 0xf6, 0xff, 0x56, 0x55]); + let decoded = decode_word(bundle).unwrap(); + let mut commit = empty_commit(0, pc, &decoded, &runtime); + state.pc = pc; + + let outcome = execute_step(&mut state, &runtime, &decoded, &mut commit).unwrap(); + + assert_eq!(outcome.next_pc, fallthrough); + assert_eq!( + state.block.as_ref().map(|block| block.kind), + Some(BlockKind::Call) + ); + assert_eq!( + state.block.as_ref().and_then(|block| block.target), + Some(0x11130) + ); + } + + #[test] + fn fret_stk_clears_stale_block_state() { + let stack_addr = 0x4000u64; + let data = vec![runtime::MemoryRegion { + base: stack_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_SP, stack_addr); + state.memory.write_u64_checked(stack_addr, 0x1234).unwrap(); + state.block = Some(BlockContext { + kind: BlockKind::Cond, + target: Some(0x3000), + return_target: Some(0x4000), + }); + state.cond = true; + state.carg = true; + state.target = 0x5000; + + let target = apply_fret_stk(&mut state, &mut commit, REG_RA, REG_RA, 8).unwrap(); + + assert_eq!(target, 0x1234); + assert!(state.block.is_none()); + assert!(!state.cond); + assert!(!state.carg); + assert_eq!(state.target, 0); + } + + #[test] + fn 
sigaltstack_round_trips_single_thread_state() { + let new_addr = 0x4000u64; + let old_addr = 0x5000u64; + let data = vec![ + runtime::MemoryRegion { + base: new_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: old_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + write_guest_sigaltstack(&mut state.memory, new_addr, 0x8000, 0, 4096).unwrap(); + state.write_reg(REG_A7, SYS_SIGALTSTACK); + state.write_reg(REG_A0, new_addr); + state.write_reg(REG_A1, old_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!(state.alt_stack_sp, 0x8000); + assert_eq!(state.alt_stack_size, 4096); + assert_eq!(state.alt_stack_flags, 0); + assert_eq!( + state.memory.read_u32(old_addr + 8).unwrap(), + GUEST_SS_DISABLE + ); + + state.write_reg(REG_A7, SYS_SIGALTSTACK); + state.write_reg(REG_A0, 0); + state.write_reg(REG_A1, old_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.memory.read_u64(old_addr).unwrap(), 0x8000); + assert_eq!(state.memory.read_u64(old_addr + 16).unwrap(), 4096); + } + + #[test] + fn ioctl_tty_requests_use_deterministic_stdio_model() { + let winsize_addr = 0x4000u64; + let pgrp_addr = 0x5000u64; + let data = vec![ + runtime::MemoryRegion { + base: winsize_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: pgrp_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let runtime = sample_runtime(&[enc_acrc(1)], &data); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = 
CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_IOCTL); + state.write_reg(REG_A0, 1); + state.write_reg(REG_A1, GUEST_TIOCGWINSZ); + state.write_reg(REG_A2, winsize_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!(state.memory.read_u16(winsize_addr).unwrap(), 24); + assert_eq!(state.memory.read_u16(winsize_addr + 2).unwrap(), 80); + + state.write_reg(REG_A7, SYS_IOCTL); + state.write_reg(REG_A0, 1); + state.write_reg(REG_A1, GUEST_TIOCGPGRP); + state.write_reg(REG_A2, pgrp_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!( + state.memory.read_u32(pgrp_addr).unwrap(), + state.current_pid as u32 + ); + + state.memory.write_u32_checked(pgrp_addr, 77).unwrap(); + state.write_reg(REG_A7, SYS_IOCTL); + state.write_reg(REG_A0, 1); + state.write_reg(REG_A1, GUEST_TIOCSPGRP); + state.write_reg(REG_A2, pgrp_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + state.write_reg(REG_A7, SYS_IOCTL); + state.write_reg(REG_A0, 1); + state.write_reg(REG_A1, GUEST_TIOCGPGRP); + state.write_reg(REG_A2, pgrp_addr); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.memory.read_u32(pgrp_addr).unwrap(), 77); + } + + #[test] + fn fcntl_tracks_guest_fd_flags() { + let runtime = sample_runtime(&[enc_acrc(1)], &[]); + let mut state = ExecState::from_runtime(&runtime); + let mut host_file = NamedTempFile::new().unwrap(); + host_file.write_all(b"fcntl\n").unwrap(); + host_file.flush().unwrap(); + let host_fd = unsafe { libc::dup(host_file.as_raw_fd()) }; + assert!(host_fd >= 0); + let guest_fd = 9u64; + state.fd_table.insert(guest_fd, host_fd); + state.fd_status_flags.insert(guest_fd, libc::O_RDONLY); + state.fd_fd_flags.insert(guest_fd, 0); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + 
state.write_reg(REG_A7, SYS_FCNTL); + state.write_reg(REG_A0, guest_fd); + state.write_reg(REG_A1, GUEST_F_SETFD as u64); + state.write_reg(REG_A2, GUEST_FD_CLOEXEC as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + + state.write_reg(REG_A7, SYS_FCNTL); + state.write_reg(REG_A0, guest_fd); + state.write_reg(REG_A1, GUEST_F_GETFD as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), GUEST_FD_CLOEXEC as u64); + + unsafe { + libc::close(host_fd); + } + } + + #[test] + fn fcntl_dupfd_commands_allocate_new_guest_fds() { + let runtime = sample_runtime(&[enc_acrc(1)], &[]); + let mut state = ExecState::from_runtime(&runtime); + let mut host_file = NamedTempFile::new().unwrap(); + host_file.write_all(b"dupfd\n").unwrap(); + host_file.flush().unwrap(); + let host_fd = unsafe { libc::dup(host_file.as_raw_fd()) }; + assert!(host_fd >= 0); + let guest_fd = 9u64; + state.insert_guest_fd(guest_fd, host_fd, GUEST_O_RDONLY, 0); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_FCNTL); + state.write_reg(REG_A0, guest_fd); + state.write_reg(REG_A1, GUEST_F_DUPFD as u64); + state.write_reg(REG_A2, 20); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let dup_fd = state.read_reg(REG_A0); + assert!(dup_fd >= 20); + assert_eq!(state.fd_fd_flags.get(&dup_fd).copied().unwrap_or(0), 0); + + state.write_reg(REG_A7, SYS_FCNTL); + state.write_reg(REG_A0, guest_fd); + state.write_reg(REG_A1, GUEST_F_DUPFD_CLOEXEC as u64); + state.write_reg(REG_A2, 30); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let cloexec_fd = state.read_reg(REG_A0); + assert!(cloexec_fd >= 30); + assert_eq!( + state.fd_fd_flags.get(&cloexec_fd).copied().unwrap_or(0), + GUEST_FD_CLOEXEC + ); + } + + #[test] + fn newfstatat_and_readlinkat_follow_host_paths() { + let temp = tempdir().unwrap(); + let 
target = temp.path().join("target.txt"); + fs::write(&target, b"symlink fixture bytes").unwrap(); + let link = temp.path().join("link.txt"); + symlink(&target, &link).unwrap(); + let path_bytes = b"link.txt\0"; + let path_addr = 0x4000u64; + let stat_addr = 0x5000u64; + let link_addr = 0x6000u64; + let data = vec![ + runtime::MemoryRegion { + base: path_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: { + let mut bytes = vec![0u8; PAGE_SIZE as usize]; + bytes[..path_bytes.len()].copy_from_slice(path_bytes); + bytes + }, + }, + runtime::MemoryRegion { + base: stat_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + runtime::MemoryRegion { + base: link_addr, + size: PAGE_SIZE, + flags: MEM_READ | MEM_WRITE, + data: vec![0u8; PAGE_SIZE as usize], + }, + ]; + let mut runtime = sample_runtime(&[enc_acrc(1)], &data); + runtime.config.workdir = Some(temp.path().to_path_buf()); + let mut state = ExecState::from_runtime(&runtime); + let mut commit = CommitRecord::unsupported(0, 0, 0, TRAP_ILLEGAL_INST, &runtime.block); + + state.write_reg(REG_A7, SYS_NEWFSTATAT); + state.write_reg(REG_A0, GUEST_AT_FDCWD as u64); + state.write_reg(REG_A1, path_addr); + state.write_reg(REG_A2, stat_addr); + state.write_reg(REG_A3, GUEST_AT_SYMLINK_NOFOLLOW as u64); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + assert_eq!(state.read_reg(REG_A0), 0); + assert_eq!( + state.memory.read_u32(stat_addr + 16).unwrap() & libc::S_IFMT as u32, + libc::S_IFLNK as u32 + ); + + state.write_reg(REG_A7, SYS_READLINKAT); + state.write_reg(REG_A0, GUEST_AT_FDCWD as u64); + state.write_reg(REG_A1, path_addr); + state.write_reg(REG_A2, link_addr); + state.write_reg(REG_A3, 128); + dispatch_syscall(&mut state, &runtime, &mut commit).unwrap(); + let count = state.read_reg(REG_A0) as usize; + let resolved = + String::from_utf8(state.memory.read_bytes(link_addr, count).unwrap()).unwrap(); + assert!(resolved.ends_with("target.txt")); 
/// Encodes an ADDI instruction word (opcode 0x15): imm[11:0] in bits
/// [31:20], rs1 in [19:15], rd in [11:7].
fn enc_addi(rd: u32, rs1: u32, imm: u32) -> u32 {
    let mut word = 0x15u32;
    word |= rd << 7;
    word |= rs1 << 15;
    word |= (imm & 0x0fff) << 20;
    word
}

/// Encodes an LWI load (opcode 0x19, funct3 = 2) with a signed 12-bit
/// offset placed in bits [31:20].
fn enc_lwi(rd: u32, rs1: u32, simm12: i32) -> u32 {
    let offset = (simm12 as u32) & 0x0fff;
    (offset << 20) | (rs1 << 15) | (2 << 12) | (rd << 7) | 0x19
}

/// Encodes an SWI store (opcode 0x59, funct3 = 2). The 12-bit offset is
/// split: low 7 bits land in [31:25], high 5 bits in [11:7].
fn enc_swi(src: u32, base: u32, simm12: i32) -> u32 {
    let offset = (simm12 as u32) & 0x0fff;
    let imm_lo = offset & 0x7f;
    let imm_hi = (offset >> 7) & 0x1f;
    (imm_lo << 25) | (base << 20) | (src << 15) | (2 << 12) | (imm_hi << 7) | 0x59
}

/// Encodes an ACRC instruction (base pattern 0x302b) with a 4-bit
/// destination register field in bits [23:20].
fn enc_acrc(rst: u32) -> u32 {
    0x302b | ((rst & 0xf) << 20)
}
@@ +pub mod core; +pub mod exec; +pub mod memory; +pub mod syscalls; +pub mod trace; + +pub use exec::*; diff --git a/crates/funcmodel/src/memory/mod.rs b/crates/funcmodel/src/memory/mod.rs new file mode 100644 index 0000000..9343acb --- /dev/null +++ b/crates/funcmodel/src/memory/mod.rs @@ -0,0 +1 @@ +// Memory-domain placeholder for future guest-memory helpers. diff --git a/crates/funcmodel/src/syscalls/mod.rs b/crates/funcmodel/src/syscalls/mod.rs new file mode 100644 index 0000000..360fc1b --- /dev/null +++ b/crates/funcmodel/src/syscalls/mod.rs @@ -0,0 +1 @@ +// Syscall-domain placeholder for future Linux-user syscall splits. diff --git a/crates/funcmodel/src/trace/mod.rs b/crates/funcmodel/src/trace/mod.rs new file mode 100644 index 0000000..b69ddbc --- /dev/null +++ b/crates/funcmodel/src/trace/mod.rs @@ -0,0 +1 @@ +// Trace-domain placeholder for future functional trace helpers. diff --git a/crates/isa/Cargo.toml b/crates/isa/Cargo.toml new file mode 100644 index 0000000..e4c3305 --- /dev/null +++ b/crates/isa/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "isa" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +serde.workspace = true +serde_json.workspace = true diff --git a/crates/isa/src/lib.rs b/crates/isa/src/lib.rs new file mode 100644 index 0000000..eaf02bc --- /dev/null +++ b/crates/isa/src/lib.rs @@ -0,0 +1,646 @@ +use std::collections::BTreeMap; +use std::sync::OnceLock; + +use serde::{Deserialize, Serialize}; + +pub const TRACE_SCHEMA_VERSION: &str = "1.0"; +pub const LINXTRACE_FORMAT: &str = "linxtrace.v1"; +pub const TRAP_ILLEGAL_INST: u64 = 4; +pub const TRAP_BRU_RECOVERY_NOT_BSTART: u64 = 0x0000_B001; +pub const TRAP_DYNAMIC_TARGET_MISSING: u64 = 0x0000_B002; +pub const TRAP_DYNAMIC_TARGET_NOT_BSTART: u64 = 0x0000_B003; +pub const TRAP_SETRET_NOT_ADJACENT: u64 = 0x0000_B004; +pub const TRAP_DYNAMIC_TARGET_STALE: u64 = 0x0000_B005; +pub const TRAP_UNSUPPORTED_PRIVILEGE: 
u64 = 0x0000_B010; +pub const DEFAULT_MEM_BYTES: u64 = 128 * 1024 * 1024; +pub const DEFAULT_STACK_SIZE: u64 = 8 * 1024 * 1024; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum EngineKind { + Func, + Cycle, +} + +impl core::str::FromStr for EngineKind { + type Err = String; + + fn from_str(value: &str) -> Result { + match value { + "func" => Ok(Self::Func), + "cycle" => Ok(Self::Cycle), + other => Err(format!("unknown engine {other}; expected func|cycle")), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct BlockMeta { + pub bid: u64, + pub block_uid: u64, + pub block_kind: String, + pub lane_id: String, +} + +impl Default for BlockMeta { + fn default() -> Self { + Self { + bid: 0, + block_uid: 0, + block_kind: "sys".to_string(), + lane_id: "scalar0".to_string(), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ArchitecturalState { + pub pc: u64, + pub regs: [u64; 32], + pub next_bid: u64, +} + +impl ArchitecturalState { + pub fn new(pc: u64) -> Self { + Self { + pc, + regs: [0; 32], + next_bid: 1, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CommitRecord { + pub schema_version: String, + pub cycle: u64, + pub pc: u64, + pub insn: u64, + pub len: u8, + pub next_pc: u64, + pub src0_valid: u8, + pub src0_reg: u8, + pub src0_data: u64, + pub src1_valid: u8, + pub src1_reg: u8, + pub src1_data: u64, + pub dst_valid: u8, + pub dst_reg: u8, + pub dst_data: u64, + pub wb_valid: u8, + pub wb_rd: u8, + pub wb_data: u64, + pub mem_valid: u8, + pub mem_is_store: u8, + pub mem_addr: u64, + pub mem_wdata: u64, + pub mem_rdata: u64, + pub mem_size: u8, + pub trap_valid: u8, + pub trap_cause: u64, + pub traparg0: u64, + pub block_kind: String, + pub lane_id: String, + pub tile_meta: String, + pub tile_ref_src: u64, + pub tile_ref_dst: u64, +} + +impl CommitRecord { + pub fn unsupported(cycle: 
u64, pc: u64, insn: u64, cause: u64, block: &BlockMeta) -> Self { + Self { + schema_version: TRACE_SCHEMA_VERSION.to_string(), + cycle, + pc, + insn, + len: 4, + next_pc: pc, + src0_valid: 0, + src0_reg: 0, + src0_data: 0, + src1_valid: 0, + src1_reg: 0, + src1_data: 0, + dst_valid: 0, + dst_reg: 0, + dst_data: 0, + wb_valid: 0, + wb_rd: 0, + wb_data: 0, + mem_valid: 0, + mem_is_store: 0, + mem_addr: 0, + mem_wdata: 0, + mem_rdata: 0, + mem_size: 0, + trap_valid: 1, + trap_cause: cause, + traparg0: insn, + block_kind: block.block_kind.clone(), + lane_id: block.lane_id.clone(), + tile_meta: String::new(), + tile_ref_src: 0, + tile_ref_dst: 0, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct RunMetrics { + pub engine: EngineKind, + pub cycles: u64, + pub commits: u64, + pub exit_reason: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct RunResult { + pub image_name: String, + pub entry_pc: u64, + pub metrics: RunMetrics, + pub commits: Vec, + pub decoded: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct TraceCaptureOptions { + pub commit_window_start: u64, + pub commit_window_end: Option, + pub stage_filter: Vec, + pub row_filter: Vec, +} + +impl Default for TraceCaptureOptions { + fn default() -> Self { + Self { + commit_window_start: 0, + commit_window_end: None, + stage_filter: Vec::new(), + row_filter: Vec::new(), + } + } +} + +pub fn default_stage_order() -> Vec<&'static str> { + vec![ + "F0", "F1", "F2", "F3", "IB", "F4", "D1", "D2", "D3", "S1", "S2", "IQ", "P1", "I1", "I2", + "E1", "E2", "E3", "E4", "W1", "W2", "ROB", "CMT", "FLS", + ] +} + +pub fn default_stage_catalog() -> Vec { + default_stage_order() + .into_iter() + .enumerate() + .map(|(idx, stage)| StageCatalogEntry { + stage_id: stage.to_string(), + label: stage.to_string(), + color: format!("#{:06x}", 0x335577 + idx as u32 * 0x050103), + group: if stage.starts_with('F') || stage == "IB" { 
+ "frontend".to_string() + } else if stage.starts_with('D') || stage.starts_with('S') || stage == "IQ" { + "dispatch".to_string() + } else if stage.starts_with('E') + || stage.starts_with('W') + || stage.starts_with('P') + || stage.starts_with('I') + { + "execute".to_string() + } else { + "retire".to_string() + }, + }) + .collect() +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct StageCatalogEntry { + pub stage_id: String, + pub label: String, + pub color: String, + pub group: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct LaneCatalogEntry { + pub lane_id: String, + pub label: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct RowCatalogEntry { + pub row_id: String, + pub row_kind: String, + pub core_id: String, + pub block_uid: u64, + pub uop_uid: u64, + pub left_label: String, + pub detail_defaults: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct StageTraceEvent { + pub cycle: u64, + pub row_id: String, + pub stage_id: String, + pub lane_id: String, + pub stall: bool, + pub cause: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub checkpoint_id: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub trap_cause: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub traparg0: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub target_setup_epoch: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub boundary_epoch: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub target_source_owner_row_id: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub target_source_epoch: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub target_owner_row_id: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub target_producer_kind: Option, 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub branch_kind: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub return_kind: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub call_materialization_kind: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub target_source_kind: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct DecodedField { + pub name: String, + pub width_bits: u8, + pub value_u64: u64, + pub value_i64: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct DecodedInstruction { + pub uid: String, + pub mnemonic: String, + pub asm: String, + pub group: String, + pub encoding_kind: String, + pub length_bits: u8, + pub mask: u64, + pub match_bits: u64, + pub instruction_bits: u64, + pub uop_group: String, + pub fields: Vec, +} + +impl DecodedInstruction { + pub fn field(&self, name: &str) -> Option<&DecodedField> { + self.fields.iter().find(|field| field.name == name) + } + + pub fn length_bytes(&self) -> u8 { + self.length_bits / 8 + } +} + +#[derive(Debug, Deserialize)] +struct RawIsaBundle { + instructions: Vec, +} + +#[derive(Debug, Deserialize)] +struct RawInstruction { + asm: String, + encoding: RawEncoding, + encoding_kind: String, + group: String, + id: String, + length_bits: u8, + mnemonic: String, + uop_group: String, +} + +#[derive(Debug, Deserialize)] +struct RawEncoding { + parts: Vec, +} + +#[derive(Debug, Deserialize)] +struct RawEncodingPart { + fields: Vec, + mask: String, + #[serde(rename = "match")] + match_bits: String, + width_bits: u8, +} + +#[derive(Debug, Deserialize)] +struct RawField { + name: String, + pieces: Vec, + signed: Option, +} + +#[derive(Debug, Deserialize)] +struct RawFieldPiece { + insn_lsb: u8, + #[serde(rename = "insn_msb")] + _insn_msb: u8, + value_lsb: Option, + value_msb: Option, + width: u8, +} + +#[derive(Debug, Clone)] +struct DecodePiece { + 
insn_lsb: u8, + width: u8, + value_lsb: u8, +} + +#[derive(Debug, Clone)] +struct DecodeFieldSpec { + name: String, + width_bits: u8, + signed: bool, + pieces: Vec, +} + +#[derive(Debug, Clone)] +struct DecodeForm { + uid: String, + mnemonic: String, + asm: String, + group: String, + encoding_kind: String, + length_bits: u8, + mask: u64, + match_bits: u64, + mask_popcount: u32, + uop_group: String, + fields: Vec, +} + +#[derive(Debug, Default)] +struct DecodeTables { + forms16: Vec, + forms32: Vec, + forms48: Vec, + forms64: Vec, + all_forms: Vec, +} + +static DECODE_TABLES: OnceLock = OnceLock::new(); + +pub fn decode_word(insn_word: u64) -> Option { + let tables = decode_tables(); + best_match(&tables.all_forms, insn_word).map(|form| render_decoded(form, insn_word)) +} + +pub fn decode_form_count() -> usize { + decode_tables().all_forms.len() +} + +fn decode_tables() -> &'static DecodeTables { + DECODE_TABLES.get_or_init(build_decode_tables) +} + +fn build_decode_tables() -> DecodeTables { + let raw: RawIsaBundle = + serde_json::from_str(include_str!("../../../../../isa/v0.4/linxisa-v0.4.json")) + .expect("failed to parse embedded LinxISA v0.4 JSON"); + + let mut tables = DecodeTables::default(); + for instruction in raw.instructions { + let form = build_form(instruction); + match form.length_bits { + 16 => tables.forms16.push(form.clone()), + 32 => tables.forms32.push(form.clone()), + 48 => tables.forms48.push(form.clone()), + 64 => tables.forms64.push(form.clone()), + other => panic!("unsupported instruction length {other}"), + } + tables.all_forms.push(form); + } + tables +} + +fn build_form(instruction: RawInstruction) -> DecodeForm { + let offsets = part_offsets(&instruction.encoding.parts); + let mut mask = 0u64; + let mut match_bits = 0u64; + let mut fields_by_name: BTreeMap = BTreeMap::new(); + + for (part_idx, part) in instruction.encoding.parts.iter().enumerate() { + let offset = offsets[part_idx]; + mask |= parse_hex_u64(&part.mask) << offset; + 
/// Parses a hexadecimal literal from the embedded ISA JSON, with or
/// without a `0x`/`0X` prefix and tolerating surrounding whitespace.
///
/// Panics on non-hex input: the ISA bundle is compiled in via
/// `include_str!`, so a parse failure is a build-data error, not a
/// runtime condition.
///
/// Fix: the previous `trim_start_matches("0x")` removed *repeated*
/// prefixes (so `"0x0x1f"` silently parsed as `0x1f`) and did not accept
/// an uppercase `0X` prefix; `strip_prefix` removes at most one.
fn parse_hex_u64(text: &str) -> u64 {
    let trimmed = text.trim();
    let digits = trimmed
        .strip_prefix("0x")
        .or_else(|| trimmed.strip_prefix("0X"))
        .unwrap_or(trimmed);
    u64::from_str_radix(digits, 16).expect("invalid hex value in ISA JSON")
}
/// Masks `word` down to its low `length_bits` bits; a width of 64 or more
/// returns the word unchanged.
fn truncate_word(word: u64, length_bits: u8) -> u64 {
    // `low_mask` saturates to all-ones at width >= 64, so a single AND
    // covers both the narrow and full-width cases.
    word & low_mask(length_bits)
}

/// Returns a mask with the low `width` bits set; saturates to all-ones
/// for widths of 64 or more (width 0 yields 0).
fn low_mask(width: u8) -> u64 {
    match width {
        w if w >= 64 => u64::MAX,
        w => (1u64 << w) - 1,
    }
}

/// Sign-extends the low `width_bits` bits of `value` into an `i64`.
/// Width 0 yields 0; widths of 64 or more reinterpret the bits unchanged.
fn sign_extend(value: u64, width_bits: u8) -> i64 {
    match width_bits {
        0 => 0,
        w if w >= 64 => value as i64,
        w => {
            // Shift the field's sign bit up to bit 63, then arithmetic-
            // shift back down to replicate it through the high bits.
            let unused = u32::from(64 - w);
            ((value as i64) << unused) >> unused
        }
    }
}
assert_eq!(decoded.field("SrcL").unwrap().value_u64, 5); + assert_eq!(decoded.field("uimm12").unwrap().value_u64, 0x123); + } + + #[test] + fn decode_known_jr_split_immediate() { + let imm = 0x345u64; + let word = ((imm & 0x7F) << 25) + | (3u64 << 20) + | (2u64 << 15) + | (((imm >> 7) & 0x1F) << 7) + | 0x6027u64; + let decoded = decode_word(word).expect("expected JR decode"); + assert_eq!(decoded.mnemonic, "JR"); + assert_eq!(decoded.field("SrcL").unwrap().value_u64, 2); + assert_eq!(decoded.field("SrcZero").unwrap().value_u64, 3); + assert_eq!(decoded.field("simm12").unwrap().value_u64, imm); + } + + #[test] + fn decode_corpus_covers_all_machine_forms() { + let tables = decode_tables(); + assert!(tables.all_forms.len() >= 700); + for form in &tables.all_forms { + let decoded = decode_word(form.match_bits).unwrap_or_else(|| { + panic!( + "failed to decode match bits for {} ({})", + form.uid, form.mnemonic + ) + }); + assert_eq!( + decoded.length_bits, form.length_bits, + "length mismatch for {}", + form.mnemonic + ); + assert_eq!( + decoded.instruction_bits & decoded.mask, + decoded.match_bits, + "decoded form does not self-match for {}", + form.mnemonic + ); + } + } +} diff --git a/crates/lx-tools/Cargo.toml b/crates/lx-tools/Cargo.toml new file mode 100644 index 0000000..1c7ab74 --- /dev/null +++ b/crates/lx-tools/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "lx-tools" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow.workspace = true +clap.workspace = true +serde_json.workspace = true +camodel = { path = "../camodel" } +cosim = { path = "../cosim" } +dse = { path = "../dse" } +elf = { path = "../elf" } +funcmodel = { path = "../funcmodel" } +isa = { path = "../isa" } +runtime = { path = "../runtime" } +trace = { path = "../trace" } diff --git a/crates/lx-tools/src/bin/lx-cosim.rs b/crates/lx-tools/src/bin/lx-cosim.rs new file mode 100644 index 0000000..cfa9bff --- /dev/null +++ 
b/crates/lx-tools/src/bin/lx-cosim.rs @@ -0,0 +1,39 @@ +use anyhow::{Result, bail}; +use clap::Parser; +use cosim::{compare_commit_streams, load_commit_jsonl, require_cosim_match}; +use isa::EngineKind; +use lx_tools::{execute, prepare_runtime}; +use std::path::PathBuf; +use trace::write_commit_jsonl; + +#[derive(Parser)] +struct Args { + #[arg(long)] + engine: EngineKind, + #[arg(long)] + elf: PathBuf, + #[arg(long)] + config: Option, + #[arg(long)] + qemu: PathBuf, + #[arg(long, default_value = "m1")] + protocol: String, + #[arg(long, default_value = "out/lx-cosim")] + out_dir: PathBuf, +} + +fn main() -> Result<()> { + let args = Args::parse(); + if args.protocol != "m1" { + bail!("unsupported protocol {}; expected m1", args.protocol); + } + let prepared = prepare_runtime(&args.elf, &args.out_dir, args.config.as_ref())?; + let bundle = execute(&prepared, args.engine)?; + let dut_trace = prepared.out_dir.join("dut.commit.jsonl"); + write_commit_jsonl(&dut_trace, bundle.result())?; + let qemu_trace = load_commit_jsonl(&args.qemu)?; + let report = compare_commit_streams(&qemu_trace, &bundle.result().commits); + require_cosim_match(&report)?; + println!("{}", serde_json::to_string_pretty(&report)?); + Ok(()) +} diff --git a/crates/lx-tools/src/bin/lx-run.rs b/crates/lx-tools/src/bin/lx-run.rs new file mode 100644 index 0000000..a1ddc78 --- /dev/null +++ b/crates/lx-tools/src/bin/lx-run.rs @@ -0,0 +1,47 @@ +use anyhow::Result; +use camodel::CycleRunOptions; +use clap::Parser; +use funcmodel::FuncRunOptions; +use isa::EngineKind; +use lx_tools::{EngineRunOptions, execute_with_options, prepare_runtime}; +use std::path::PathBuf; +use trace::{write_commit_jsonl, write_linxtrace}; + +#[derive(Parser)] +struct Args { + #[arg(long)] + engine: EngineKind, + #[arg(long)] + elf: PathBuf, + #[arg(long)] + config: Option, + #[arg(long, default_value_t = 100_000)] + max_steps: u64, + #[arg(long, default_value_t = 64)] + max_cycles: u64, + #[arg(long, default_value = 
"out/lx-run")] + out_dir: PathBuf, +} + +fn main() -> Result<()> { + let args = Args::parse(); + let prepared = prepare_runtime(&args.elf, &args.out_dir, args.config.as_ref())?; + let run_options = match args.engine { + EngineKind::Func => EngineRunOptions::Func(FuncRunOptions { + max_steps: args.max_steps, + }), + EngineKind::Cycle => EngineRunOptions::Cycle(CycleRunOptions { + max_cycles: args.max_cycles, + ..CycleRunOptions::default() + }), + }; + let bundle = execute_with_options(&prepared, args.engine, Some(run_options))?; + write_commit_jsonl(prepared.out_dir.join("commit.jsonl"), bundle.result())?; + write_linxtrace( + prepared.out_dir.join("trace.linxtrace"), + bundle.result(), + bundle.stage_events(), + )?; + println!("{}", serde_json::to_string_pretty(bundle.result())?); + Ok(()) +} diff --git a/crates/lx-tools/src/bin/lx-sweep.rs b/crates/lx-tools/src/bin/lx-sweep.rs new file mode 100644 index 0000000..7089a67 --- /dev/null +++ b/crates/lx-tools/src/bin/lx-sweep.rs @@ -0,0 +1,27 @@ +use anyhow::Result; +use clap::Parser; +use dse::{load_sweep_spec, render_markdown, run_sweep}; +use std::fs; +use std::path::PathBuf; + +#[derive(Parser)] +struct Args { + #[arg(long)] + suite: PathBuf, + #[arg(long, default_value = "out/lx-sweep")] + out_dir: PathBuf, +} + +fn main() -> Result<()> { + let args = Args::parse(); + fs::create_dir_all(&args.out_dir)?; + let spec = load_sweep_spec(&args.suite)?; + let report = run_sweep(&spec)?; + fs::write( + args.out_dir.join("report.json"), + serde_json::to_string_pretty(&report)?, + )?; + fs::write(args.out_dir.join("report.md"), render_markdown(&report))?; + println!("{}", serde_json::to_string_pretty(&report)?); + Ok(()) +} diff --git a/crates/lx-tools/src/bin/lx-trace.rs b/crates/lx-tools/src/bin/lx-trace.rs new file mode 100644 index 0000000..63d4bbe --- /dev/null +++ b/crates/lx-tools/src/bin/lx-trace.rs @@ -0,0 +1,28 @@ +use anyhow::Result; +use clap::Parser; +use isa::EngineKind; +use lx_tools::{execute, 
prepare_runtime}; +use std::path::PathBuf; +use trace::write_linxtrace; + +#[derive(Parser)] +struct Args { + #[arg(long)] + engine: EngineKind, + #[arg(long)] + elf: PathBuf, + #[arg(long)] + config: Option, + #[arg(long, default_value = "out/lx-trace")] + out_dir: PathBuf, +} + +fn main() -> Result<()> { + let args = Args::parse(); + let prepared = prepare_runtime(&args.elf, &args.out_dir, args.config.as_ref())?; + let bundle = execute(&prepared, args.engine)?; + let path = prepared.out_dir.join("trace.linxtrace"); + write_linxtrace(&path, bundle.result(), bundle.stage_events())?; + println!("{}", path.display()); + Ok(()) +} diff --git a/crates/lx-tools/src/cli/mod.rs b/crates/lx-tools/src/cli/mod.rs new file mode 100644 index 0000000..551ff9d --- /dev/null +++ b/crates/lx-tools/src/cli/mod.rs @@ -0,0 +1,94 @@ +use anyhow::{Context, Result}; +use camodel::{CycleEngine, CycleRunBundle, CycleRunOptions}; +use elf::load_static_elf; +use funcmodel::{FuncEngine, FuncRunBundle, FuncRunOptions}; +use isa::{EngineKind, RunResult}; +use runtime::{GuestRuntime, RuntimeConfig}; +use std::path::{Path, PathBuf}; + +pub struct PreparedRun { + pub runtime: GuestRuntime, + pub out_dir: PathBuf, +} + +pub enum PreparedBundle { + Func(FuncRunBundle), + Cycle(CycleRunBundle), +} + +impl PreparedBundle { + pub fn result(&self) -> &RunResult { + match self { + Self::Func(bundle) => &bundle.result, + Self::Cycle(bundle) => &bundle.result, + } + } + + pub fn stage_events(&self) -> &[isa::StageTraceEvent] { + match self { + Self::Func(bundle) => &bundle.stage_events, + Self::Cycle(bundle) => &bundle.stage_events, + } + } +} + +pub enum EngineRunOptions { + Func(FuncRunOptions), + Cycle(CycleRunOptions), +} + +impl Default for EngineRunOptions { + fn default() -> Self { + Self::Func(FuncRunOptions::default()) + } +} + +pub fn prepare_runtime( + elf: impl AsRef, + out_dir: impl AsRef, + config: Option>, +) -> Result { + let image = load_static_elf(&elf)?; + let runtime_config = match 
config { + Some(path) => RuntimeConfig::load(path)?, + None => RuntimeConfig::default(), + }; + let runtime = GuestRuntime::bootstrap(image, runtime_config)?; + let out_dir = out_dir.as_ref().to_path_buf(); + std::fs::create_dir_all(&out_dir) + .with_context(|| format!("failed to create {}", out_dir.display()))?; + Ok(PreparedRun { runtime, out_dir }) +} + +pub fn execute(prepared: &PreparedRun, engine: EngineKind) -> Result { + execute_with_options(prepared, engine, None) +} + +pub fn execute_with_options( + prepared: &PreparedRun, + engine: EngineKind, + options: Option, +) -> Result { + match engine { + EngineKind::Func => { + let engine = FuncEngine; + let options = match options { + Some(EngineRunOptions::Func(opts)) => opts, + _ => FuncRunOptions::default(), + }; + Ok(PreparedBundle::Func( + engine.run(&prepared.runtime, &options)?, + )) + } + EngineKind::Cycle => { + let engine = CycleEngine; + let options = match options { + Some(EngineRunOptions::Cycle(opts)) => opts, + _ => CycleRunOptions::default(), + }; + Ok(PreparedBundle::Cycle( + engine.run(&prepared.runtime, &options)?, + )) + } + } +} diff --git a/crates/lx-tools/src/lib.rs b/crates/lx-tools/src/lib.rs new file mode 100644 index 0000000..787ca0f --- /dev/null +++ b/crates/lx-tools/src/lib.rs @@ -0,0 +1,3 @@ +pub mod cli; + +pub use cli::*; diff --git a/crates/runtime/Cargo.toml b/crates/runtime/Cargo.toml new file mode 100644 index 0000000..bbe6ebf --- /dev/null +++ b/crates/runtime/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "runtime" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow.workspace = true +libc.workspace = true +serde.workspace = true +toml.workspace = true +elf = { path = "../elf" } +isa = { path = "../isa" } diff --git a/crates/runtime/src/lib.rs b/crates/runtime/src/lib.rs new file mode 100644 index 0000000..6388b68 --- /dev/null +++ b/crates/runtime/src/lib.rs @@ -0,0 +1,961 @@ +use 
anyhow::{Context, Result, bail}; +use elf::LoadedElf; +use isa::{ArchitecturalState, BlockMeta, DEFAULT_MEM_BYTES, DEFAULT_STACK_SIZE}; +use libc::{clock_gettime, getpid, pid_t, timespec}; +use serde::{Deserialize, Serialize}; +use std::collections::{BTreeMap, HashMap}; +use std::ffi::CString; +use std::fs; +use std::path::PathBuf; + +const SYS_GETCWD: u64 = 17; +const SYS_EVENTFD2: u64 = 19; +const SYS_EPOLL_CREATE1: u64 = 20; +const SYS_EPOLL_CTL: u64 = 21; +const SYS_EPOLL_PWAIT: u64 = 22; +const SYS_DUP3: u64 = 24; +const SYS_FCNTL: u64 = 25; +const SYS_IOCTL: u64 = 29; +const SYS_READ: u64 = 63; +const SYS_WRITE: u64 = 64; +const SYS_OPENAT: u64 = 56; +const SYS_CLOSE: u64 = 57; +const SYS_PIPE2: u64 = 59; +const SYS_LSEEK: u64 = 62; +const SYS_PSELECT6: u64 = 72; +const SYS_PPOLL: u64 = 73; +const SYS_READLINKAT: u64 = 78; +const SYS_NEWFSTATAT: u64 = 79; +const SYS_FSTAT: u64 = 80; +const SYS_FUTEX: u64 = 98; +const SYS_SET_TID_ADDRESS: u64 = 96; +const SYS_SET_ROBUST_LIST: u64 = 99; +const SYS_SETGID: u64 = 144; +const SYS_SETUID: u64 = 146; +const SYS_SETRESUID: u64 = 147; +const SYS_GETRESUID: u64 = 148; +const SYS_SETRESGID: u64 = 149; +const SYS_GETRESGID: u64 = 150; +const SYS_UNAME: u64 = 160; +const SYS_GETPPID: u64 = 173; +const SYS_BRK: u64 = 214; +const SYS_MMAP: u64 = 222; +const SYS_MUNMAP: u64 = 215; +const SYS_MPROTECT: u64 = 226; +const SYS_WAIT4: u64 = 260; +const SYS_MADVISE: u64 = 233; +const SYS_PRLIMIT64: u64 = 261; +const SYS_MEMBARRIER: u64 = 283; +const SYS_RSEQ: u64 = 293; +const SYS_SIGALTSTACK: u64 = 132; +const SYS_RT_SIGACTION: u64 = 134; +const SYS_RT_SIGPROCMASK: u64 = 135; +const SYS_CLOCK_GETTIME: u64 = 113; +const SYS_GETPID: u64 = 172; +const SYS_PRCTL: u64 = 167; +const SYS_GETUID: u64 = 174; +const SYS_GETEUID: u64 = 175; +const SYS_GETGID: u64 = 176; +const SYS_GETEGID: u64 = 177; +const SYS_GETTID: u64 = 178; +const SYS_SYSINFO: u64 = 179; +const SYS_GETRANDOM: u64 = 278; +const SYS_EXIT: u64 = 93; +const 
SYS_EXIT_GROUP: u64 = 94; +const PAGE_SIZE: u64 = 4096; +pub const MEM_EXEC: u32 = 0b001; +pub const MEM_WRITE: u32 = 0b010; +pub const MEM_READ: u32 = 0b100; +const AT_NULL: u64 = 0; +const AT_PAGESZ: u64 = 6; +const AT_ENTRY: u64 = 9; +const AT_PLATFORM: u64 = 15; +const AT_HWCAP: u64 = 16; +const AT_CLKTCK: u64 = 17; +const AT_UID: u64 = 11; +const AT_EUID: u64 = 12; +const AT_GID: u64 = 13; +const AT_EGID: u64 = 14; +const AT_RANDOM: u64 = 25; +const AT_HWCAP2: u64 = 26; +const AT_HWCAP3: u64 = 29; +const AT_HWCAP4: u64 = 30; +const AT_EXECFN: u64 = 31; +const AT_SYSINFO_EHDR: u64 = 33; +const AT_MINSIGSTKSZ: u64 = 51; +const GUEST_MINSIGSTKSZ: u64 = 2048; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RuntimeConfig { + pub mem_bytes: u64, + pub stack_size: u64, + pub args: Vec, + pub env: BTreeMap, + pub workdir: Option, +} + +impl Default for RuntimeConfig { + fn default() -> Self { + Self { + mem_bytes: DEFAULT_MEM_BYTES, + stack_size: DEFAULT_STACK_SIZE, + args: Vec::new(), + env: BTreeMap::new(), + workdir: None, + } + } +} + +impl RuntimeConfig { + pub fn load(path: impl AsRef) -> Result { + let path = path.as_ref(); + let text = fs::read_to_string(path) + .with_context(|| format!("failed to read runtime config {}", path.display()))?; + toml::from_str(&text) + .with_context(|| format!("failed to parse runtime config {}", path.display())) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MemoryRegion { + pub base: u64, + pub size: u64, + pub flags: u32, + pub data: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GuestMemory { + pub regions: Vec, +} + +impl GuestMemory { + fn region_containing(&self, addr: u64, count: usize) -> Option<&MemoryRegion> { + let end = addr.checked_add(count as u64)?; + self.regions + .iter() + .find(|region| addr >= region.base && end <= region.base + region.size) + } + + fn region_containing_with_flags( + &self, + addr: u64, + count: usize, + required_flags: u32, + ) -> 
Option<&MemoryRegion> { + let region = self.region_containing(addr, count)?; + (region.flags & required_flags == required_flags).then_some(region) + } + + fn region_containing_mut(&mut self, addr: u64, count: usize) -> Option<&mut MemoryRegion> { + let end = addr.checked_add(count as u64)?; + self.regions + .iter_mut() + .find(|region| addr >= region.base && end <= region.base + region.size) + } + + fn region_containing_mut_with_flags( + &mut self, + addr: u64, + count: usize, + required_flags: u32, + ) -> Option<&mut MemoryRegion> { + let region = self.region_containing_mut(addr, count)?; + (region.flags & required_flags == required_flags).then_some(region) + } + + pub fn read_bytes(&self, pc: u64, count: usize) -> Option> { + let region = self.region_containing(pc, count)?; + let offset = (pc - region.base) as usize; + Some(region.data.get(offset..offset + count)?.to_vec()) + } + + pub fn read_bytes_checked(&self, pc: u64, count: usize) -> Option> { + let region = self.region_containing_with_flags(pc, count, MEM_READ)?; + let offset = (pc - region.base) as usize; + Some(region.data.get(offset..offset + count)?.to_vec()) + } + + pub fn write_bytes(&mut self, addr: u64, bytes: &[u8]) -> Option<()> { + let region = self.region_containing_mut(addr, bytes.len())?; + let offset = (addr - region.base) as usize; + let dst = region.data.get_mut(offset..offset + bytes.len())?; + dst.copy_from_slice(bytes); + Some(()) + } + + pub fn write_bytes_checked(&mut self, addr: u64, bytes: &[u8]) -> Option<()> { + let region = self.region_containing_mut_with_flags(addr, bytes.len(), MEM_WRITE)?; + let offset = (addr - region.base) as usize; + let dst = region.data.get_mut(offset..offset + bytes.len())?; + dst.copy_from_slice(bytes); + Some(()) + } + + pub fn read_u8(&self, addr: u64) -> Option { + self.read_bytes(addr, 1) + .and_then(|bytes| bytes.first().copied()) + } + + pub fn read_u8_checked(&self, addr: u64) -> Option { + self.read_bytes_checked(addr, 1) + .and_then(|bytes| 
bytes.first().copied()) + } + + pub fn read_u16(&self, addr: u64) -> Option { + let bytes = self.read_bytes(addr, 2)?; + Some(u16::from_le_bytes(bytes.try_into().ok()?)) + } + + pub fn read_u16_checked(&self, addr: u64) -> Option { + let bytes = self.read_bytes_checked(addr, 2)?; + Some(u16::from_le_bytes(bytes.try_into().ok()?)) + } + + pub fn read_u32(&self, pc: u64) -> Option { + let bytes = self.read_bytes(pc, 4)?; + Some(u32::from_le_bytes(bytes.try_into().ok()?)) + } + + pub fn read_u32_checked(&self, addr: u64) -> Option { + let bytes = self.read_bytes_checked(addr, 4)?; + Some(u32::from_le_bytes(bytes.try_into().ok()?)) + } + + pub fn read_u64(&self, addr: u64) -> Option { + let bytes = self.read_bytes(addr, 8)?; + Some(u64::from_le_bytes(bytes.try_into().ok()?)) + } + + pub fn read_u64_checked(&self, addr: u64) -> Option { + let bytes = self.read_bytes_checked(addr, 8)?; + Some(u64::from_le_bytes(bytes.try_into().ok()?)) + } + + pub fn read_u64_bundle(&self, pc: u64) -> Option { + self.regions.iter().find_map(|region| { + if pc < region.base || pc >= region.base + region.size { + return None; + } + let offset = (pc - region.base) as usize; + let available = region.data.len().saturating_sub(offset).min(8); + if available == 0 { + return None; + } + let mut bytes = [0u8; 8]; + bytes[..available].copy_from_slice(region.data.get(offset..offset + available)?); + Some(u64::from_le_bytes(bytes)) + }) + } + + pub fn fetch_u64_bundle(&self, pc: u64) -> Option { + self.regions.iter().find_map(|region| { + if region.flags & MEM_EXEC == 0 || pc < region.base || pc >= region.base + region.size { + return None; + } + let offset = (pc - region.base) as usize; + let available = region.data.len().saturating_sub(offset).min(8); + if available == 0 { + return None; + } + let mut bytes = [0u8; 8]; + bytes[..available].copy_from_slice(region.data.get(offset..offset + available)?); + Some(u64::from_le_bytes(bytes)) + }) + } + + pub fn write_u16(&mut self, addr: u64, value: u16) 
-> Option<()> { + self.write_bytes(addr, &value.to_le_bytes()) + } + + pub fn write_u16_checked(&mut self, addr: u64, value: u16) -> Option<()> { + self.write_bytes_checked(addr, &value.to_le_bytes()) + } + + pub fn write_u32(&mut self, addr: u64, value: u32) -> Option<()> { + self.write_bytes(addr, &value.to_le_bytes()) + } + + pub fn write_u32_checked(&mut self, addr: u64, value: u32) -> Option<()> { + self.write_bytes_checked(addr, &value.to_le_bytes()) + } + + pub fn write_u64(&mut self, addr: u64, value: u64) -> Option<()> { + self.write_bytes(addr, &value.to_le_bytes()) + } + + pub fn write_u64_checked(&mut self, addr: u64, value: u64) -> Option<()> { + self.write_bytes_checked(addr, &value.to_le_bytes()) + } + + pub fn read_c_string(&self, addr: u64, max_len: usize) -> Option { + let mut bytes = Vec::new(); + for idx in 0..max_len { + let byte = self.read_u8(addr.checked_add(idx as u64)?)?; + if byte == 0 { + return String::from_utf8(bytes).ok(); + } + bytes.push(byte); + } + None + } + + pub fn read_c_string_checked(&self, addr: u64, max_len: usize) -> Option { + let mut bytes = Vec::new(); + for idx in 0..max_len { + let byte = self.read_u8_checked(addr.checked_add(idx as u64)?)?; + if byte == 0 { + return String::from_utf8(bytes).ok(); + } + bytes.push(byte); + } + None + } + + pub fn highest_mapped_address(&self) -> u64 { + self.regions + .iter() + .map(|region| region.base + region.size) + .max() + .unwrap_or(0) + } + + pub fn is_range_mapped(&self, addr: u64, size: u64) -> bool { + let Some(end) = addr.checked_add(size) else { + return false; + }; + if size == 0 { + return false; + } + + let mut cursor = addr; + let mut regions = self.regions.iter().collect::>(); + regions.sort_by_key(|region| region.base); + for region in regions { + if region.base > cursor { + break; + } + let region_end = region.base + region.size; + if region_end <= cursor { + continue; + } + cursor = region_end.min(end); + if cursor >= end { + return true; + } + } + false + } + + 
pub fn protect_range(&mut self, addr: u64, size: u64, flags: u32) -> bool { + if !self.is_range_mapped(addr, size) { + return false; + } + self.remap_range(addr, size, Some(flags)); + true + } + + pub fn unmap_range(&mut self, addr: u64, size: u64) { + self.remap_range(addr, size, None); + } + + fn remap_range(&mut self, addr: u64, size: u64, replacement_flags: Option) { + let end = addr.saturating_add(size); + let mut next_regions = Vec::with_capacity(self.regions.len() + 2); + for region in &self.regions { + let region_end = region.base + region.size; + let overlap_start = region.base.max(addr); + let overlap_end = region_end.min(end); + if overlap_start >= overlap_end { + next_regions.push(region.clone()); + continue; + } + + if overlap_start > region.base { + next_regions.push(region_slice( + region, + region.base, + overlap_start, + region.flags, + )); + } + if let Some(flags) = replacement_flags { + next_regions.push(region_slice(region, overlap_start, overlap_end, flags)); + } + if overlap_end < region_end { + next_regions.push(region_slice(region, overlap_end, region_end, region.flags)); + } + } + next_regions.sort_by_key(|region| region.base); + self.regions = next_regions; + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BootInfo { + pub entry_pc: u64, + pub stack_top: u64, + pub stack_pointer: u64, + pub argc: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GuestRuntime { + pub image: LoadedElf, + pub config: RuntimeConfig, + pub state: ArchitecturalState, + pub block: BlockMeta, + pub memory: GuestMemory, + pub boot: BootInfo, + pub fd_table: HashMap, +} + +impl GuestRuntime { + pub fn bootstrap(image: LoadedElf, config: RuntimeConfig) -> Result { + if config.mem_bytes < config.stack_size { + bail!( + "mem_bytes ({}) must be >= stack_size ({})", + config.mem_bytes, + config.stack_size + ); + } + + let mut regions = Vec::new(); + for segment in &image.segments { + let mut data = segment.data.clone(); + 
data.resize(segment.mem_size as usize, 0); + regions.push(MemoryRegion { + base: segment.vaddr, + size: segment.mem_size, + flags: segment.flags, + data, + }); + } + + let stack_top = 0x0000_7FFF_F000u64; + let stack_base = stack_top + .checked_sub(config.stack_size) + .context("stack underflow while computing guest stack")?; + regions.push(MemoryRegion { + base: stack_base, + size: config.stack_size, + flags: 0b110, + data: vec![0; config.stack_size as usize], + }); + + let mut memory = GuestMemory { regions }; + let argv = build_boot_args(&image, &config); + let envp = build_boot_env(&config); + let stack_pointer = initialize_user_stack( + &mut memory, + stack_base, + stack_top, + &argv, + &envp, + image.entry, + )?; + + let mut state = ArchitecturalState::new(image.entry); + state.regs[1] = stack_pointer; + let entry_pc = state.pc; + let argc = argv.len() as u64; + + Ok(Self { + image, + config: config.clone(), + state, + block: BlockMeta::default(), + memory, + boot: BootInfo { + entry_pc, + stack_top, + stack_pointer, + argc, + }, + fd_table: HashMap::from([(0, 0), (1, 1), (2, 2)]), + }) + } + + pub fn fetch_first_word(&self) -> Result { + self.memory + .read_u32(self.state.pc) + .with_context(|| format!("no mapped instruction at pc=0x{:016x}", self.state.pc)) + } + + pub fn fetch_bundle(&self, pc: u64) -> Result { + self.memory + .fetch_u64_bundle(pc) + .with_context(|| format!("no mapped instruction bundle at pc=0x{:016x}", pc)) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum SyscallEffect { + Return(u64), + Exit(i32), +} + +#[derive(Debug, Clone, Default)] +pub struct HostSyscallShim; + +impl HostSyscallShim { + pub fn dispatch(&self, number: u64, args: [u64; 6]) -> Result { + match number { + SYS_GETCWD => bail!( + "syscall {} is on the allowlist but still needs guest path marshalling in the executor", + number + ), + SYS_PRCTL | SYS_MEMBARRIER | SYS_RSEQ => bail!( + "syscall {} is on the allowlist but still needs guest 
thread/runtime handling in the executor", + number + ), + SYS_GETPID => Ok(SyscallEffect::Return(unsafe { unsafe_pid(getpid()) })), + SYS_GETPPID => Ok(SyscallEffect::Return(unsafe { + unsafe_pid(libc::getppid()) + })), + SYS_GETUID | SYS_GETEUID | SYS_GETGID | SYS_GETEGID => Ok(SyscallEffect::Return(0)), + SYS_SETUID | SYS_SETGID | SYS_SETRESUID | SYS_GETRESUID | SYS_SETRESGID + | SYS_GETRESGID => bail!( + "syscall {} is on the allowlist but still needs guest identity handling in the executor", + number + ), + SYS_GETTID => Ok(SyscallEffect::Return(unsafe { unsafe_pid(getpid()) })), + SYS_SET_TID_ADDRESS => Ok(SyscallEffect::Return(unsafe { unsafe_pid(getpid()) })), + SYS_SET_ROBUST_LIST | SYS_SIGALTSTACK | SYS_UNAME | SYS_FUTEX | SYS_PRLIMIT64 + | SYS_SYSINFO | SYS_WAIT4 => bail!( + "syscall {} is on the allowlist but still needs guest memory marshalling in the executor", + number + ), + SYS_CLOCK_GETTIME => { + let clk_id: libc::clockid_t = args[0].try_into().unwrap_or(libc::CLOCK_REALTIME); + let ts = host_clock_gettime(clk_id)?; + let nsec = (ts.tv_sec as u64).saturating_mul(1_000_000_000) + ts.tv_nsec as u64; + Ok(SyscallEffect::Return(nsec)) + } + SYS_EXIT | SYS_EXIT_GROUP => Ok(SyscallEffect::Exit(args[0] as i32)), + SYS_EVENTFD2 | SYS_EPOLL_CREATE1 | SYS_EPOLL_CTL | SYS_EPOLL_PWAIT | SYS_DUP3 + | SYS_FCNTL | SYS_IOCTL | SYS_READ | SYS_WRITE | SYS_OPENAT | SYS_CLOSE | SYS_PIPE2 + | SYS_LSEEK | SYS_PSELECT6 | SYS_PPOLL | SYS_READLINKAT | SYS_NEWFSTATAT + | SYS_FSTAT | SYS_BRK | SYS_MMAP | SYS_MUNMAP | SYS_MPROTECT | SYS_MADVISE + | SYS_RT_SIGACTION | SYS_RT_SIGPROCMASK | SYS_GETRANDOM => { + bail!( + "syscall {} is on the allowlist but still needs guest memory marshalling in the executor", + number + ) + } + _ => bail!("unsupported syscall number {}", number), + } + } + + pub fn describe_allowlist(&self) -> Vec<&'static str> { + vec![ + "read", + "write", + "eventfd2", + "epoll_create1", + "epoll_ctl", + "epoll_pwait", + "openat", + "close", + "lseek", + 
"getcwd", + "dup3", + "fcntl", + "ioctl", + "readlinkat", + "newfstatat", + "fstat", + "pipe2", + "pselect6", + "ppoll", + "futex", + "sigaltstack", + "set_tid_address", + "set_robust_list", + "setuid", + "setgid", + "setresuid", + "getresuid", + "setresgid", + "getresgid", + "uname", + "getppid", + "wait4", + "brk", + "mmap", + "munmap", + "mprotect", + "madvise", + "prlimit64", + "prctl", + "rt_sigaction", + "rt_sigprocmask", + "clock_gettime", + "getpid", + "getuid", + "geteuid", + "getgid", + "getegid", + "gettid", + "sysinfo", + "getrandom", + "membarrier", + "rseq", + "exit", + "exit_group", + ] + } + + pub fn validate_env_strings(&self, config: &RuntimeConfig) -> Result> { + config + .env + .iter() + .map(|(k, v)| { + CString::new(format!("{k}={v}")).context("invalid env contains interior NUL") + }) + .collect() + } +} + +fn unsafe_pid(value: pid_t) -> u64 { + value.max(0) as u64 +} + +fn host_clock_gettime(clk_id: libc::clockid_t) -> Result { + let mut ts = timespec { + tv_sec: 0, + tv_nsec: 0, + }; + let rc = unsafe { clock_gettime(clk_id, &mut ts as *mut timespec) }; + if rc != 0 { + bail!( + "clock_gettime({clk_id}) failed with errno {}", + std::io::Error::last_os_error() + ); + } + Ok(ts) +} + +fn build_boot_args(image: &LoadedElf, config: &RuntimeConfig) -> Vec { + let mut argv = Vec::with_capacity(config.args.len() + 1); + argv.push(image.path.display().to_string()); + argv.extend(config.args.iter().cloned()); + argv +} + +fn build_boot_env(config: &RuntimeConfig) -> Vec { + config.env.iter().map(|(k, v)| format!("{k}={v}")).collect() +} + +fn initialize_user_stack( + memory: &mut GuestMemory, + stack_base: u64, + stack_top: u64, + argv: &[String], + envp: &[String], + entry: u64, +) -> Result { + let mut sp = stack_top; + + let execfn_addr = push_c_string( + memory, + &mut sp, + argv.first().map(String::as_str).unwrap_or(""), + )?; + let platform_addr = push_c_string(memory, &mut sp, "linx64")?; + let random_addr = push_bytes(memory, &mut sp, &[0xA5; 
16])?; + + let mut env_ptrs = Vec::with_capacity(envp.len()); + for item in envp.iter().rev() { + env_ptrs.push(push_c_string(memory, &mut sp, item)?); + } + env_ptrs.reverse(); + + let mut argv_ptrs = Vec::with_capacity(argv.len()); + for item in argv.iter().rev() { + argv_ptrs.push(push_c_string(memory, &mut sp, item)?); + } + argv_ptrs.reverse(); + + let auxv = vec![ + (AT_PAGESZ, PAGE_SIZE), + (AT_ENTRY, entry), + (AT_UID, 0), + (AT_EUID, 0), + (AT_GID, 0), + (AT_EGID, 0), + (AT_PLATFORM, platform_addr), + (AT_HWCAP, 0), + (AT_CLKTCK, 100), + (AT_RANDOM, random_addr), + (AT_HWCAP2, 0), + (AT_HWCAP3, 0), + (AT_HWCAP4, 0), + (AT_EXECFN, execfn_addr), + (AT_SYSINFO_EHDR, 0), + (AT_MINSIGSTKSZ, GUEST_MINSIGSTKSZ), + (AT_NULL, 0), + ]; + + let mut words = Vec::new(); + words.push(argv.len() as u64); + words.extend(argv_ptrs.iter().copied()); + words.push(0); + words.extend(env_ptrs.iter().copied()); + words.push(0); + for (key, value) in auxv { + words.push(key); + words.push(value); + } + + let frame_bytes = (words.len() * 8) as u64; + sp = align_down( + sp.checked_sub(frame_bytes) + .context("guest stack underflow while writing argv/envp")?, + 16, + ); + if sp < stack_base { + bail!("guest stack initialization exceeds reserved stack region"); + } + + for (idx, word) in words.into_iter().enumerate() { + memory + .write_u64(sp + (idx as u64 * 8), word) + .context("failed to populate initial stack words")?; + } + + Ok(sp) +} + +fn push_c_string(memory: &mut GuestMemory, sp: &mut u64, text: &str) -> Result { + let mut bytes = text.as_bytes().to_vec(); + bytes.push(0); + push_bytes(memory, sp, &bytes) +} + +fn push_bytes(memory: &mut GuestMemory, sp: &mut u64, bytes: &[u8]) -> Result { + let next_sp = sp + .checked_sub(bytes.len() as u64) + .context("guest stack underflow while writing bytes")?; + memory + .write_bytes(next_sp, bytes) + .context("failed to write guest stack bytes")?; + *sp = next_sp; + Ok(next_sp) +} + +pub fn guest_prot_to_region_flags(prot: u32) -> 
#[cfg(test)]
mod tests {
    use super::*;
    use elf::SegmentImage;

    #[test]
    fn syscall_allowlist_has_getpid() {
        let shim = HostSyscallShim;
        let value = shim.dispatch(SYS_GETPID, [0; 6]).unwrap();
        assert!(matches!(value, SyscallEffect::Return(pid) if pid > 0));
    }

    #[test]
    fn bootstrap_sets_sp_and_argc() {
        let image = LoadedElf {
            path: PathBuf::from("sample.elf"),
            entry: 0x1000,
            little_endian: true,
            bits: 64,
            machine: 0xFEED,
            segments: vec![SegmentImage {
                vaddr: 0x1000,
                mem_size: 0x1000,
                file_size: 4,
                flags: 0b101,
                data: vec![0; 4],
            }],
        };
        let runtime = GuestRuntime::bootstrap(
            image,
            RuntimeConfig {
                args: vec!["arg1".to_string()],
                ..RuntimeConfig::default()
            },
        )
        .unwrap();

        // argv = [elf path, "arg1"] => argc == 2; r1 holds the initial sp,
        // which must have moved below the raw stack top.
        assert_eq!(runtime.boot.argc, 2);
        assert_eq!(runtime.state.regs[1], runtime.boot.stack_pointer);
        assert_ne!(runtime.state.regs[1], runtime.boot.stack_top);
        assert_eq!(
            runtime.memory.read_u64(runtime.boot.stack_pointer).unwrap(),
            runtime.boot.argc
        );
    }

    #[test]
    fn bootstrap_populates_auxv_and_env() {
        let image = LoadedElf {
            path: PathBuf::from("/tmp/bootstrap.elf"),
            entry: 0x1000,
            little_endian: true,
            bits: 64,
            machine: 0xFEED,
            segments: vec![SegmentImage {
                vaddr: 0x1000,
                mem_size: 0x1000,
                file_size: 4,
                flags: 0b101,
                data: vec![0; 4],
            }],
        };
        let runtime = GuestRuntime::bootstrap(
            image,
            RuntimeConfig {
                args: vec!["alpha".to_string(), "beta".to_string()],
                env: BTreeMap::from([
                    ("LX_BOOTSTRAP".to_string(), "1".to_string()),
                    ("LX_TRACE".to_string(), "1".to_string()),
                ]),
                ..RuntimeConfig::default()
            },
        )
        .unwrap();

        let sp = runtime.boot.stack_pointer;
        // argc = argv0 (elf path) + the two explicit args.
        assert_eq!(runtime.memory.read_u64(sp).unwrap(), 3);

        let argv0 = runtime.memory.read_u64(sp + 8).unwrap();
        let argv1 = runtime.memory.read_u64(sp + 16).unwrap();
        let argv2 = runtime.memory.read_u64(sp + 24).unwrap();
        assert_eq!(
            runtime.memory.read_c_string(argv0, 256).unwrap(),
            "/tmp/bootstrap.elf"
        );
        assert_eq!(runtime.memory.read_c_string(argv1, 256).unwrap(), "alpha");
        assert_eq!(runtime.memory.read_c_string(argv2, 256).unwrap(), "beta");

        // envp pointers sit after argv's NULL terminator (at sp + 32).
        let env0 = runtime.memory.read_u64(sp + 40).unwrap();
        let env1 = runtime.memory.read_u64(sp + 48).unwrap();
        assert_eq!(
            runtime.memory.read_c_string(env0, 256).unwrap(),
            "LX_BOOTSTRAP=1"
        );
        assert_eq!(
            runtime.memory.read_c_string(env1, 256).unwrap(),
            "LX_TRACE=1"
        );

        // auxv begins after envp's NULL terminator (at sp + 56).
        let aux_base = sp + 64;
        let mut aux = BTreeMap::new();
        let mut cursor = aux_base;
        loop {
            let key = runtime.memory.read_u64(cursor).unwrap();
            let value = runtime.memory.read_u64(cursor + 8).unwrap();
            aux.insert(key, value);
            cursor += 16;
            if key == AT_NULL {
                break;
            }
        }

        assert_eq!(aux.get(&AT_PAGESZ).copied().unwrap(), PAGE_SIZE);
        assert_eq!(aux.get(&AT_ENTRY).copied().unwrap(), 0x1000);
        // Fixed: the previous assertion here compared aux[&AT_PLATFORM]
        // with itself and could never fail. Assert the identity auxv
        // entries instead; the platform string itself is validated further
        // below via platform_addr.
        assert_eq!(aux.get(&AT_UID).copied().unwrap(), 0);
        assert_eq!(aux.get(&AT_EUID).copied().unwrap(), 0);
        assert_eq!(aux.get(&AT_GID).copied().unwrap(), 0);
        assert_eq!(aux.get(&AT_EGID).copied().unwrap(), 0);
        assert_eq!(aux.get(&AT_HWCAP).copied().unwrap(), 0);
        assert_eq!(aux.get(&AT_CLKTCK).copied().unwrap(), 100);
        assert_eq!(aux.get(&AT_HWCAP2).copied().unwrap(), 0);
        assert_eq!(aux.get(&AT_HWCAP3).copied().unwrap(), 0);
        assert_eq!(aux.get(&AT_HWCAP4).copied().unwrap(), 0);
        assert_eq!(aux.get(&AT_SYSINFO_EHDR).copied().unwrap(), 0);
        assert_eq!(
            aux.get(&AT_MINSIGSTKSZ).copied().unwrap(),
            GUEST_MINSIGSTKSZ
        );

        let platform_addr = aux[&AT_PLATFORM];
        let random_addr = aux[&AT_RANDOM];
        let execfn_addr = aux[&AT_EXECFN];
        let random = runtime.memory.read_bytes(random_addr, 16).unwrap();
        assert!(random.iter().all(|byte| *byte == 0xA5));
        assert_eq!(
            runtime.memory.read_c_string(platform_addr, 32).unwrap(),
            "linx64"
        );
        assert_eq!(
            runtime.memory.read_c_string(execfn_addr, 256).unwrap(),
            "/tmp/bootstrap.elf"
        );
    }

    #[test]
    fn protect_range_splits_region_and_enforces_permissions() {
        let mut memory = GuestMemory {
            regions: vec![MemoryRegion {
                base: 0x4000,
                size: PAGE_SIZE * 2,
                flags: MEM_READ | MEM_WRITE,
                data: vec![0xAA; (PAGE_SIZE * 2) as usize],
            }],
        };

        // Reprotecting the second page splits the region in two; data must
        // survive the split, and writes to the read-only half must fail.
        assert!(memory.protect_range(0x5000, PAGE_SIZE, MEM_READ));
        assert_eq!(memory.regions.len(), 2);
        assert_eq!(memory.regions[0].base, 0x4000);
        assert_eq!(memory.regions[0].flags, MEM_READ | MEM_WRITE);
        assert_eq!(memory.regions[1].base, 0x5000);
        assert_eq!(memory.regions[1].flags, MEM_READ);
        assert_eq!(memory.read_u8_checked(0x5000), Some(0xAA));
        assert!(memory.write_bytes_checked(0x5000, &[0x55]).is_none());
        assert_eq!(memory.write_bytes_checked(0x4000, &[0x55]), Some(()));
    }

    #[test]
    fn unmap_range_removes_overlap_only() {
        let mut memory = GuestMemory {
            regions: vec![MemoryRegion {
                base: 0x4000,
                size: PAGE_SIZE * 3,
                flags: MEM_READ | MEM_WRITE,
                data: vec![0x11; (PAGE_SIZE * 3) as usize],
            }],
        };

        // Unmapping the middle page leaves two disjoint pages behind.
        memory.unmap_range(0x5000, PAGE_SIZE);
        assert_eq!(memory.regions.len(), 2);
        assert_eq!(memory.regions[0].base, 0x4000);
        assert_eq!(memory.regions[0].size, PAGE_SIZE);
        assert_eq!(memory.regions[1].base, 0x6000);
        assert_eq!(memory.regions[1].size, PAGE_SIZE);
        assert_eq!(memory.read_u8(0x4000), Some(0x11));
        assert!(memory.read_u8(0x5000).is_none());
        assert_eq!(memory.read_u8(0x6000), Some(0x11));
    }
}
--- /dev/null +++ b/crates/trace/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "trace" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true + +[dependencies] +anyhow.workspace = true +serde.workspace = true +serde_json.workspace = true +tempfile.workspace = true +isa = { path = "../isa" } diff --git a/crates/trace/src/commit/mod.rs b/crates/trace/src/commit/mod.rs new file mode 100644 index 0000000..d52d73c --- /dev/null +++ b/crates/trace/src/commit/mod.rs @@ -0,0 +1 @@ +pub use crate::linxtrace::write_commit_jsonl; diff --git a/crates/trace/src/lib.rs b/crates/trace/src/lib.rs new file mode 100644 index 0000000..01ad6a7 --- /dev/null +++ b/crates/trace/src/lib.rs @@ -0,0 +1,6 @@ +pub mod commit; +pub mod linxtrace; +pub mod schema; + +pub use commit::write_commit_jsonl; +pub use linxtrace::write_linxtrace; diff --git a/crates/trace/src/linxtrace/mod.rs b/crates/trace/src/linxtrace/mod.rs new file mode 100644 index 0000000..a1da8d4 --- /dev/null +++ b/crates/trace/src/linxtrace/mod.rs @@ -0,0 +1,411 @@ +use anyhow::{Context, Result}; +use isa::{ + LINXTRACE_FORMAT, LaneCatalogEntry, RowCatalogEntry, RunResult, StageCatalogEntry, + StageTraceEvent, default_stage_catalog, +}; +use serde::Serialize; +use std::collections::BTreeSet; +use std::fs; +use std::path::Path; + +#[derive(Debug, Serialize)] +#[serde(tag = "type")] +enum TraceRecord<'a> { + META { + format: &'static str, + contract_id: &'static str, + pipeline_schema_id: &'static str, + stage_order_csv: String, + stage_catalog: Vec, + lane_catalog: Vec, + row_catalog: Vec, + render_prefs: serde_json::Value, + }, + #[serde(rename = "OP_DEF")] + OpDef { + row_id: &'a str, + row_kind: &'a str, + block_uid: u64, + uop_uid: u64, + }, + OCC { + cycle: u64, + row_id: &'a str, + stage_id: &'a str, + lane_id: &'a str, + stall: bool, + cause: &'a str, + #[serde(skip_serializing_if = "Option::is_none")] + checkpoint_id: Option, + #[serde(skip_serializing_if = 
"Option::is_none")] + trap_cause: Option, + #[serde(skip_serializing_if = "Option::is_none")] + traparg0: Option, + #[serde(skip_serializing_if = "Option::is_none")] + target_setup_epoch: Option, + #[serde(skip_serializing_if = "Option::is_none")] + boundary_epoch: Option, + #[serde(skip_serializing_if = "Option::is_none")] + target_source_owner_row_id: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + target_source_epoch: Option, + #[serde(skip_serializing_if = "Option::is_none")] + target_owner_row_id: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + target_producer_kind: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + branch_kind: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + return_kind: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + call_materialization_kind: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + target_source_kind: Option<&'a str>, + }, + RETIRE { + cycle: u64, + row_id: &'a str, + status: &'a str, + }, + #[serde(rename = "BLOCK_EVT")] + BlockEvt { + cycle: u64, + row_id: &'a str, + kind: &'a str, + detail: &'a str, + }, +} + +pub fn write_commit_jsonl(path: impl AsRef, result: &RunResult) -> Result<()> { + let mut body = String::new(); + for rec in &result.commits { + body.push_str(&serde_json::to_string(rec)?); + body.push('\n'); + } + fs::write(path.as_ref(), body) + .with_context(|| format!("failed to write commit trace {}", path.as_ref().display())) +} + +pub fn write_linxtrace( + path: impl AsRef, + result: &RunResult, + stage_events: &[StageTraceEvent], +) -> Result<()> { + let path = path.as_ref(); + let stage_catalog = default_stage_catalog(); + let stage_order_csv = stage_catalog + .iter() + .map(|entry| entry.stage_id.clone()) + .collect::>() + .join(","); + let row_ids = collect_row_ids(stage_events, result.commits.len()); + let row_catalog = row_ids + .iter() + .enumerate() + .map(|(idx, row_id)| 
RowCatalogEntry { + row_id: row_id.clone(), + row_kind: "uop".to_string(), + core_id: "core0".to_string(), + block_uid: 0, + uop_uid: idx as u64, + left_label: row_label(result, idx), + detail_defaults: result.metrics.exit_reason.clone(), + }) + .collect::>(); + let lane_catalog = collect_lane_catalog(stage_events); + + let mut lines = Vec::new(); + lines.push(serde_json::to_string(&TraceRecord::META { + format: LINXTRACE_FORMAT, + contract_id: "LXMODEL-TRACE-BOOTSTRAP", + pipeline_schema_id: "LC-TRACE1-LXMODEL", + stage_order_csv, + stage_catalog, + lane_catalog, + row_catalog, + render_prefs: serde_json::json!({"focus":"bootstrap"}), + })?); + for (idx, row_id) in row_ids.iter().enumerate() { + lines.push(serde_json::to_string(&TraceRecord::OpDef { + row_id, + row_kind: "uop", + block_uid: 0, + uop_uid: idx as u64, + })?); + } + + if stage_events.is_empty() { + lines.push(serde_json::to_string(&TraceRecord::OCC { + cycle: 0, + row_id: "uop0", + stage_id: "F0", + lane_id: "scalar0", + stall: false, + cause: "bootstrap_fetch", + checkpoint_id: None, + trap_cause: None, + traparg0: None, + target_setup_epoch: None, + boundary_epoch: None, + target_source_owner_row_id: None, + target_source_epoch: None, + target_owner_row_id: None, + target_producer_kind: None, + branch_kind: None, + return_kind: None, + call_materialization_kind: None, + target_source_kind: None, + })?); + let retire_cycle = result.commits.first().map(|rec| rec.cycle).unwrap_or(0); + lines.push(serde_json::to_string(&TraceRecord::OCC { + cycle: retire_cycle, + row_id: "uop0", + stage_id: "CMT", + lane_id: "scalar0", + stall: false, + cause: "trap_commit", + checkpoint_id: None, + trap_cause: None, + traparg0: None, + target_setup_epoch: None, + boundary_epoch: None, + target_source_owner_row_id: None, + target_source_epoch: None, + target_owner_row_id: None, + target_producer_kind: None, + branch_kind: None, + return_kind: None, + call_materialization_kind: None, + target_source_kind: None, + })?); 
+ } else { + for event in stage_events { + lines.push(serde_json::to_string(&TraceRecord::OCC { + cycle: event.cycle, + row_id: &event.row_id, + stage_id: &event.stage_id, + lane_id: &event.lane_id, + stall: event.stall, + cause: &event.cause, + checkpoint_id: event.checkpoint_id, + trap_cause: event.trap_cause, + traparg0: event.traparg0, + target_setup_epoch: event.target_setup_epoch, + boundary_epoch: event.boundary_epoch, + target_source_owner_row_id: event.target_source_owner_row_id.as_deref(), + target_source_epoch: event.target_source_epoch, + target_owner_row_id: event.target_owner_row_id.as_deref(), + target_producer_kind: event.target_producer_kind.as_deref(), + branch_kind: event.branch_kind.as_deref(), + return_kind: event.return_kind.as_deref(), + call_materialization_kind: event.call_materialization_kind.as_deref(), + target_source_kind: event.target_source_kind.as_deref(), + })?); + } + } + + if result.commits.is_empty() { + lines.push(serde_json::to_string(&TraceRecord::RETIRE { + cycle: 0, + row_id: row_ids.first().map(String::as_str).unwrap_or("uop0"), + status: &result.metrics.exit_reason, + })?); + lines.push(serde_json::to_string(&TraceRecord::BlockEvt { + cycle: 0, + row_id: row_ids.first().map(String::as_str).unwrap_or("uop0"), + kind: "fault", + detail: &result.metrics.exit_reason, + })?); + } else { + for (idx, commit) in result.commits.iter().enumerate() { + let status = if idx + 1 == result.commits.len() { + result.metrics.exit_reason.as_str() + } else { + "retired" + }; + let row_id = row_ids + .get(idx) + .map(String::as_str) + .unwrap_or_else(|| row_ids.last().map(String::as_str).unwrap_or("uop0")); + lines.push(serde_json::to_string(&TraceRecord::RETIRE { + cycle: commit.cycle, + row_id, + status, + })?); + } + + let last_commit = result.commits.last().expect("checked non-empty commits"); + let last_row = row_ids.last().map(String::as_str).unwrap_or("uop0"); + lines.push(serde_json::to_string(&TraceRecord::BlockEvt { + cycle: 
last_commit.cycle, + row_id: last_row, + kind: "fault", + detail: &result.metrics.exit_reason, + })?); + } + + fs::write(path, lines.join("\n") + "\n") + .with_context(|| format!("failed to write {}", path.display())) +} + +fn collect_row_ids(stage_events: &[StageTraceEvent], commit_count: usize) -> Vec { + let mut row_ids = Vec::new(); + let mut seen = BTreeSet::new(); + for event in stage_events { + if seen.insert(event.row_id.clone()) { + row_ids.push(event.row_id.clone()); + } + } + let target_rows = commit_count.max(1); + for idx in row_ids.len()..target_rows { + row_ids.push(format!("uop{idx}")); + } + row_ids +} + +fn collect_lane_catalog(stage_events: &[StageTraceEvent]) -> Vec { + let mut lane_catalog = Vec::new(); + let mut seen = BTreeSet::new(); + for event in stage_events { + if seen.insert(event.lane_id.clone()) { + lane_catalog.push(LaneCatalogEntry { + lane_id: event.lane_id.clone(), + label: event.lane_id.clone(), + }); + } + } + if lane_catalog.is_empty() { + lane_catalog.push(LaneCatalogEntry { + lane_id: "scalar0".to_string(), + label: "scalar0".to_string(), + }); + } + lane_catalog +} + +fn row_label(result: &RunResult, idx: usize) -> String { + if let Some(commit) = result.commits.get(idx) { + format!("{}@0x{:x}", result.image_name, commit.pc) + } else { + format!("{}@0x{:x}", result.image_name, result.entry_pc) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use isa::{CommitRecord, EngineKind, RunMetrics}; + + #[test] + fn linxtrace_meta_is_first_record() { + let result = RunResult { + image_name: "sample.elf".to_string(), + entry_pc: 0x1000, + metrics: RunMetrics { + engine: EngineKind::Func, + cycles: 1, + commits: 1, + exit_reason: "bootstrap".to_string(), + }, + commits: vec![CommitRecord::unsupported( + 0, + 0x1000, + 0, + 4, + &isa::BlockMeta::default(), + )], + decoded: Vec::new(), + }; + let tmp = tempfile::NamedTempFile::new().unwrap(); + write_linxtrace(tmp.path(), &result, &[]).unwrap(); + let text = 
fs::read_to_string(tmp.path()).unwrap(); + assert!(text.lines().next().unwrap().contains("\"type\":\"META\"")); + } + + #[test] + fn linxtrace_emits_multi_row_defs_and_retires() { + let result = RunResult { + image_name: "sample.elf".to_string(), + entry_pc: 0x1000, + metrics: RunMetrics { + engine: EngineKind::Cycle, + cycles: 4, + commits: 2, + exit_reason: "guest_exit(0)".to_string(), + }, + commits: vec![ + CommitRecord::unsupported(2, 0x1000, 0x15, 0, &isa::BlockMeta::default()), + CommitRecord::unsupported(3, 0x1004, 0x302b, 0, &isa::BlockMeta::default()), + ], + decoded: Vec::new(), + }; + let stage_events = vec![ + StageTraceEvent { + cycle: 0, + row_id: "uop0".to_string(), + stage_id: "F0".to_string(), + lane_id: "scalar0".to_string(), + stall: false, + cause: "resident".to_string(), + checkpoint_id: Some(7), + trap_cause: Some(0x0000_B001), + traparg0: Some(0x1004), + target_setup_epoch: Some(5), + boundary_epoch: Some(7), + target_source_owner_row_id: Some("uop3".to_string()), + target_source_epoch: Some(4), + target_owner_row_id: Some("uop7".to_string()), + target_producer_kind: Some("setc_tgt".to_string()), + branch_kind: Some("cond".to_string()), + return_kind: Some("fret_stk".to_string()), + call_materialization_kind: Some("adjacent_setret".to_string()), + target_source_kind: Some("call_return_adjacent_setret".to_string()), + }, + StageTraceEvent { + cycle: 1, + row_id: "uop1".to_string(), + stage_id: "F0".to_string(), + lane_id: "scalar0".to_string(), + stall: false, + cause: "resident".to_string(), + checkpoint_id: None, + trap_cause: None, + traparg0: None, + target_setup_epoch: None, + boundary_epoch: None, + target_source_owner_row_id: None, + target_source_epoch: None, + target_owner_row_id: None, + target_producer_kind: None, + branch_kind: None, + return_kind: None, + call_materialization_kind: None, + target_source_kind: None, + }, + ]; + + let tmp = tempfile::NamedTempFile::new().unwrap(); + write_linxtrace(tmp.path(), &result, 
&stage_events).unwrap(); + let text = fs::read_to_string(tmp.path()).unwrap(); + + assert!(text.contains("\"row_id\":\"uop0\"")); + assert!(text.contains("\"row_id\":\"uop1\"")); + assert_eq!(text.matches("\"type\":\"OP_DEF\"").count(), 2); + assert_eq!(text.matches("\"type\":\"RETIRE\"").count(), 2); + assert!(text.contains("\"status\":\"retired\"")); + assert!(text.contains("\"status\":\"guest_exit(0)\"")); + assert!(text.contains("\"checkpoint_id\":7")); + assert!(text.contains("\"trap_cause\":45057")); + assert!(text.contains("\"traparg0\":4100")); + assert!(text.contains("\"target_setup_epoch\":5")); + assert!(text.contains("\"boundary_epoch\":7")); + assert!(text.contains("\"target_source_owner_row_id\":\"uop3\"")); + assert!(text.contains("\"target_source_epoch\":4")); + assert!(text.contains("\"target_owner_row_id\":\"uop7\"")); + assert!(text.contains("\"target_producer_kind\":\"setc_tgt\"")); + assert!(text.contains("\"branch_kind\":\"cond\"")); + assert!(text.contains("\"return_kind\":\"fret_stk\"")); + assert!(text.contains("\"call_materialization_kind\":\"adjacent_setret\"")); + assert!(text.contains("\"target_source_kind\":\"call_return_adjacent_setret\"")); + } +} diff --git a/crates/trace/src/schema/mod.rs b/crates/trace/src/schema/mod.rs new file mode 100644 index 0000000..be5d8b1 --- /dev/null +++ b/crates/trace/src/schema/mod.rs @@ -0,0 +1 @@ +// Shared trace-schema namespace for future schema/type splits. 
diff --git a/tests/fixtures/bootstrap_runtime.toml b/tests/fixtures/bootstrap_runtime.toml new file mode 100644 index 0000000..1c2ade3 --- /dev/null +++ b/tests/fixtures/bootstrap_runtime.toml @@ -0,0 +1,7 @@ +mem_bytes = 134217728 +stack_size = 8388608 +args = ["guest-program"] + +[env] +LX_TRACE = "1" +LX_BOOTSTRAP = "1" diff --git a/tests/fixtures/bootstrap_sweep.toml b/tests/fixtures/bootstrap_sweep.toml new file mode 100644 index 0000000..2cef122 --- /dev/null +++ b/tests/fixtures/bootstrap_sweep.toml @@ -0,0 +1,11 @@ +[[cases]] +name = "bootstrap-func" +engine = "func" +elf = "/absolute/path/to/guest.elf" +iterations = 1 + +[[cases]] +name = "bootstrap-cycle" +engine = "cycle" +elf = "/absolute/path/to/guest.elf" +iterations = 1 diff --git a/tests/fixtures/file_io_input.txt b/tests/fixtures/file_io_input.txt new file mode 100644 index 0000000..7e21986 --- /dev/null +++ b/tests/fixtures/file_io_input.txt @@ -0,0 +1 @@ +fixture-data:linxcoremodel diff --git a/tests/fixtures/linux_user_bootstrap_runtime.toml b/tests/fixtures/linux_user_bootstrap_runtime.toml new file mode 100644 index 0000000..24e83ef --- /dev/null +++ b/tests/fixtures/linux_user_bootstrap_runtime.toml @@ -0,0 +1,7 @@ +mem_bytes = 134217728 +stack_size = 8388608 +args = ["alpha", "beta"] + +[env] +LX_BOOTSTRAP = "1" +LX_TRACE = "1" diff --git a/tests/fixtures/linux_user_bootstrap_stack.c b/tests/fixtures/linux_user_bootstrap_stack.c new file mode 100644 index 0000000..23c25a9 --- /dev/null +++ b/tests/fixtures/linux_user_bootstrap_stack.c @@ -0,0 +1,138 @@ +typedef unsigned long u64; + +struct aux_pair { + u64 key; + u64 value; +}; + +extern void _start(void); +__attribute__((noreturn)) void bootstrap_main(u64 *initial_sp); + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, 
long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +static long string_eq(const char *lhs, const char *rhs) +{ + while (*lhs && *rhs) { + if (*lhs != *rhs) + return 0; + lhs += 1; + rhs += 1; + } + return *lhs == *rhs; +} + +static long prefix_eq(const char *lhs, const char *prefix) +{ + while (*prefix) { + if (*lhs != *prefix) + return 0; + lhs += 1; + prefix += 1; + } + return 1; +} + +static long env_contains(char **envp, const char *needle) +{ + long idx = 0; + while (envp[idx]) { + if (string_eq(envp[idx], needle)) + return 1; + idx += 1; + } + return 0; +} + +static const struct aux_pair *find_auxv(const struct aux_pair *auxv, u64 key) +{ + long idx = 0; + while (auxv[idx].key != 0) { + if (auxv[idx].key == key) + return &auxv[idx]; + idx += 1; + } + return (const struct aux_pair *)0; +} + +__attribute__((naked, noreturn)) void _start(void) +{ + __asm__ volatile( + "c.movr sp, ->a0\n\t" + "j bootstrap_main"); +} + +__attribute__((noreturn)) void bootstrap_main(u64 *sp) +{ + static const char ok[] = "bootstrap stack ok\n"; + char **argv = (char **)(sp + 1); + char **envp; + const struct aux_pair *auxv; + const struct aux_pair *pagesz; + const struct aux_pair *entry; + const struct aux_pair *random; + const struct aux_pair *execfn; + long wrote; + + if (sp[0] != 3) + linx_exit(40); + if (!string_eq(argv[1], "alpha")) + linx_exit(41); + if (!string_eq(argv[2], "beta")) + linx_exit(42); + if (!prefix_eq(argv[0], "/Users/zhoubot/linx-isa/tools/LinxCoreModel/out/bringup/linux_user_bootstrap_stack.elf")) + linx_exit(43); + + envp = argv + sp[0] + 1; + while (*envp) + envp += 1; + envp += 1; 
+ auxv = (const struct aux_pair *)envp; + + if (!env_contains(argv + sp[0] + 1, "LX_BOOTSTRAP=1")) + linx_exit(44); + if (!env_contains(argv + sp[0] + 1, "LX_TRACE=1")) + linx_exit(45); + + pagesz = find_auxv(auxv, 6); + entry = find_auxv(auxv, 9); + random = find_auxv(auxv, 25); + execfn = find_auxv(auxv, 31); + if (!pagesz || pagesz->value != 4096) + linx_exit(46); + if (!entry || entry->value != (u64)&_start) + linx_exit(47); + if (!random || !execfn) + linx_exit(48); + if (!string_eq((const char *)execfn->value, argv[0])) + linx_exit(49); + if (((const unsigned char *)random->value)[0] != 0xA5 || + ((const unsigned char *)random->value)[15] != 0xA5) + linx_exit(50); + + wrote = linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)); + if (wrote < 0) + linx_exit(51); + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_compiler_smoke.c b/tests/fixtures/linux_user_compiler_smoke.c new file mode 100644 index 0000000..24d03a7 --- /dev/null +++ b/tests/fixtures/linux_user_compiler_smoke.c @@ -0,0 +1,42 @@ +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + register long a7 __asm__("a7") = 93; + register long a0 __asm__("a0") = code; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + __builtin_unreachable(); +} + +static long mix_values(long x, long y) +{ + long acc = x + y; + if ((acc & 1) == 0) + return acc + 7; + return acc - 3; +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "compiler path ok\n"; + static const char bad[] = "compiler path bad\n"; + long value = mix_values(10, 12); + const char *msg = ok; + long len = (long)(sizeof(ok) - 1); + + if (value 
!= 29) { + msg = bad; + len = (long)(sizeof(bad) - 1); + } + + (void)linx_syscall3(64, 1, (long)msg, len); + linx_exit(value == 29 ? 0 : 1); +} diff --git a/tests/fixtures/linux_user_epoll_eventfd.c b/tests/fixtures/linux_user_epoll_eventfd.c new file mode 100644 index 0000000..c14b8d6 --- /dev/null +++ b/tests/fixtures/linux_user_epoll_eventfd.c @@ -0,0 +1,117 @@ +typedef unsigned long u64; +typedef unsigned int u32; + +struct epoll_event_guest { + u32 events; + u32 __pad; + u64 data; +}; + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall4(long number, long arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline long linx_syscall6(long number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = 
arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + register long a4 __asm__("a4") = arg4; + register long a5 __asm__("a5") = arg5; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "epoll eventfd ok\n"; + u64 write_value = 7; + u64 read_value = 0; + int event_fd; + int epoll_fd; + struct epoll_event_guest ctl; + struct epoll_event_guest out[2]; + + ctl.events = 0x001; + ctl.__pad = 0; + ctl.data = 0x1122334455667788UL; + out[0].events = 0; + out[0].__pad = 0; + out[0].data = 0; + out[1].events = 0; + out[1].__pad = 0; + out[1].data = 0; + + event_fd = (int)linx_syscall2(19, 0, 0); + if (event_fd < 0) + linx_exit(210); + + epoll_fd = (int)linx_syscall1(20, 0); + if (epoll_fd < 0) + linx_exit(211); + + if (linx_syscall4(21, epoll_fd, 1, event_fd, (long)&ctl) != 0) + linx_exit(212); + if (linx_syscall3(64, event_fd, (long)&write_value, 8) != 8) + linx_exit(213); + if (linx_syscall6(22, epoll_fd, (long)out, 2, 0, 0, 0) != 1) + linx_exit(214); + if ((out[0].events & 0x001) == 0) + linx_exit(215); + if (out[0].data != 0x1122334455667788UL) + linx_exit(216); + if (linx_syscall3(63, event_fd, (long)&read_value, 8) != 8) + linx_exit(217); + if (read_value != 7) + linx_exit(218); + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(219); + if (linx_syscall1(57, epoll_fd) != 0) + linx_exit(220); + if (linx_syscall1(57, event_fd) != 0) + linx_exit(221); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_fd_control.c b/tests/fixtures/linux_user_fd_control.c new file mode 100644 index 0000000..5c96a80 --- /dev/null +++ b/tests/fixtures/linux_user_fd_control.c @@ -0,0 +1,93 @@ +typedef unsigned int u32; + 
+struct winsize_guest { + unsigned short row; + unsigned short col; + unsigned short xpixel; + unsigned short ypixel; +}; + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "fd control ok\n"; + int pipefd[2]; + int read_fd; + int dup_fd; + char in[3]; + struct winsize_guest ws; + u32 pgrp = 77; + + if (linx_syscall2(59, (long)pipefd, 02000000) != 0) + linx_exit(170); + read_fd = pipefd[0]; + dup_fd = (int)linx_syscall3(24, pipefd[1], 20, 02000000); + if (dup_fd != 20) + linx_exit(171); + if (linx_syscall2(25, dup_fd, 1) != 1) + linx_exit(172); + + if (linx_syscall3(64, dup_fd, (long)"abc", 3) != 3) + linx_exit(173); + if (linx_syscall3(63, read_fd, (long)in, 3) != 3) + linx_exit(174); + if (in[0] != 'a' || in[1] != 'b' || in[2] != 'c') + linx_exit(175); + + if (linx_syscall3(29, 1, 0x5413, (long)&ws) != 0) + linx_exit(176); + if (ws.row != 24 || ws.col != 80) + linx_exit(177); + + if (linx_syscall3(29, 1, 0x5410, (long)&pgrp) != 0) + linx_exit(178); + pgrp = 0; + if 
(linx_syscall3(29, 1, 0x540f, (long)&pgrp) != 0) + linx_exit(179); + if (pgrp != 77) + linx_exit(180); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(181); + + if (linx_syscall1(57, dup_fd) != 0) + linx_exit(182); + if (linx_syscall1(57, pipefd[1]) != 0) + linx_exit(183); + if (linx_syscall1(57, read_fd) != 0) + linx_exit(184); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_file_io.c b/tests/fixtures/linux_user_file_io.c new file mode 100644 index 0000000..9ffef0d --- /dev/null +++ b/tests/fixtures/linux_user_file_io.c @@ -0,0 +1,87 @@ +static inline long linx_syscall0(long number) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0"); + __asm__ volatile("acrc 1" : "=r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall4(long number, long arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +static long buffer_matches(const char *lhs, const char *rhs, long 
len) +{ + long idx = 0; + while (idx < len) { + if (lhs[idx] != rhs[idx]) + return 0; + idx += 1; + } + return 1; +} + +__attribute__((noreturn)) void _start(void) +{ + static const char path[] = "file_io_input.txt"; + static const char expected[] = "fixture-data:linxcoremodel\n"; + static const char ok[] = "file io ok\n"; + static const char bad[] = "file io bad\n"; + char buffer[32]; + long fd = linx_syscall4(56, -100, (long)path, 0, 0); + long size; + long wrote; + + if (fd < 0) + linx_exit(10); + + size = linx_syscall3(63, fd, (long)buffer, (long)(sizeof(expected) - 1)); + if (size < 0) + linx_exit(11); + + (void)linx_syscall1(57, fd); + + if (size == (long)(sizeof(expected) - 1) && + buffer_matches(buffer, expected, size)) { + wrote = linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)); + if (wrote < 0) + linx_exit(12); + linx_exit(0); + } + + wrote = linx_syscall3(64, 1, (long)bad, (long)(sizeof(bad) - 1)); + if (wrote < 0) + linx_exit(13); + linx_exit(1); +} diff --git a/tests/fixtures/linux_user_file_io_runtime.toml b/tests/fixtures/linux_user_file_io_runtime.toml new file mode 100644 index 0000000..6dd3cc3 --- /dev/null +++ b/tests/fixtures/linux_user_file_io_runtime.toml @@ -0,0 +1,6 @@ +mem_bytes = 134217728 +stack_size = 8388608 +args = [] +workdir = "/Users/zhoubot/linx-isa/tools/LinxCoreModel/tests/fixtures" + +[env] diff --git a/tests/fixtures/linux_user_futex_smoke.c b/tests/fixtures/linux_user_futex_smoke.c new file mode 100644 index 0000000..51e5a06 --- /dev/null +++ b/tests/fixtures/linux_user_futex_smoke.c @@ -0,0 +1,67 @@ +typedef unsigned int u32; + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall4(long number, long 
arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +#define FUTEX_WAIT 0 +#define FUTEX_WAKE 1 +#define FUTEX_PRIVATE 128 +#define EAGAIN 11L + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "futex smoke ok\n"; + u32 fut = 7; + long rc; + + rc = linx_syscall4(98, (long)&fut, FUTEX_WAIT | FUTEX_PRIVATE, 3, 0); + if (rc != -EAGAIN) + linx_exit(80); + + rc = linx_syscall3(98, (long)&fut, FUTEX_WAKE | FUTEX_PRIVATE, 1); + if (rc != 0) + linx_exit(81); + + fut = 2; + fut = 5; + rc = linx_syscall4(98, (long)&fut, FUTEX_WAIT | FUTEX_PRIVATE, 2, 0); + if (rc != -EAGAIN) + linx_exit(82); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(83); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_heap_map.c b/tests/fixtures/linux_user_heap_map.c new file mode 100644 index 0000000..2bb87ca --- /dev/null +++ b/tests/fixtures/linux_user_heap_map.c @@ -0,0 +1,116 @@ +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + 
register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall6( + long number, + long arg0, + long arg1, + long arg2, + long arg3, + long arg4, + long arg5) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + register long a4 __asm__("a4") = arg4; + register long a5 __asm__("a5") = arg5; + __asm__ volatile( + "acrc 1" + : "+r"(a0) + : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5) + : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +#define PROT_READ 0x1 +#define PROT_WRITE 0x2 +#define MAP_PRIVATE 0x02 +#define MAP_ANONYMOUS 0x20 +#define PAGE_SIZE 4096L + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "heap map ok\n"; + char *heap_base = (char *)linx_syscall1(214, 0); + char *heap_top; + char *map; + long wrote; + + if ((long)heap_base <= 0) + linx_exit(30); + + heap_top = (char *)linx_syscall1(214, (long)(heap_base + PAGE_SIZE)); + if (heap_top != heap_base + PAGE_SIZE) + linx_exit(31); + + heap_base[0] = 'b'; + heap_base[1] = 'r'; + heap_base[2] = 'k'; + if (heap_base[0] != 'b' || heap_base[2] != 'k') + linx_exit(32); + + if ((char *)linx_syscall1(214, (long)heap_base) != heap_base) + linx_exit(33); + + map = (char *)linx_syscall6( + 222, + 0, + PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, + -1, + 
0); + if ((long)map < 0) + linx_exit(34); + if (((long)map & (PAGE_SIZE - 1)) != 0) + linx_exit(35); + + map[0] = 'm'; + map[1] = 'a'; + map[2] = 'p'; + map[3] = '\n'; + if (map[1] != 'a' || map[3] != '\n') + linx_exit(36); + + if (linx_syscall2(215, (long)map, PAGE_SIZE) != 0) + linx_exit(37); + + wrote = linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)); + if (wrote < 0) + linx_exit(38); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_identity_startup.c b/tests/fixtures/linux_user_identity_startup.c new file mode 100644 index 0000000..8741cfa --- /dev/null +++ b/tests/fixtures/linux_user_identity_startup.c @@ -0,0 +1,105 @@ +typedef unsigned long u64; + +static inline long linx_syscall0(long number) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0"); + __asm__ volatile("acrc 1" : "=r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +struct utsname_guest { + char sysname[65]; + char nodename[65]; + char release[65]; + char version[65]; + char 
machine[65]; + char domainname[65]; +}; + +struct robust_list_head_guest { + u64 next; + long futex_offset; + u64 pending; +}; + +static int string_eq(const char *lhs, const char *rhs) +{ + while (*lhs != 0 && *rhs != 0) { + if (*lhs != *rhs) + return 0; + ++lhs; + ++rhs; + } + return *lhs == *rhs; +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "identity startup ok\n"; + struct utsname_guest uts; + struct robust_list_head_guest robust = {0, 0, 0}; + int clear_child_tid = -1; + long tid; + + tid = linx_syscall1(96, (long)&clear_child_tid); + if (tid <= 0) + linx_exit(70); + if (tid != linx_syscall0(178)) + linx_exit(71); + + if (linx_syscall2(99, (long)&robust, (long)sizeof(robust)) != 0) + linx_exit(72); + + if (linx_syscall1(160, (long)&uts) != 0) + linx_exit(73); + if (!string_eq(uts.sysname, "Linux")) + linx_exit(74); + if (!string_eq(uts.machine, "linx64")) + linx_exit(75); + if (!string_eq(uts.nodename, "linxcoremodel")) + linx_exit(76); + + if (linx_syscall0(174) != 0 || linx_syscall0(175) != 0) + linx_exit(77); + if (linx_syscall0(176) != 0 || linx_syscall0(177) != 0) + linx_exit(78); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(79); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_mprotect_signal.c b/tests/fixtures/linux_user_mprotect_signal.c new file mode 100644 index 0000000..b8812d5 --- /dev/null +++ b/tests/fixtures/linux_user_mprotect_signal.c @@ -0,0 +1,121 @@ +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + 
return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall4(long number, long arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline long linx_syscall6( + long number, + long arg0, + long arg1, + long arg2, + long arg3, + long arg4, + long arg5) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + register long a4 __asm__("a4") = arg4; + register long a5 __asm__("a5") = arg5; + __asm__ volatile( + "acrc 1" + : "+r"(a0) + : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5) + : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +#define PROT_NONE 0x0 +#define PROT_READ 0x1 +#define PROT_WRITE 0x2 +#define MAP_PRIVATE 0x02 +#define MAP_ANONYMOUS 0x20 +#define PAGE_SIZE 4096L +#define EFAULT 14L + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "mprotect signal ok\n"; + unsigned long old_mask[2] = {~0UL, ~0UL}; + char *map = (char *)linx_syscall6( + 222, + 0, + PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, + -1, + 0); + long rc; + + if ((long)map < 0) + linx_exit(60); + + map[0] = 'P'; + + if (linx_syscall3(226, 
(long)map, PAGE_SIZE, PROT_NONE) != 0) + linx_exit(61); + + rc = linx_syscall3(64, 1, (long)map, 1); + if (rc != -EFAULT) + linx_exit(62); + + rc = linx_syscall4(135, 0, 0, (long)old_mask, sizeof(old_mask)); + if (rc != 0) + linx_exit(63); + if (old_mask[0] != 0 || old_mask[1] != 0) + linx_exit(64); + + if (linx_syscall3(226, (long)map, PAGE_SIZE, PROT_READ) != 0) + linx_exit(65); + if (linx_syscall3(64, 1, (long)map, 1) != 1) + linx_exit(66); + + if (linx_syscall2(215, (long)map, PAGE_SIZE) != 0) + linx_exit(67); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(68); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_path_random.c b/tests/fixtures/linux_user_path_random.c new file mode 100644 index 0000000..2fa3ee8 --- /dev/null +++ b/tests/fixtures/linux_user_path_random.c @@ -0,0 +1,119 @@ +typedef unsigned int u32; +typedef unsigned long u64; + +struct stat_guest { + u64 dev; + u64 ino; + u32 mode; + u32 nlink; + u32 uid; + u32 gid; + u64 rdev; + u64 __pad0; + long size; + int blksize; + int __pad1; + long blocks; + long atime_sec; + u64 atime_nsec; + long mtime_sec; + u64 mtime_nsec; + long ctime_sec; + u64 ctime_nsec; + u32 __unused[3]; +}; + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ 
volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall4(long number, long arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "path random ok\n"; + static const char file_name[] = "file_io_input.txt"; + char cwd[256]; + unsigned char random_buf[16]; + struct stat_guest st; + long fd; + long flags; + long count; + int i; + + if (linx_syscall2(17, (long)cwd, (long)sizeof(cwd)) != (long)cwd) + linx_exit(130); + if (cwd[0] != '/') + linx_exit(131); + + if (linx_syscall4(79, -100, (long)file_name, (long)&st, 0) != 0) + linx_exit(132); + if (st.size != 27) + linx_exit(133); + + fd = linx_syscall4(56, -100, (long)file_name, 0, 0); + if (fd < 0) + linx_exit(134); + + if (linx_syscall3(25, fd, 2, 1) != 0) + linx_exit(135); + flags = linx_syscall2(25, fd, 1); + if (flags != 1) + linx_exit(136); + + count = linx_syscall3(278, (long)random_buf, (long)sizeof(random_buf), 0); + if (count != (long)sizeof(random_buf)) + linx_exit(137); + for (i = 0; i < (int)sizeof(random_buf); ++i) { + if (random_buf[i] != 0) + break; + } + if (i == (int)sizeof(random_buf)) + linx_exit(138); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(139); + + if (linx_syscall1(57, fd) != 0) + linx_exit(140); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_path_random_runtime.toml b/tests/fixtures/linux_user_path_random_runtime.toml new file mode 100644 index 0000000..6dd3cc3 
--- /dev/null +++ b/tests/fixtures/linux_user_path_random_runtime.toml @@ -0,0 +1,6 @@ +mem_bytes = 134217728 +stack_size = 8388608 +args = [] +workdir = "/Users/zhoubot/linx-isa/tools/LinxCoreModel/tests/fixtures" + +[env] diff --git a/tests/fixtures/linux_user_ppoll_sigaltstack.c b/tests/fixtures/linux_user_ppoll_sigaltstack.c new file mode 100644 index 0000000..e0fd488 --- /dev/null +++ b/tests/fixtures/linux_user_ppoll_sigaltstack.c @@ -0,0 +1,128 @@ +typedef unsigned long u64; +typedef unsigned int u32; + +struct pollfd_guest { + int fd; + short events; + short revents; +}; + +struct timespec_guest { + long tv_sec; + long tv_nsec; +}; + +struct stack_t_guest { + void *ss_sp; + int ss_flags; + int __pad; + unsigned long ss_size; +}; + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall5(long number, long arg0, long arg1, long arg2, long arg3, long arg4) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + register long a4 __asm__("a4") = arg4; + __asm__ volatile("acrc 1" : "+r"(a0) 
: "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "ppoll sigaltstack ok\n"; + char alt[4096]; + char in[2]; + char out[2] = {'o', 'k'}; + int pipefd[2]; + struct stack_t_guest ss; + struct stack_t_guest old; + struct pollfd_guest pfd; + struct timespec_guest ts; + + ss.ss_sp = alt; + ss.ss_flags = 0; + ss.__pad = 0; + ss.ss_size = sizeof(alt); + old.ss_sp = 0; + old.ss_flags = 0; + old.__pad = 0; + old.ss_size = 0; + + if (linx_syscall2(132, (long)&ss, (long)&old) != 0) + linx_exit(190); + if (old.ss_flags != 2) + linx_exit(191); + + old.ss_sp = 0; + old.ss_flags = 0; + old.ss_size = 0; + if (linx_syscall2(132, 0, (long)&old) != 0) + linx_exit(192); + + if (linx_syscall2(59, (long)pipefd, 0) != 0) + linx_exit(193); + if (linx_syscall3(64, pipefd[1], (long)out, 2) != 2) + linx_exit(194); + + pfd.fd = pipefd[0]; + pfd.events = 0x001; + pfd.revents = 0; + ts.tv_sec = 0; + ts.tv_nsec = 0; + if (linx_syscall5(73, (long)&pfd, 1, (long)&ts, 0, 0) != 1) + linx_exit(195); + if ((pfd.revents & 0x001) == 0) + linx_exit(196); + + if (linx_syscall3(63, pipefd[0], (long)in, 2) != 2) + linx_exit(197); + if (in[0] != 'o' || in[1] != 'k') + linx_exit(198); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(199); + + if (linx_syscall1(57, pipefd[1]) != 0) + linx_exit(200); + if (linx_syscall1(57, pipefd[0]) != 0) + linx_exit(201); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_pselect6.c b/tests/fixtures/linux_user_pselect6.c new file mode 100644 index 0000000..6a4f51a --- /dev/null +++ b/tests/fixtures/linux_user_pselect6.c @@ -0,0 +1,136 @@ +typedef unsigned long u64; + +struct timespec_guest { + long tv_sec; + long tv_nsec; +}; + +struct pselect_sigdata { + const void *sigmask; + u64 
sigset_size; +}; + +struct fd_set_guest { + unsigned long fds_bits[16]; +}; + +void *memset(void *dst, int value, unsigned long count) +{ + unsigned char *ptr = (unsigned char *)dst; + unsigned long idx; + for (idx = 0; idx < count; ++idx) + ptr[idx] = (unsigned char)value; + return dst; +} + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall6(long number, long arg0, long arg1, long arg2, long arg3, long arg4, long arg5) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + register long a4 __asm__("a4") = arg4; + register long a5 __asm__("a5") = arg5; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +static void fd_zero(struct fd_set_guest *set) +{ + int i; + for (i = 0; i < 16; ++i) + set->fds_bits[i] = 0; +} + +static void fd_set_one(int fd, struct fd_set_guest *set) +{ + 
set->fds_bits[fd / 64] |= 1UL << (fd % 64); +} + +static int fd_isset(int fd, const struct fd_set_guest *set) +{ + return (set->fds_bits[fd / 64] & (1UL << (fd % 64))) != 0; +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "pselect6 ok\n"; + char out[2] = {'o', 'k'}; + char in[2]; + int pipefd[2]; + struct fd_set_guest readfds; + struct timespec_guest ts; + struct pselect_sigdata sigdata; + int i; + + in[0] = 0; + in[1] = 0; + pipefd[0] = 0; + pipefd[1] = 0; + for (i = 0; i < 16; ++i) + readfds.fds_bits[i] = 0; + + if (linx_syscall2(59, (long)pipefd, 0) != 0) + linx_exit(210); + if (linx_syscall3(64, pipefd[1], (long)out, 2) != 2) + linx_exit(211); + + fd_set_one(pipefd[0], &readfds); + ts.tv_sec = 0; + ts.tv_nsec = 0; + sigdata.sigmask = 0; + sigdata.sigset_size = 0; + + if (linx_syscall6(72, pipefd[0] + 1, (long)&readfds, 0, 0, (long)&ts, (long)&sigdata) != 1) + linx_exit(212); + if (!fd_isset(pipefd[0], &readfds)) + linx_exit(213); + + if (linx_syscall3(63, pipefd[0], (long)in, 2) != 2) + linx_exit(214); + if (in[0] != 'o' || in[1] != 'k') + linx_exit(215); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(216); + if (linx_syscall1(57, pipefd[1]) != 0) + linx_exit(217); + if (linx_syscall1(57, pipefd[0]) != 0) + linx_exit(218); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_setxid_identity.c b/tests/fixtures/linux_user_setxid_identity.c new file mode 100644 index 0000000..250dd0d --- /dev/null +++ b/tests/fixtures/linux_user_setxid_identity.c @@ -0,0 +1,70 @@ +typedef unsigned int u32; + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = 
arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "setxid identity ok\n"; + u32 ruid = 99; + u32 euid = 99; + u32 suid = 99; + u32 rgid = 77; + u32 egid = 77; + u32 sgid = 77; + + if (linx_syscall3(148, (long)&ruid, (long)&euid, (long)&suid) != 0) + linx_exit(110); + if (linx_syscall3(150, (long)&rgid, (long)&egid, (long)&sgid) != 0) + linx_exit(111); + if (ruid != 0 || euid != 0 || suid != 0) + linx_exit(112); + if (rgid != 0 || egid != 0 || sgid != 0) + linx_exit(113); + + if (linx_syscall1(146, 0) != 0) + linx_exit(114); + if (linx_syscall1(144, 0) != 0) + linx_exit(115); + if (linx_syscall3(147, -1, 0, -1) != 0) + linx_exit(116); + if (linx_syscall3(149, -1, 0, -1) != 0) + linx_exit(117); + + ruid = euid = suid = 99; + rgid = egid = sgid = 77; + if (linx_syscall3(148, (long)&ruid, (long)&euid, (long)&suid) != 0) + linx_exit(118); + if (linx_syscall3(150, (long)&rgid, (long)&egid, (long)&sgid) != 0) + linx_exit(119); + if (ruid != 0 || euid != 0 || suid != 0) + linx_exit(120); + if (rgid != 0 || egid != 0 || sgid != 0) + linx_exit(121); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(122); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_stat_lseek.c b/tests/fixtures/linux_user_stat_lseek.c new file mode 100644 index 0000000..cb59320 --- /dev/null +++ b/tests/fixtures/linux_user_stat_lseek.c @@ -0,0 +1,136 @@ +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long 
linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall4(long number, long arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +struct linx_stat { + unsigned long st_dev; + unsigned long st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned long st_rdev; + unsigned long __pad1; + long st_size; + int st_blksize; + int __pad2; + long st_blocks; + long st_atime; + unsigned long st_atime_nsec; + long st_mtime; + unsigned long st_mtime_nsec; + long st_ctime; + unsigned long st_ctime_nsec; + unsigned int __unused4; + unsigned int __unused5; +}; + +struct linx_timespec { + long tv_sec; + long tv_nsec; +}; + +typedef char linx_stat_size_is_128[(sizeof(struct linx_stat) == 128) ? 
1 : -1]; + +static long buffer_matches(const char *lhs, const char *rhs, long len) +{ + long idx = 0; + while (idx < len) { + if (lhs[idx] != rhs[idx]) + return 0; + idx += 1; + } + return 1; +} + +__attribute__((noreturn)) void _start(void) +{ + static const char path[] = "file_io_input.txt"; + static const char expected[] = "fixture-data:linxcoremodel\n"; + static const char ok[] = "stat lseek ok\n"; + static const char bad[] = "stat lseek bad\n"; + char buffer[32]; + struct linx_stat st; + struct linx_timespec ts; + long fd = linx_syscall4(56, -100, (long)path, 0, 0); + long size; + long end; + long wrote; + + if (fd < 0) + linx_exit(20); + + if (linx_syscall2(80, fd, (long)&st) < 0) + linx_exit(21); + + if (st.st_size != (long)(sizeof(expected) - 1)) + linx_exit(22); + + end = linx_syscall3(62, fd, 0, 2); + if (end != st.st_size) + linx_exit(23); + + if (linx_syscall3(62, fd, 0, 0) != 0) + linx_exit(24); + + if (linx_syscall2(113, 0, (long)&ts) < 0) + linx_exit(25); + + if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000L) + linx_exit(26); + + size = linx_syscall3(63, fd, (long)buffer, (long)(sizeof(expected) - 1)); + if (size != st.st_size) + linx_exit(27); + + (void)linx_syscall1(57, fd); + + if (!buffer_matches(buffer, expected, size)) + linx_exit(28); + + wrote = linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)); + if (wrote < 0) + linx_exit(29); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_sysinfo_prlimit.c b/tests/fixtures/linux_user_sysinfo_prlimit.c new file mode 100644 index 0000000..c5da206 --- /dev/null +++ b/tests/fixtures/linux_user_sysinfo_prlimit.c @@ -0,0 +1,114 @@ +typedef unsigned long u64; +typedef unsigned int u32; + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 
__asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall4(long number, long arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +struct sysinfo_guest { + u64 uptime; + u64 loads[3]; + u64 totalram; + u64 freeram; + u64 sharedram; + u64 bufferram; + u64 totalswap; + u64 freeswap; + unsigned short procs; + unsigned short pad; + u64 totalhigh; + u64 freehigh; + u32 mem_unit; + char reserved[256]; +}; + +struct rlimit_guest { + u64 cur; + u64 max; +}; + +#define RLIMIT_STACK 3 + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "sysinfo prlimit ok\n"; + struct sysinfo_guest si; + struct rlimit_guest lim; + struct rlimit_guest new_lim; + long ppid; + + ppid = linx_syscall1(173, 0); + if (ppid <= 0) + linx_exit(90); + + if (linx_syscall1(179, (long)&si) != 0) + linx_exit(91); + if (si.totalram == 0 || si.mem_unit == 0) + linx_exit(92); + if (si.freeram > si.totalram) + linx_exit(93); + if (si.procs != 1) + linx_exit(94); + + if (linx_syscall4(261, 0, RLIMIT_STACK, 0, (long)&lim) != 0) + linx_exit(95); + if 
(lim.cur == 0 || lim.max == 0 || lim.cur > lim.max) + linx_exit(96); + + new_lim.cur = lim.cur / 2; + if (new_lim.cur == 0) + linx_exit(97); + new_lim.max = lim.max; + + if (linx_syscall4(261, 0, RLIMIT_STACK, (long)&new_lim, 0) != 0) + linx_exit(98); + if (linx_syscall4(261, 0, RLIMIT_STACK, 0, (long)&lim) != 0) + linx_exit(99); + if (lim.cur != new_lim.cur || lim.max != new_lim.max) + linx_exit(100); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(101); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_thread_runtime.c b/tests/fixtures/linux_user_thread_runtime.c new file mode 100644 index 0000000..c4fade0 --- /dev/null +++ b/tests/fixtures/linux_user_thread_runtime.c @@ -0,0 +1,94 @@ +typedef unsigned long u64; + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall2(long number, long arg0, long arg1) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline long linx_syscall5(long number, long arg0, long arg1, long arg2, long arg3, long arg4) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + register long a4 __asm__("a4") = 
arg4; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +static int str_eq(const char *lhs, const char *rhs) +{ + while (*lhs && *rhs) { + if (*lhs != *rhs) + return 0; + ++lhs; + ++rhs; + } + return *lhs == *rhs; +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "thread runtime ok\n"; + static const char name[] = "bringup-thr"; + char got_name[16]; + unsigned char *page; + long query; + + page = (unsigned char *)linx_syscall5(222, 0, 4096, 3, 0x22, -1); + if ((long)page < 0) + linx_exit(150); + page[0] = 0x5a; + + if (linx_syscall5(167, 15, (long)name, 0, 0, 0) != 0) + linx_exit(151); + if (linx_syscall5(167, 16, (long)got_name, 0, 0, 0) != 0) + linx_exit(152); + if (!str_eq(got_name, "bringup-thr")) + linx_exit(153); + + if (linx_syscall3(233, (long)page, 4096, 4) != 0) + linx_exit(154); + + query = linx_syscall2(283, 0, 0); + if ((query & 8) == 0 || (query & 16) == 0) + linx_exit(155); + if (linx_syscall2(283, 16, 0) != 0) + linx_exit(156); + if (linx_syscall2(283, 8, 0) != 0) + linx_exit(157); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(158); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_tls_rseq.c b/tests/fixtures/linux_user_tls_rseq.c new file mode 100644 index 0000000..92d25fb --- /dev/null +++ b/tests/fixtures/linux_user_tls_rseq.c @@ -0,0 +1,82 @@ +typedef unsigned int u32; +typedef unsigned long u64; + +struct rseq_guest { + u32 cpu_id_start; + u32 cpu_id; + u64 rseq_cs; + u32 flags; + u32 pad; + u64 extra[1]; +}; + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long 
linx_syscall4(long number, long arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +static inline void linx_set_tp(void *p) +{ + __asm__ volatile("ssrset %0, %1" : : "r"(p), "i"(0x0000) : "memory"); +} + +static inline void *linx_get_tp(void) +{ + void *tp; + __asm__ volatile("ssrget %1, ->%0" : "=r"(tp) : "i"(0x0000) : "memory"); + return tp; +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "tls rseq ok\n"; + struct rseq_guest rseq; + unsigned long tp_cookie = 0x123456789abcdef0ul; + + linx_set_tp(&tp_cookie); + if (linx_get_tp() != &tp_cookie) + linx_exit(160); + + if (linx_syscall4(293, (long)&rseq, (long)sizeof(rseq), 0, 0x53053053) != 0) + linx_exit(161); + if (rseq.cpu_id_start != 0 || rseq.cpu_id != 0 || rseq.rseq_cs != 0 || rseq.flags != 0) + linx_exit(162); + + if (linx_syscall4(293, (long)&rseq, (long)sizeof(rseq), 1, 0x53053053) != 0) + linx_exit(163); + + if (linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(164); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_wait4_nochild.c b/tests/fixtures/linux_user_wait4_nochild.c new file mode 100644 index 0000000..2256a81 --- /dev/null +++ 
b/tests/fixtures/linux_user_wait4_nochild.c @@ -0,0 +1,78 @@ +typedef unsigned long u64; +typedef unsigned int u32; + +struct timeval_guest { + long tv_sec; + long tv_usec; +}; + +struct rusage_guest { + struct timeval_guest ru_utime; + struct timeval_guest ru_stime; + long fields[14 + 16]; +}; + +void *memset(void *dst, int value, unsigned long count) +{ + unsigned char *ptr = (unsigned char *)dst; + unsigned char byte = (unsigned char)value; + unsigned long i; + for (i = 0; i < count; ++i) + ptr[i] = byte; + return dst; +} + +static inline long linx_syscall1(long number, long arg0) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7) : "memory"); + return a0; +} + +static inline long linx_syscall4(long number, long arg0, long arg1, long arg2, long arg3) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + register long a3 __asm__("a3") = arg3; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2), "r"(a3) : "memory"); + return a0; +} + +static inline long linx_syscall3(long number, long arg0, long arg1, long arg2) +{ + register long a7 __asm__("a7") = number; + register long a0 __asm__("a0") = arg0; + register long a1 __asm__("a1") = arg1; + register long a2 __asm__("a2") = arg2; + __asm__ volatile("acrc 1" : "+r"(a0) : "r"(a7), "r"(a1), "r"(a2) : "memory"); + return a0; +} + +static inline __attribute__((noreturn)) void linx_exit(int code) +{ + (void)linx_syscall1(93, code); + __builtin_unreachable(); +} + +__attribute__((noreturn)) void _start(void) +{ + static const char ok[] = "wait4 nochild ok\n"; + int status = 0x11223344; + struct rusage_guest ru; + long rc; + memset(&ru, 0xA5, sizeof(ru)); + + rc = linx_syscall4(260, -1, (long)&status, 0, (long)&ru); + if (rc != -10) + linx_exit(230); + if (status != 0x11223344) + linx_exit(231); + + if 
(linx_syscall3(64, 1, (long)ok, (long)(sizeof(ok) - 1)) != (long)(sizeof(ok) - 1)) + linx_exit(232); + + linx_exit(0); +} diff --git a/tests/fixtures/linux_user_write_exit.s b/tests/fixtures/linux_user_write_exit.s new file mode 100644 index 0000000..6eaf2a5 --- /dev/null +++ b/tests/fixtures/linux_user_write_exit.s @@ -0,0 +1,19 @@ + .text + .globl _start + .type _start,@function +_start: + C.BSTART.STD + addi zero, 1, ->a0 + addtpc .Lmessage, ->a1 + addi a1, .Lmessage, ->a1 + addi zero, 19, ->a2 + addi zero, 64, ->a7 + acrc 1 + addi zero, 0, ->a0 + addi zero, 93, ->a7 + acrc 1 + C.BSTOP + + .section .rodata +.Lmessage: + .asciz "hello from linxisa\n" From 345a999e96432b0156615835e8ea099656a2fd06 Mon Sep 17 00:00:00 2001 From: RuoyuZhou Date: Sun, 15 Mar 2026 09:07:21 +0800 Subject: [PATCH 2/3] Add governance, docs, and crosscheck gates --- .github/CODEOWNERS | 15 ++ .github/ISSUE_TEMPLATE/bug_report.yml | 26 +++ .github/ISSUE_TEMPLATE/docs.yml | 16 ++ .github/ISSUE_TEMPLATE/regression.yml | 22 +++ .github/dependabot.yml | 12 ++ .github/pull_request_template.md | 17 ++ .github/workflows/ci.yml | 48 +++++ .gitignore | 1 + CODE_OF_CONDUCT.md | 36 ++++ CONTRIBUTING.md | 54 ++++++ LICENSE | 160 ++++++++++++++++ README.md | 28 +++ SECURITY.md | 25 +++ crates/camodel/src/tests.rs | 55 ++++++ docs/bringup/gates/latest.json | 178 +++++++++++++++++ docs/project/layout.md | 62 ++++++ docs/verification/crosscheck-gates.md | 44 +++++ tools/ci/check_repo_layout.sh | 113 +++++++++++ tools/regression/run_crosschecks.sh | 266 ++++++++++++++++++++++++++ 19 files changed, 1178 insertions(+) create mode 100644 .github/CODEOWNERS create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 .github/ISSUE_TEMPLATE/docs.yml create mode 100644 .github/ISSUE_TEMPLATE/regression.yml create mode 100644 .github/dependabot.yml create mode 100644 .github/pull_request_template.md create mode 100644 .github/workflows/ci.yml create mode 100644 CODE_OF_CONDUCT.md create mode 100644 
CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 SECURITY.md create mode 100644 docs/bringup/gates/latest.json create mode 100644 docs/project/layout.md create mode 100644 docs/verification/crosscheck-gates.md create mode 100755 tools/ci/check_repo_layout.sh create mode 100755 tools/regression/run_crosschecks.sh diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..2ec833b --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,15 @@ +* @LinxISA/maintainers + +/.github/ @LinxISA/maintainers +/docs/ @LinxISA/maintainers +/tools/ @LinxISA/maintainers + +/crates/camodel/ @LinxISA/maintainers +/crates/funcmodel/ @LinxISA/maintainers +/crates/cosim/ @LinxISA/maintainers +/crates/trace/ @LinxISA/maintainers +/crates/runtime/ @LinxISA/maintainers +/crates/isa/ @LinxISA/maintainers +/crates/elf/ @LinxISA/maintainers +/crates/dse/ @LinxISA/maintainers +/crates/lx-tools/ @LinxISA/maintainers diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000..c29089f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,26 @@ +name: Bug report +description: Report a functional, cycle-model, trace, or tooling bug +title: "[bug] " +labels: ["bug"] +body: + - type: textarea + id: summary + attributes: + label: Summary + description: What is broken? + validations: + required: true + - type: textarea + id: repro + attributes: + label: Reproduction + description: Include commands, fixture paths, and expected vs actual behavior. + validations: + required: true + - type: textarea + id: validation + attributes: + label: Validation context + description: Note whether `tools/regression/run_crosschecks.sh` was run and what failed. 
+ validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/docs.yml b/.github/ISSUE_TEMPLATE/docs.yml new file mode 100644 index 0000000..7c4ef01 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/docs.yml @@ -0,0 +1,16 @@ +name: Documentation +description: Request or report a docs update +title: "[docs] " +labels: ["documentation"] +body: + - type: textarea + id: issue + attributes: + label: What should change? + validations: + required: true + - type: input + id: path + attributes: + label: Relevant path + description: README, docs path, or crate path if known. diff --git a/.github/ISSUE_TEMPLATE/regression.yml b/.github/ISSUE_TEMPLATE/regression.yml new file mode 100644 index 0000000..d622dd8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/regression.yml @@ -0,0 +1,22 @@ +name: Regression +description: Report a behavior change against a previously passing gate or smoke +title: "[regression] " +labels: ["regression"] +body: + - type: input + id: last_good + attributes: + label: Last known good commit + validations: + required: true + - type: input + id: first_bad + attributes: + label: First known bad commit + - type: textarea + id: evidence + attributes: + label: Evidence + description: Include command lines, gate report excerpts, and any relevant logs or trace references. + validations: + required: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..dd700fa --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,12 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..8ee9d05 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,17 @@ +## Summary + +Describe what this PR changes and why. 
+ +## Validation + +- [ ] `bash tools/ci/check_repo_layout.sh` +- [ ] `bash tools/regression/run_crosschecks.sh` +- [ ] (Optional local smoke) `bash tools/regression/run_crosschecks.sh --require-smoke` + +## Notes + +- [ ] Public docs were updated if crate names, verification commands, or repo + structure changed +- [ ] No old `linxcore-*` crate names were reintroduced +- [ ] Superproject references were only updated where they point directly to + this workspace diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..922df53 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,48 @@ +name: CI + +on: + push: + branches: [main] + pull_request: + +permissions: + contents: read + +jobs: + layout: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Check repository layout + run: bash tools/ci/check_repo_layout.sh + + workspace: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt + - uses: Swatinem/rust-cache@v2 + - name: Cargo fmt + run: cargo fmt --all --check + - name: Cargo test + run: cargo test -q + + crosscheck: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt + - uses: Swatinem/rust-cache@v2 + - name: Run repo-local crosschecks + run: bash tools/regression/run_crosschecks.sh --out-dir out/ci-crosschecks + - name: Upload gate artifacts + uses: actions/upload-artifact@v4 + with: + name: crosscheck-artifacts + path: | + out/ci-crosschecks + docs/bringup/gates/latest.json diff --git a/.gitignore b/.gitignore index ee920b2..6f33f32 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ out/ *.jsonl *.linxtrace *.md.tmp +!docs/bringup/gates/latest.json diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..6f931ae --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,36 @@ +# Code of Conduct + +## Our 
Pledge + +We want LinxCoreModel to be a professional, technically rigorous, and welcoming +open source project. Contributors and maintainers are expected to participate in +ways that keep discussion focused, respectful, and useful. + +## Our Standards + +Examples of behavior that contribute to a positive environment: + +- engaging with technical disagreements directly and respectfully +- giving actionable review feedback with clear evidence +- assuming good faith while still holding a high engineering bar +- documenting decisions so later contributors can reproduce and audit them + +Examples of unacceptable behavior: + +- personal attacks, insults, or harassment +- bad-faith argumentation or repeated derailment of technical discussion +- publishing private information without consent +- disruptive behavior that prevents productive collaboration + +## Enforcement + +Project maintainers are responsible for clarifying and enforcing these +standards. They may remove, edit, or reject comments, commits, issues, pull +requests, and other contributions that violate this Code of Conduct. + +## Reporting + +For conduct concerns, use GitHub’s private maintainer contact path where +available, or open a private security/advisory report if the behavior is tied to +an exploit or disclosure issue. Maintainers will review reports confidentially +when practical. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..3c9c94b --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,54 @@ +# Contributing to LinxCoreModel + +LinxCoreModel is the Rust modeling workspace under `tools/LinxCoreModel` in the +larger `linx-isa` superproject. + +## Scope and Ownership + +- Keep repository-local changes inside this workspace unless the work explicitly + requires a superproject integration change. +- Do not reintroduce `linxcore-*` crate names inside this workspace. 
The crate + graph is now `camodel`, `funcmodel`, `cosim`, `isa`, `elf`, `runtime`, + `trace`, `dse`, and `lx-tools`. +- Keep owner boundaries explicit. In particular: + - `camodel` uses domain/stage modules + - `funcmodel` uses engine/memory/syscall/trace domains + - shared CLI logic belongs in `crates/lx-tools/src/cli/` + +## Required Local Checks + +Run these before opening or updating a pull request: + +```bash +bash tools/ci/check_repo_layout.sh +bash tools/regression/run_crosschecks.sh --require-smoke +``` + +If you do not have a local bring-up ELF under +`out/bringup/linux_user_compiler_smoke_O0.elf`, you can still run the non-smoke +gates: + +```bash +bash tools/regression/run_crosschecks.sh +``` + +## Pull Requests + +- Keep changes focused. Do not mix workspace refactors, behavior changes, and + unrelated superproject edits in one PR. +- Include validation evidence and note whether the smoke/crosscheck gates were + run with a local ELF or in test-only mode. +- Update docs when you change public crate names, verification commands, or + owner/module boundaries. + +## Superproject Relationship + +This repository is intentionally narrower than the `linx-isa` superproject. Use +superproject governance only where it directly applies here: + +- mirror reusable patterns like `tools/ci`, `tools/regression`, and + `docs/bringup/gates` +- do not copy unrelated kernel/compiler/emulator process into this repo +- do not rename architectural references to “LinxCore” or “LinxISA” in the + wider superproject just because this workspace dropped redundant crate + prefixes diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..04e7c0d --- /dev/null +++ b/LICENSE @@ -0,0 +1,160 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, +made available under the License, as indicated by a copyright notice that is +included in or attached to the work (an example is provided in the Appendix +below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original +version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the copyright owner or by an individual or Legal +Entity authorized to submit on behalf of the copyright owner. For the purposes +of this definition, "submitted" means any form of electronic, verbal, or +written communication sent to the Licensor or its representatives, including +but not limited to communication on electronic mailing lists, source code +control systems, and issue tracking systems that are managed by, or on behalf +of, the Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise designated in +writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this +section) patent license to make, have made, use, offer to sell, sell, import, +and otherwise transfer the Work, where such license applies only to those +patent claims licensable by such Contributor that are necessarily infringed by +their Contribution(s) alone or by combination of their Contribution(s) with the +Work to which such Contribution(s) was submitted. If You institute patent +litigation against any entity (including a cross-claim or counterclaim in a +lawsuit) alleging that the Work or a Contribution incorporated within the Work +constitutes direct or contributory patent infringement, then any patent +licenses granted to You under this License for that Work shall terminate as of +the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and in +Source or Object form, provided that You meet the following conditions: + +(a) You must give any other recipients of the Work or Derivative Works a copy +of this License; and + +(b) You must cause any modified files to carry prominent notices stating that +You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works that You +distribute, all copyright, patent, trademark, and attribution notices from the +Source form of the Work, excluding those notices that do not pertain to any +part of the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its distribution, then +any Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE 
text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a +whole, provided Your use, reproduction, and distribution of the Work otherwise +complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to the +Licensor shall be under the terms and conditions of this License, without any +additional terms or conditions. Notwithstanding the above, nothing herein shall +supersede or modify the terms of any separate license agreement you may have +executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, +trademarks, service marks, or product names of the Licensor, except as required +for reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in +writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any warranties +or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in +tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to in +writing, shall any Contributor be liable to You for damages, including any +direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or inability to +use the Work (including but not limited to damages for loss of goodwill, work +stoppage, computer failure or malfunction, or any and all other commercial +damages or losses), even if such Contributor has been advised of the +possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or +Derivative Works thereof, You may choose to offer, and charge a fee for, +acceptance of support, warranty, indemnity, or other liability obligations +and/or rights consistent with this License. However, in accepting such +obligations, You may act only on Your own behalf and on Your sole +responsibility, not on behalf of any other Contributor, and only if You agree +to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/README.md b/README.md index 4c4db4c..262cf02 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,14 @@ LinxCoreModel is the Rust modeling workspace for LinxCore. +Project policy and verification entrypoints: + +- contributor guidance: [`CONTRIBUTING.md`](CONTRIBUTING.md) +- security process: [`SECURITY.md`](SECURITY.md) +- workspace layout: [`docs/project/layout.md`](docs/project/layout.md) +- crosscheck gates: [`docs/verification/crosscheck-gates.md`](docs/verification/crosscheck-gates.md) +- latest gate report: [`docs/bringup/gates/latest.json`](docs/bringup/gates/latest.json) + Current workspace contents: - `isa`: shared architectural types and trace contracts @@ -82,3 +90,23 @@ Still incomplete in this phase: - bootstrap auxv now includes deterministic `AT_MINSIGSTKSZ`, `AT_PLATFORM`, `AT_HWCAP*`, `AT_CLKTCK`, and `AT_SYSINFO_EHDR` entries for libc startup paths - cycle-accurate execution and QEMU lockstep remain separate follow-on work + +## Verification + +Minimum structural check: + +```bash +bash tools/ci/check_repo_layout.sh +``` + +Full repo-local gate pack: + +```bash +bash tools/regression/run_crosschecks.sh +``` + +To require the local CLI smoke on a previously built bring-up ELF: + +```bash +bash tools/regression/run_crosschecks.sh --require-smoke +``` diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..da5acf4 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,25 @@ +# Security Policy + +## Reporting a Vulnerability + +Use GitHub’s **Report a vulnerability** feature for this repository whenever +possible. That keeps the initial report private and lets maintainers coordinate +fixes before disclosure. + +Include: + +- a clear description of the issue and expected impact +- the affected crate(s) or tool(s) +- reproduction steps or a minimal proof of concept +- the commit or branch used for testing + +## Supported Versions + +Security fixes are applied to the default branch, `main`. 
Historical branches +may receive fixes only at maintainer discretion. + +## Scope Notes + +LinxCoreModel is a modeling workspace. Vulnerabilities in this repository may +affect trace tooling, host file access through syscall shims, or analysis tools, +even when they do not affect the RTL or architectural specification directly. diff --git a/crates/camodel/src/tests.rs b/crates/camodel/src/tests.rs index 5e5676b..2d4c571 100644 --- a/crates/camodel/src/tests.rs +++ b/crates/camodel/src/tests.rs @@ -1,5 +1,6 @@ use super::*; use elf::{LoadedElf, SegmentImage}; +use funcmodel::{FuncEngine, FuncRunOptions}; use isa::CommitRecord; use runtime::GuestRuntime; use runtime::{BootInfo, GuestMemory, MemoryRegion, RuntimeConfig}; @@ -81,6 +82,54 @@ fn cycle_engine_retires_multiple_uops() { ); } +#[test] +fn crosscheck_func_and_cycle_engines_on_sample_runtime() { + let program = vec![ + enc_addi(2, 0, 1), + enc_addi(3, 2, 2), + enc_addi(4, 3, 3), + enc_addi(9, 0, 93), + enc_acrc(1), + ]; + let runtime = sample_runtime(&program, &[]); + + let func_bundle = FuncEngine + .run(&runtime, &FuncRunOptions { max_steps: 64 }) + .unwrap(); + let cycle_bundle = CycleEngine + .run( + &runtime, + &CycleRunOptions { + max_cycles: 64, + ..CycleRunOptions::default() + }, + ) + .unwrap(); + + assert_eq!( + func_bundle.result.metrics.exit_reason, + cycle_bundle.result.metrics.exit_reason + ); + assert_eq!( + func_bundle.result.commits.len(), + cycle_bundle.result.commits.len() + ); + + let func_commits = func_bundle + .result + .commits + .iter() + .map(normalize_commit_for_crosscheck) + .collect::>(); + let cycle_commits = cycle_bundle + .result + .commits + .iter() + .map(normalize_commit_for_crosscheck) + .collect::>(); + assert_eq!(func_commits, cycle_commits); +} + #[test] fn dependent_uop_picks_after_producer_wakeup_window() { let program = vec![ @@ -8802,6 +8851,12 @@ fn sample_runtime(words: &[u32], extra_regions: &[MemoryRegion]) -> GuestRuntime } } +fn 
normalize_commit_for_crosscheck(commit: &CommitRecord) -> CommitRecord { + let mut normalized = commit.clone(); + normalized.cycle = 0; + normalized +} + fn enc_addi(rd: u32, rs1: u32, imm: u32) -> u32 { ((imm & 0x0fff) << 20) | (rs1 << 15) | (rd << 7) | 0x15 } diff --git a/docs/bringup/gates/latest.json b/docs/bringup/gates/latest.json new file mode 100644 index 0000000..06a008d --- /dev/null +++ b/docs/bringup/gates/latest.json @@ -0,0 +1,178 @@ +{ + "generated_at_utc": "2026-03-15 01:06:51Z", + "repo": "LinxISA/LinxCoreModel", + "runs": [ + { + "run_id": "local-20260315T010647Z", + "generated_at_utc": "2026-03-15 01:06:51Z", + "profile": "repo-local-crosscheck", + "lane": "local", + "trace_schema_version": "1.0", + "gates": [ + { + "gate": "Repository layout", + "domain": "Repo", + "classification": "layout_ok", + "command": "bash tools/ci/check_repo_layout.sh", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/layout.log" + ] + }, + { + "gate": "Cargo fmt", + "domain": "Workspace", + "classification": "fmt_clean", + "command": "cargo fmt --all --check", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/fmt.log" + ] + }, + { + "gate": "camodel tests", + "domain": "camodel", + "classification": "cargo_test_pass", + "command": "cargo test -q -p camodel", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/camodel.log" + ] + }, + { + "gate": "funcmodel tests", + "domain": "funcmodel", + "classification": "cargo_test_pass", + "command": "cargo test -q -p funcmodel", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/funcmodel.log" + ] + }, + { + "gate": 
"trace tests", + "domain": "trace", + "classification": "cargo_test_pass", + "command": "cargo test -q -p trace", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/trace.log" + ] + }, + { + "gate": "cosim tests", + "domain": "cosim", + "classification": "cargo_test_pass", + "command": "cargo test -q -p cosim", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/cosim.log" + ] + }, + { + "gate": "workspace tests", + "domain": "Workspace", + "classification": "cargo_test_pass", + "command": "cargo test -q", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/workspace.log" + ] + }, + { + "gate": "func/cycle synthetic crosscheck", + "domain": "Crosscheck", + "classification": "func_cycle_match", + "command": "cargo test -q -p camodel crosscheck_func_and_cycle_engines_on_sample_runtime", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/crosscheck-unit.log" + ] + }, + { + "gate": "functional CLI smoke", + "domain": "Crosscheck", + "classification": "func_cli_smoke_pass", + "command": "cargo run --quiet --bin lx-run -- --engine func --elf out/bringup/linux_user_compiler_smoke_O0.elf --max-steps 100000 --out-dir out/crosschecks/func-smoke", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/func-smoke.log" + ] + }, + { + "gate": "cycle CLI smoke", + "domain": "Crosscheck", + "classification": "cycle_cli_smoke_pass", + "command": "cargo run --quiet --bin lx-run -- --engine cycle --elf out/bringup/linux_user_compiler_smoke_O0.elf --max-cycles 512 --out-dir 
out/crosschecks/cycle-smoke", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/cycle-smoke.log" + ] + }, + { + "gate": "cycle vs func commit crosscheck", + "domain": "Crosscheck", + "classification": "cycle_func_cosim_match", + "command": "cargo run --quiet --bin lx-cosim -- --engine cycle --elf out/bringup/linux_user_compiler_smoke_O0.elf --qemu out/crosschecks/func-smoke/commit.jsonl --out-dir out/crosschecks/cosim", + "status": "pass", + "required": true, + "owner": "maintainers", + "waived": false, + "evidence_type": "log", + "evidence": [ + "log:out/crosschecks/logs/cosim-smoke.log" + ] + } + ], + "metrics": { + "func_exit_reason": "guest_exit(0)", + "func_commits": 77, + "cycle_exit_reason": "guest_exit(0)", + "cycle_commits": 77, + "cycle_cycles": 160, + "cosim_match": true, + "cosim_matched_commits": 77 + } + } + ] +} diff --git a/docs/project/layout.md b/docs/project/layout.md new file mode 100644 index 0000000..c812710 --- /dev/null +++ b/docs/project/layout.md @@ -0,0 +1,62 @@ +# Workspace Layout + +LinxCoreModel is the Rust modeling workspace for LinxISA/LinxCore. It is kept +under `tools/LinxCoreModel` in the larger `linx-isa` superproject. 
+ +## Crate Map + +- `camodel`: cycle-accurate execution model and stage/owner trace generation +- `funcmodel`: functional execution model and Linux-user syscall shims +- `cosim`: commit-stream comparison and M1 lockstep helpers +- `trace`: `linxtrace.v1` and commit JSONL writers +- `runtime`: guest runtime bootstrap and memory/syscall host state +- `elf`: static ELF loading +- `isa`: architectural state, decode, trace schema, and shared types +- `dse`: sweep/report support +- `lx-tools`: `lx-run`, `lx-cosim`, `lx-trace`, `lx-sweep` + +## Owner Boundaries + +### `camodel` + +- `core/`: engine entrypoints, shared state, config, uop model +- `frontend/`: fetch, decode stages, dispatch, checkpoint assignment, restart gating +- `issue/`: IQ residency, qtags, ready tables, `P1/I1/I2` +- `backend/`: execute stages and LSU owner state +- `control/`: `ROB/CMT/FLS`, redirect, traps, dynamic-target recovery +- `decode/`: committed-stream to uop construction and classification helpers +- `trace/`: CA stage-event shaping + +### `funcmodel` + +- `core/`: engine state and run options +- `exec/`: functional execution loop +- `memory/`: guest memory helpers +- `syscalls/`: Linux-user syscall handling +- `trace/`: functional trace glue + +## Naming Contract + +This workspace intentionally dropped redundant `linxcore-*` crate names. 
+Historical names should not be reintroduced in code, manifests, docs, or CI: + +- `linxcore-cycle` -> `camodel` +- `linxcore-func` -> `funcmodel` +- `linxcore-cosim` -> `cosim` +- `linxcore-isa` -> `isa` +- `linxcore-elf` -> `elf` +- `linxcore-runtime` -> `runtime` +- `linxcore-trace` -> `trace` +- `linxcore-dse` -> `dse` + +## Superproject Relationship + +This repo mirrors selected governance patterns from the superproject: + +- `.github/` for review and CI policy +- `tools/ci/` for structural checks +- `tools/regression/` for repeatable gate execution +- `docs/bringup/gates/latest.json` for machine-readable gate output + +It does **not** inherit unrelated superproject responsibilities like kernel, +compiler, emulator, or RTL release process. diff --git a/docs/verification/crosscheck-gates.md b/docs/verification/crosscheck-gates.md new file mode 100644 index 0000000..4a0cbed --- /dev/null +++ b/docs/verification/crosscheck-gates.md @@ -0,0 +1,44 @@ +# Crosscheck Gates + +The canonical repo-local gate entrypoint is: + +```bash +bash tools/regression/run_crosschecks.sh +``` + +## What It Runs + +Required gates: + +- `bash tools/ci/check_repo_layout.sh` +- `cargo fmt --all --check` +- `cargo test -q -p camodel` +- `cargo test -q -p funcmodel` +- `cargo test -q -p trace` +- `cargo test -q -p cosim` +- `cargo test -q` +- `cargo test -q -p camodel crosscheck_func_and_cycle_engines_on_sample_runtime` + +Optional smoke gates: + +- `lx-run --engine func` on a locally available bring-up ELF +- `lx-run --engine cycle` on the same ELF +- `lx-cosim --engine cycle --qemu ` to crosscheck cycle + against the functional commit stream + +By default the smoke gates run only if a local ELF exists at +`out/bringup/linux_user_compiler_smoke_O0.elf`. + +To require those smoke gates, use: + +```bash +bash tools/regression/run_crosschecks.sh --require-smoke +``` + +## Gate Report + +The latest machine-readable report is written to +`docs/bringup/gates/latest.json`. 
+ +This mirrors the superproject convention of keeping a JSON gate summary beside +human-readable docs, while keeping the checks repo-local and model-specific. diff --git a/tools/ci/check_repo_layout.sh b/tools/ci/check_repo_layout.sh new file mode 100755 index 0000000..4ac8073 --- /dev/null +++ b/tools/ci/check_repo_layout.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" + +required_root_files=( + "README.md" + "LICENSE" + "CONTRIBUTING.md" + "SECURITY.md" + "CODE_OF_CONDUCT.md" + ".github/CODEOWNERS" + ".github/dependabot.yml" + ".github/ISSUE_TEMPLATE/bug_report.yml" + ".github/ISSUE_TEMPLATE/regression.yml" + ".github/ISSUE_TEMPLATE/docs.yml" + ".github/workflows/ci.yml" + "docs/project/layout.md" + "docs/verification/crosscheck-gates.md" + "docs/bringup/gates/latest.json" + "tools/regression/run_crosschecks.sh" +) + +required_crates=( + "camodel" + "funcmodel" + "cosim" + "isa" + "elf" + "runtime" + "trace" + "dse" + "lx-tools" +) + +for path in "${required_root_files[@]}"; do + [[ -e "$ROOT/$path" ]] || { + echo "missing required path: $path" >&2 + exit 1 + } +done + +for crate in "${required_crates[@]}"; do + [[ -d "$ROOT/crates/$crate" ]] || { + echo "missing required crate directory: crates/$crate" >&2 + exit 1 + } +done + +for old in \ + linxcore-cycle \ + linxcore-func \ + linxcore-cosim \ + linxcore-isa \ + linxcore-elf \ + linxcore-runtime \ + linxcore-trace \ + linxcore-dse +do + [[ ! -e "$ROOT/crates/$old" ]] || { + echo "obsolete crate directory still present: crates/$old" >&2 + exit 1 + } +done + +[[ ! 
-e "$ROOT/crates/camodel/src/stages" ]] || { + echo "obsolete path still present: crates/camodel/src/stages" >&2 + exit 1 +} + +for dir in core frontend issue backend control decode trace; do + [[ -d "$ROOT/crates/camodel/src/$dir" ]] || { + echo "missing camodel owner directory: crates/camodel/src/$dir" >&2 + exit 1 + } +done + +for dir in core exec memory syscalls trace; do + [[ -d "$ROOT/crates/funcmodel/src/$dir" ]] || { + echo "missing funcmodel owner directory: crates/funcmodel/src/$dir" >&2 + exit 1 + } +done + +for dir in linxtrace commit schema; do + [[ -d "$ROOT/crates/trace/src/$dir" ]] || { + echo "missing trace owner directory: crates/trace/src/$dir" >&2 + exit 1 + } +done + +for dir in protocol compare qemu; do + [[ -d "$ROOT/crates/cosim/src/$dir" ]] || { + echo "missing cosim owner directory: crates/cosim/src/$dir" >&2 + exit 1 + } +done + +[[ -d "$ROOT/crates/lx-tools/src/cli" ]] || { + echo "missing lx-tools shared cli directory" >&2 + exit 1 +} +[[ -d "$ROOT/crates/lx-tools/src/bin" ]] || { + echo "missing lx-tools bin directory" >&2 + exit 1 +} + +if grep -R -n 'name = "linxcore-' "$ROOT/crates" "$ROOT/Cargo.toml" >/dev/null 2>&1; then + echo "obsolete linxcore-* package name detected" >&2 + exit 1 +fi + +echo "repo layout OK" diff --git a/tools/regression/run_crosschecks.sh b/tools/regression/run_crosschecks.sh new file mode 100755 index 0000000..aaf8136 --- /dev/null +++ b/tools/regression/run_crosschecks.sh @@ -0,0 +1,266 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" +OUT_DIR="${ROOT}/out/crosschecks" +ELF_PATH="${ROOT}/out/bringup/linux_user_compiler_smoke_O0.elf" +RUN_ID="local-$(date -u +%Y%m%dT%H%M%SZ)" +REQUIRE_SMOKE=0 + +while [[ $# -gt 0 ]]; do + case "$1" in + --out-dir) + OUT_DIR="$2" + shift 2 + ;; + --elf) + ELF_PATH="$2" + shift 2 + ;; + --run-id) + RUN_ID="$2" + shift 2 + ;; + --require-smoke) + REQUIRE_SMOKE=1 + shift + ;; + *) + echo "unknown argument: $1" >&2 + exit 2 + ;; + esac +done + +mkdir -p "$OUT_DIR/logs" "$OUT_DIR/func-smoke" "$OUT_DIR/cycle-smoke" "$OUT_DIR/cosim" + +GATE_ROWS=() +overall=0 + +run_gate() { + local gate="$1" + local domain="$2" + local classification="$3" + local logfile="$4" + shift 4 + local -a cmd=("$@") + + echo "== ${gate}" + printf 'command:' + printf ' %q' "${cmd[@]}" + printf '\n' + + set +e + "${cmd[@]}" >"$logfile" 2>&1 + local rc=$? + set -e + + local status="pass" + if [[ $rc -ne 0 ]]; then + status="fail" + overall=1 + fi + + GATE_ROWS+=("${gate}|${domain}|${classification}|${status}|${logfile}|$(printf '%q ' "${cmd[@]}")") + echo "status: ${status}" + echo "log: ${logfile}" + echo +} + +run_gate \ + "Repository layout" \ + "Repo" \ + "layout_ok" \ + "$OUT_DIR/logs/layout.log" \ + bash "$ROOT/tools/ci/check_repo_layout.sh" + +run_gate \ + "Cargo fmt" \ + "Workspace" \ + "fmt_clean" \ + "$OUT_DIR/logs/fmt.log" \ + cargo fmt --all --check + +run_gate \ + "camodel tests" \ + "camodel" \ + "cargo_test_pass" \ + "$OUT_DIR/logs/camodel.log" \ + cargo test -q -p camodel + +run_gate \ + "funcmodel tests" \ + "funcmodel" \ + "cargo_test_pass" \ + "$OUT_DIR/logs/funcmodel.log" \ + cargo test -q -p funcmodel + +run_gate \ + "trace tests" \ + "trace" \ + "cargo_test_pass" \ + "$OUT_DIR/logs/trace.log" \ + cargo test -q -p trace + +run_gate \ + "cosim tests" \ + "cosim" \ + "cargo_test_pass" \ + "$OUT_DIR/logs/cosim.log" \ + cargo test -q -p cosim + +run_gate \ + "workspace tests" \ + "Workspace" \ + "cargo_test_pass" \ + "$OUT_DIR/logs/workspace.log" \ + cargo test -q + 
+run_gate \ + "func/cycle synthetic crosscheck" \ + "Crosscheck" \ + "func_cycle_match" \ + "$OUT_DIR/logs/crosscheck-unit.log" \ + cargo test -q -p camodel crosscheck_func_and_cycle_engines_on_sample_runtime + +SMOKE_PRESENT=0 +if [[ -f "$ELF_PATH" ]]; then + SMOKE_PRESENT=1 + run_gate \ + "functional CLI smoke" \ + "Crosscheck" \ + "func_cli_smoke_pass" \ + "$OUT_DIR/logs/func-smoke.log" \ + cargo run --quiet --bin lx-run -- --engine func --elf "$ELF_PATH" --max-steps 100000 --out-dir "$OUT_DIR/func-smoke" + + run_gate \ + "cycle CLI smoke" \ + "Crosscheck" \ + "cycle_cli_smoke_pass" \ + "$OUT_DIR/logs/cycle-smoke.log" \ + cargo run --quiet --bin lx-run -- --engine cycle --elf "$ELF_PATH" --max-cycles 512 --out-dir "$OUT_DIR/cycle-smoke" + + run_gate \ + "cycle vs func commit crosscheck" \ + "Crosscheck" \ + "cycle_func_cosim_match" \ + "$OUT_DIR/logs/cosim-smoke.log" \ + cargo run --quiet --bin lx-cosim -- --engine cycle --elf "$ELF_PATH" --qemu "$OUT_DIR/func-smoke/commit.jsonl" --out-dir "$OUT_DIR/cosim" +elif [[ "$REQUIRE_SMOKE" == "1" ]]; then + echo "required smoke ELF missing: $ELF_PATH" >&2 + exit 1 +fi + +export ROOT OUT_DIR RUN_ID SMOKE_PRESENT +export GATE_ROWS_JOINED +GATE_ROWS_JOINED="$(printf '%s\n' "${GATE_ROWS[@]}")" + +python3 - <<'PY' +import json +import os +from datetime import datetime, timezone +from pathlib import Path + +root = Path(os.environ["ROOT"]) +out_dir = Path(os.environ["OUT_DIR"]) +run_id = os.environ["RUN_ID"] +smoke_present = os.environ["SMOKE_PRESENT"] == "1" + +def normalize_path_text(text: str) -> str: + root_prefix = f"{root}/" + out_prefix = f"{out_dir}/" + if root_prefix in text: + text = text.replace(root_prefix, "") + if out_prefix in text: + text = text.replace(out_prefix, "out/crosschecks/") + return text + +def load_json_if_present(path: Path): + if not path.exists(): + return None + text = path.read_text() + start = text.find("{") + if start == -1: + return None + return json.loads(text[start:]) + +gates = [] +for 
raw in os.environ.get("GATE_ROWS_JOINED", "").splitlines(): + gate, domain, classification, status, logfile, command = raw.split("|", 5) + gates.append( + { + "gate": gate, + "domain": domain, + "classification": classification, + "command": normalize_path_text(command.strip()), + "status": status, + "required": True, + "owner": "maintainers", + "waived": False, + "evidence_type": "log", + "evidence": [f"log:{normalize_path_text(logfile)}"], + } + ) + +if not smoke_present: + for gate, classification in [ + ("functional CLI smoke", "func_cli_smoke_skipped"), + ("cycle CLI smoke", "cycle_cli_smoke_skipped"), + ("cycle vs func commit crosscheck", "cycle_func_cosim_skipped"), + ]: + gates.append( + { + "gate": gate, + "domain": "Crosscheck", + "classification": classification, + "command": "skipped: smoke ELF not present", + "status": "skip", + "required": False, + "owner": "maintainers", + "waived": False, + "evidence_type": "terminal", + "evidence": ["terminal: optional smoke gate skipped because no local ELF was present"], + } + ) + +func_run = load_json_if_present(out_dir / "logs" / "func-smoke.log") +cycle_run = load_json_if_present(out_dir / "logs" / "cycle-smoke.log") +cosim_report = load_json_if_present(out_dir / "logs" / "cosim-smoke.log") + +run = { + "run_id": run_id, + "generated_at_utc": datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%SZ"), + "profile": "repo-local-crosscheck", + "lane": "local", + "trace_schema_version": "1.0", + "gates": gates, +} + +metrics = {} +if isinstance(func_run, dict): + metrics["func_exit_reason"] = func_run["metrics"]["exit_reason"] + metrics["func_commits"] = func_run["metrics"]["commits"] +if isinstance(cycle_run, dict): + metrics["cycle_exit_reason"] = cycle_run["metrics"]["exit_reason"] + metrics["cycle_commits"] = cycle_run["metrics"]["commits"] + metrics["cycle_cycles"] = cycle_run["metrics"]["cycles"] +if isinstance(cosim_report, dict): + metrics["cosim_match"] = cosim_report.get("mismatch") is None + 
metrics["cosim_matched_commits"] = cosim_report.get("matched_commits") +if metrics: + run["metrics"] = metrics + +report = { + "generated_at_utc": run["generated_at_utc"], + "repo": "LinxISA/LinxCoreModel", + "runs": [run], +} + +(root / "docs" / "bringup" / "gates" / "latest.json").write_text(json.dumps(report, indent=2) + "\n") +(out_dir / "gate-report.json").write_text(json.dumps(report, indent=2) + "\n") +PY + +echo "wrote gate report: $ROOT/docs/bringup/gates/latest.json" +echo "wrote gate artifact: $OUT_DIR/gate-report.json" + +exit "$overall" From ecdd37c8a7d353a897c0e390dfc3558f3555743b Mon Sep 17 00:00:00 2001 From: RuoyuZhou Date: Sun, 15 Mar 2026 09:09:53 +0800 Subject: [PATCH 3/3] Vendor ISA data for standalone CI --- .gitignore | 1 + README.md | 2 +- crates/isa/data/linxisa-v0.4.json | 1 + crates/isa/src/lib.rs | 5 ++--- docs/bringup/gates/latest.json | 6 +++--- 5 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 crates/isa/data/linxisa-v0.4.json diff --git a/.gitignore b/.gitignore index 6f33f32..c678610 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ out/ *.linxtrace *.md.tmp !docs/bringup/gates/latest.json +!crates/isa/data/linxisa-v0.4.json diff --git a/README.md b/README.md index 262cf02..7e4e161 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ Current crate layout: The current implementation lands the full workspace shape, executable `lx-*` surface, static ELF loading, runtime bootstrap, syscall allowlist scaffolding, commit/pipeview emission, lockstep compare helpers, and a full table-driven -LinxISA `v0.4` decoder sourced from the canonical ISA JSON. +LinxISA `v0.4` decoder sourced from a vendored copy of the canonical ISA JSON. 
Current functional-engine status: diff --git a/crates/isa/data/linxisa-v0.4.json b/crates/isa/data/linxisa-v0.4.json new file mode 100644 index 0000000..b371db8 --- /dev/null +++ b/crates/isa/data/linxisa-v0.4.json @@ -0,0 +1 @@ +{"instruction_count":740,"instructions":[{"asm":"acrc rst_type","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RST_Type","pieces":[{"insn_lsb":20,"insn_msb":23,"token":"RST_Type","width":4}],"signed":null}],"index":0,"mask":"0xff0fffff","match":"0x0000302b","pattern":"00000000....00000011000000101011","width_bits":32}]},"encoding_kind":"L32","group":"Execution Control","id":"acrc_32_a9c0e33f9904","length_bits":32,"mnemonic":"ACRC","parts":[{"segments":[{"const":{"value":0,"width":4},"lsb":28,"msb":31,"token":"4'b0000","width":4},{"const":{"value":0,"width":4},"lsb":24,"msb":27,"token":"4'b0000","width":4},{"lsb":20,"msb":23,"token":"RST_Type","width":4},{"const":{"value":0,"width":5},"lsb":15,"msb":19,"token":"5'b0_0000","width":5},{"const":{"value":3,"width":3},"lsb":12,"msb":14,"token":"3'b011","width":3},{"const":{"value":0,"width":5},"lsb":7,"msb":11,"token":"5'b0_0000","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":5,"width":3},"lsb":1,"msb":3,"token":"3'b101","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":11},"uid":"a9c0e33f9904","uop_big_kind":"SYS","uop_class":{"note":"User confirmed A","source":"group_rule","uop_kind":"SYS"},"uop_group":"SYS"},{"asm":"acre rra_type","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RRA_Type","pieces":[{"insn_lsb":20,"insn_msb":23,"token":"RRA_Type","width":4}],"signed":null}],"index":0,"mask":"0xff0fffff","match":"0x0100302b","pattern":"00000001....00000011000000101011","width_bits":32}]},"encoding_kind":"L32","group":"Execution 
Control","id":"acre_32_54b80944d32d","length_bits":32,"mnemonic":"ACRE","parts":[{"segments":[{"const":{"value":0,"width":4},"lsb":28,"msb":31,"token":"4'b0000","width":4},{"const":{"value":1,"width":4},"lsb":24,"msb":27,"token":"4'b0001","width":4},{"lsb":20,"msb":23,"token":"RRA_Type","width":4},{"const":{"value":0,"width":5},"lsb":15,"msb":19,"token":"5'b0_0000","width":5},{"const":{"value":3,"width":3},"lsb":12,"msb":14,"token":"3'b011","width":3},{"const":{"value":0,"width":5},"lsb":7,"msb":11,"token":"5'b0_0000","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":5,"width":3},"lsb":1,"msb":3,"token":"3'b101","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":12},"uid":"54b80944d32d","uop_big_kind":"SYS","uop_class":{"note":"User confirmed A","source":"group_rule","uop_kind":"SYS"},"uop_group":"SYS"},{"asm":"add SrcL, SrcR<{.sw,.uw,.neg}><<, ->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"SrcRType","pieces":[{"insn_lsb":25,"insn_msb":26,"token":"SrcRType","width":2}],"signed":null},{"name":"shamt","pieces":[{"insn_lsb":27,"insn_msb":31,"token":"shamt","width":5}],"signed":null}],"index":0,"mask":"0x0000707f","match":"0x00000005","pattern":".................000.....0000101","width_bits":32}]},"encoding_kind":"L32","group":"Arithmetic Operation 
64bit","id":"add_32_d04202886d0a","length_bits":32,"mnemonic":"ADD","parts":[{"segments":[{"lsb":27,"msb":31,"token":"shamt","width":5},{"lsb":25,"msb":26,"token":"SrcRType","width":2},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":0,"width":3},"lsb":4,"msb":6,"token":"3'b000","width":3},{"const":{"value":2,"width":3},"lsb":1,"msb":3,"token":"3'b010","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":13},"uid":"d04202886d0a","uop_big_kind":"ALU","uop_class":{"source":"group_rule","uop_kind":"ALU"},"uop_group":"ALU"},{"asm":"addi SrcL, uimm, ->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"uimm12","pieces":[{"insn_lsb":20,"insn_msb":31,"token":"uimm12","width":12}],"signed":false}],"index":0,"mask":"0x0000707f","match":"0x00000015","pattern":".................000.....0010101","width_bits":32}]},"encoding_kind":"L32","group":"Arithmetic Operation 
64bit","id":"addi_32_2decd0a93a0a","length_bits":32,"mnemonic":"ADDI","parts":[{"segments":[{"lsb":20,"msb":31,"token":"uimm12","width":12},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":1,"width":3},"lsb":4,"msb":6,"token":"3'b001","width":3},{"const":{"value":2,"width":3},"lsb":1,"msb":3,"token":"3'b010","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":14},"uid":"2decd0a93a0a","uop_big_kind":"ALU","uop_class":{"source":"group_rule","uop_kind":"ALU"},"uop_group":"ALU"},{"asm":"addiw SrcL, uimm, ->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"uimm12","pieces":[{"insn_lsb":20,"insn_msb":31,"token":"uimm12","width":12}],"signed":false}],"index":0,"mask":"0x0000707f","match":"0x00000035","pattern":".................000.....0110101","width_bits":32}]},"encoding_kind":"L32","group":"Arithmetic Operation 32bit","id":"addiw_32_08cc89cd2689","length_bits":32,"mnemonic":"ADDIW","parts":[{"segments":[{"lsb":20,"msb":31,"token":"uimm12","width":12},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":3,"width":3},"lsb":4,"msb":6,"token":"3'b011","width":3},{"const":{"value":2,"width":3},"lsb":1,"msb":3,"token":"3'b010","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":15},"uid":"08cc89cd2689","uop_big_kind":"ALU","uop_class":{"source":"group_rule","uop_kind":"ALU"},"uop_group":"ALU"},{"asm":"addtpc simm, 
->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"constraints":[{"field":"RegDst","op":"!=","value":"RA"}],"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"imm20","pieces":[{"insn_lsb":12,"insn_msb":31,"token":"imm20","width":20}],"signed":null}],"index":0,"mask":"0x0000007f","match":"0x00000007","pattern":".........................0000111","width_bits":32}]},"encoding_kind":"L32","group":"PC-Relative","id":"addtpc_32_e5aa0f0abca3","length_bits":32,"mnemonic":"ADDTPC","parts":[{"segments":[{"lsb":12,"msb":31,"token":"imm20","width":20},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":0,"width":3},"lsb":4,"msb":6,"token":"3'b000","width":3},{"const":{"value":3,"width":3},"lsb":1,"msb":3,"token":"3'b011","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":16},"uid":"e5aa0f0abca3","uop_big_kind":"BRU","uop_class":{"note":"User confirmed C (pc read is BRU)","source":"group_rule","uop_kind":"BRU"},"uop_group":"BRU"},{"asm":"addw SrcL, SrcR<{.sw,.uw,.neg}><<, ->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"SrcRType","pieces":[{"insn_lsb":25,"insn_msb":26,"token":"SrcRType","width":2}],"signed":null},{"name":"shamt","pieces":[{"insn_lsb":27,"insn_msb":31,"token":"shamt","width":5}],"signed":null}],"index":0,"mask":"0x0000707f","match":"0x00000025","pattern":".................000.....0100101","width_bits":32}]},"encoding_kind":"L32","group":"Arithmetic Operation 
32bit","id":"addw_32_a27109fe30fc","length_bits":32,"mnemonic":"ADDW","parts":[{"segments":[{"lsb":27,"msb":31,"token":"shamt","width":5},{"lsb":25,"msb":26,"token":"SrcRType","width":2},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":2,"width":3},"lsb":1,"msb":3,"token":"3'b010","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":17},"uid":"a27109fe30fc","uop_big_kind":"ALU","uop_class":{"source":"group_rule","uop_kind":"ALU"},"uop_group":"ALU"},{"asm":"and SrcL, SrcR<{.sw,.uw,.not}><<, ->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"SrcRType","pieces":[{"insn_lsb":25,"insn_msb":26,"token":"SrcRType","width":2}],"signed":null},{"name":"shamt","pieces":[{"insn_lsb":27,"insn_msb":31,"token":"shamt","width":5}],"signed":null}],"index":0,"mask":"0x0000707f","match":"0x00002005","pattern":".................010.....0000101","width_bits":32}]},"encoding_kind":"L32","group":"Arithmetic Operation 
64bit","id":"and_32_b6a903a3ec94","length_bits":32,"mnemonic":"AND","parts":[{"segments":[{"lsb":27,"msb":31,"token":"shamt","width":5},{"lsb":25,"msb":26,"token":"SrcRType","width":2},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":0,"width":3},"lsb":4,"msb":6,"token":"3'b000","width":3},{"const":{"value":2,"width":3},"lsb":1,"msb":3,"token":"3'b010","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":18},"uid":"b6a903a3ec94","uop_big_kind":"ALU","uop_class":{"source":"group_rule","uop_kind":"ALU"},"uop_group":"ALU"},{"asm":"andi SrcL, simm, ->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"simm12","pieces":[{"insn_lsb":20,"insn_msb":31,"token":"simm12","width":12}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00002015","pattern":".................010.....0010101","width_bits":32}]},"encoding_kind":"L32","group":"Arithmetic Operation 
64bit","id":"andi_32_1d9302e57d30","length_bits":32,"mnemonic":"ANDI","parts":[{"segments":[{"lsb":20,"msb":31,"token":"simm12","width":12},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":1,"width":3},"lsb":4,"msb":6,"token":"3'b001","width":3},{"const":{"value":2,"width":3},"lsb":1,"msb":3,"token":"3'b010","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":19},"uid":"1d9302e57d30","uop_big_kind":"ALU","uop_class":{"source":"group_rule","uop_kind":"ALU"},"uop_group":"ALU"},{"asm":"andiw SrcL, simm, ->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"simm12","pieces":[{"insn_lsb":20,"insn_msb":31,"token":"simm12","width":12}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00002035","pattern":".................010.....0110101","width_bits":32}]},"encoding_kind":"L32","group":"Arithmetic Operation 32bit","id":"andiw_32_9ec1f7343dbd","length_bits":32,"mnemonic":"ANDIW","parts":[{"segments":[{"lsb":20,"msb":31,"token":"simm12","width":12},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":3,"width":3},"lsb":4,"msb":6,"token":"3'b011","width":3},{"const":{"value":2,"width":3},"lsb":1,"msb":3,"token":"3'b010","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":20},"uid":"9ec1f7343dbd","uop_big_kind":"ALU","uop_class":{"source":"group_rule","uop_kind":"ALU"},"uop_group":"ALU"},{"asm":"andw SrcL, 
SrcR<{.sw,.uw,.not}><<, ->{t, u, Rd}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"SrcRType","pieces":[{"insn_lsb":25,"insn_msb":26,"token":"SrcRType","width":2}],"signed":null},{"name":"shamt","pieces":[{"insn_lsb":27,"insn_msb":31,"token":"shamt","width":5}],"signed":null}],"index":0,"mask":"0x0000707f","match":"0x00002025","pattern":".................010.....0100101","width_bits":32}]},"encoding_kind":"L32","group":"Arithmetic Operation 32bit","id":"andw_32_6907ed7cec90","length_bits":32,"mnemonic":"ANDW","parts":[{"segments":[{"lsb":27,"msb":31,"token":"shamt","width":5},{"lsb":25,"msb":26,"token":"SrcRType","width":2},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":2,"width":3},"lsb":1,"msb":3,"token":"3'b010","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":21},"uid":"6907ed7cec90","uop_big_kind":"ALU","uop_class":{"source":"group_rule","uop_kind":"ALU"},"uop_group":"ALU"},{"asm":"assert SrcL","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null}],"index":0,"mask":"0xfff07fff","match":"0x0000102b","pattern":"000000000000.....001000000101011","width_bits":32}]},"encoding_kind":"L32","group":"Execution 
Control","id":"assert_32_f05d67874ae5","length_bits":32,"mnemonic":"ASSERT","parts":[{"segments":[{"const":{"value":0,"width":4},"lsb":28,"msb":31,"token":"4'b0000","width":4},{"const":{"value":0,"width":4},"lsb":24,"msb":27,"token":"4'b0000","width":4},{"const":{"value":0,"width":4},"lsb":20,"msb":23,"token":"4'b0000","width":4},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":1,"width":3},"lsb":12,"msb":14,"token":"3'b001","width":3},{"const":{"value":0,"width":5},"lsb":7,"msb":11,"token":"5'b0_0000","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":5,"width":3},"lsb":1,"msb":3,"token":"3'b101","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":22},"uid":"f05d67874ae5","uop_big_kind":"SYS","uop_class":{"note":"User confirmed A","source":"group_rule","uop_kind":"SYS"},"uop_group":"SYS"},{"asm":"B.ARG NORM.normal","encoding":{"length_bits":32,"parts":[{"fields":[],"index":0,"mask":"0xffffffff","match":"0x000fa023","pattern":"00000000000011111010000000100011","width_bits":32}]},"encoding_kind":"L32","group":"Block Argument","id":"b_arg_32_374ec956affe","length_bits":32,"mnemonic":"B.ARG","parts":[{"segments":[{"const":{"value":0,"width":12},"lsb":20,"msb":31,"token":"12'h000","width":12},{"const":{"value":31,"width":5},"lsb":15,"msb":19,"token":"5'b1_1111","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"const":{"value":0,"width":5},"lsb":7,"msb":11,"token":"5'b0_0000","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":29},"uid":"374ec956affe","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed 
A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.ARG format","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"format","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"format","width":5}],"signed":null}],"index":0,"mask":"0xfffff07f","match":"0x00003043","pattern":"00000000000000000011.....1000011","width_bits":32}]},"encoding_kind":"L32","group":"Block Argument","id":"b_arg_32_47e8ac50ac96","length_bits":32,"mnemonic":"B.ARG","parts":[{"segments":[{"const":{"value":0,"width":17},"lsb":15,"msb":31,"token":"17'b0_0000_0000_0000_0000","width":17},{"const":{"value":3,"width":3},"lsb":12,"msb":14,"token":"3'b011","width":3},{"lsb":7,"msb":11,"token":"format","width":5},{"const":{"value":4,"width":3},"lsb":4,"msb":6,"token":"3'b100","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":24},"uid":"47e8ac50ac96","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.ARG NZ2DN.canon","encoding":{"length_bits":32,"parts":[{"fields":[],"index":0,"mask":"0xffffffff","match":"0x020fae23","pattern":"00000010000011111010111000100011","width_bits":32}]},"encoding_kind":"L32","group":"Block 
Argument","id":"b_arg_32_5c8bfa662370","length_bits":32,"mnemonic":"B.ARG","parts":[{"segments":[{"const":{"value":32,"width":12},"lsb":20,"msb":31,"token":"12'h020","width":12},{"const":{"value":31,"width":5},"lsb":15,"msb":19,"token":"5'b1_1111","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"const":{"value":28,"width":5},"lsb":7,"msb":11,"token":"5'b1_1100","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":28},"uid":"5c8bfa662370","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.ARG ND2ZN.normal, FP16, Null","encoding":{"length_bits":32,"parts":[{"fields":[],"index":0,"mask":"0xffffffff","match":"0x180221a3","pattern":"00011000000000100010000110100011","width_bits":32}]},"encoding_kind":"L32","group":"Block Argument","id":"b_arg_32_95152c29a268","length_bits":32,"mnemonic":"B.ARG","parts":[{"segments":[{"const":{"value":384,"width":12},"lsb":20,"msb":31,"token":"12'h180","width":12},{"const":{"value":4,"width":5},"lsb":15,"msb":19,"token":"5'b0_0100","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"const":{"value":3,"width":5},"lsb":7,"msb":11,"token":"5'b0_0011","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":25},"uid":"95152c29a268","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.ARG 
DN2ZN.normal, FP16, Null","encoding":{"length_bits":32,"parts":[{"fields":[],"index":0,"mask":"0xffffffff","match":"0x18022423","pattern":"00011000000000100010010000100011","width_bits":32}]},"encoding_kind":"L32","group":"Block Argument","id":"b_arg_32_c6d5c49a4ad7","length_bits":32,"mnemonic":"B.ARG","parts":[{"segments":[{"const":{"value":384,"width":12},"lsb":20,"msb":31,"token":"12'h180","width":12},{"const":{"value":4,"width":5},"lsb":15,"msb":19,"token":"5'b0_0100","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"const":{"value":8,"width":5},"lsb":7,"msb":11,"token":"5'b0_1000","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":27},"uid":"c6d5c49a4ad7","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.ARG DN2NZ.normal, FP32, Null","encoding":{"length_bits":32,"parts":[{"fields":[],"index":0,"mask":"0xffffffff","match":"0x1800a4a3","pattern":"00011000000000001010010010100011","width_bits":32}]},"encoding_kind":"L32","group":"Block 
Argument","id":"b_arg_32_f19d18f2126b","length_bits":32,"mnemonic":"B.ARG","parts":[{"segments":[{"const":{"value":384,"width":12},"lsb":20,"msb":31,"token":"12'h180","width":12},{"const":{"value":1,"width":5},"lsb":15,"msb":19,"token":"5'b0_0001","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"const":{"value":9,"width":5},"lsb":7,"msb":11,"token":"5'b0_1001","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":26},"uid":"f19d18f2126b","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.ATTR {trap, atomic, , far, DataLayout.{canon, normal}, SrcType, PadValue, DR}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"C","pieces":[{"insn_lsb":25,"insn_msb":25,"token":"C","width":1}],"signed":null},{"name":"DR","pieces":[{"insn_lsb":26,"insn_msb":26,"token":"DR","width":1}],"signed":null},{"name":"DataLayout","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"DataLayout","width":5}],"signed":null},{"name":"DataType","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"DataType","width":5}],"signed":null},{"name":"PadValue","pieces":[{"insn_lsb":27,"insn_msb":31,"token":"PadValue","width":5}],"signed":null},{"name":"T","pieces":[{"insn_lsb":19,"insn_msb":19,"token":"T","width":1}],"signed":null},{"name":"aq","pieces":[{"insn_lsb":16,"insn_msb":16,"token":"aq","width":1}],"signed":null},{"name":"atom","pieces":[{"insn_lsb":17,"insn_msb":17,"token":"atom","width":1}],"signed":null},{"name":"far","pieces":[{"insn_lsb":18,"insn_msb":18,"token":"far","width":1}],"signed":null},{"name":"rl","pieces":[{"insn_lsb":15,"insn_msb":15,"token":"rl","width":1}],"signed":null}],"index":0,"mask":"0x0000707f",
"match":"0x00000023","pattern":".................000.....0100011","width_bits":32}]},"encoding_kind":"L32","group":"Block Attribute","id":"b_attr_32_58b896a8d70a","length_bits":32,"mnemonic":"B.ATTR","parts":[{"segments":[{"lsb":27,"msb":31,"token":"PadValue","width":5},{"lsb":26,"msb":26,"token":"DR","width":1},{"lsb":25,"msb":25,"token":"C","width":1},{"lsb":20,"msb":24,"token":"DataType","width":5},{"lsb":19,"msb":19,"token":"T","width":1},{"lsb":18,"msb":18,"token":"far","width":1},{"lsb":17,"msb":17,"token":"atom","width":1},{"lsb":16,"msb":16,"token":"aq","width":1},{"lsb":15,"msb":15,"token":"rl","width":1},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"lsb":7,"msb":11,"token":"DataLayout","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":23},"uid":"58b896a8d70a","uop_big_kind":"CMD","uop_class":{"cmd_kind":"DESC_B_ATTR","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.DIM RegSrc, uimm, ->LB2","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegSrc","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"RegSrc","width":5}],"signed":null},{"name":"uimm17","pieces":[{"insn_lsb":20,"insn_msb":31,"token":"uimm17[11:0]","value_lsb":0,"value_msb":11,"width":12},{"insn_lsb":7,"insn_msb":11,"token":"uimm17[16:12]","value_lsb":12,"value_msb":16,"width":5}],"signed":false}],"index":0,"mask":"0x0000707f","match":"0x00002043","pattern":".................010.....1000011","width_bits":32}]},"encoding_kind":"L32","group":"Block 
Argument","id":"b_dim_32_1caa1aa2944a","length_bits":32,"mnemonic":"B.DIM","parts":[{"segments":[{"lsb":20,"msb":31,"token":"uimm17[11:0]","width":12},{"lsb":15,"msb":19,"token":"RegSrc","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"lsb":7,"msb":11,"token":"uimm17[16:12]","width":5},{"const":{"value":4,"width":3},"lsb":4,"msb":6,"token":"3'b100","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":30},"uid":"1caa1aa2944a","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.DIM RegSrc, uimm, ->LB0","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegSrc","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"RegSrc","width":5}],"signed":null},{"name":"uimm17","pieces":[{"insn_lsb":20,"insn_msb":31,"token":"uimm17[11:0]","value_lsb":0,"value_msb":11,"width":12},{"insn_lsb":7,"insn_msb":11,"token":"uimm17[16:12]","value_lsb":12,"value_msb":16,"width":5}],"signed":false}],"index":0,"mask":"0x0000707f","match":"0x00000043","pattern":".................000.....1000011","width_bits":32}]},"encoding_kind":"L32","group":"Block 
Argument","id":"b_dim_32_27602ab68929","length_bits":32,"mnemonic":"B.DIM","parts":[{"segments":[{"lsb":20,"msb":31,"token":"uimm17[11:0]","width":12},{"lsb":15,"msb":19,"token":"RegSrc","width":5},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"lsb":7,"msb":11,"token":"uimm17[16:12]","width":5},{"const":{"value":4,"width":3},"lsb":4,"msb":6,"token":"3'b100","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":31},"uid":"27602ab68929","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.DIM RegSrc, uimm, ->LB1","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegSrc","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"RegSrc","width":5}],"signed":null},{"name":"uimm17","pieces":[{"insn_lsb":20,"insn_msb":31,"token":"uimm17[11:0]","value_lsb":0,"value_msb":11,"width":12},{"insn_lsb":7,"insn_msb":11,"token":"uimm17[16:12]","value_lsb":12,"value_msb":16,"width":5}],"signed":false}],"index":0,"mask":"0x0000707f","match":"0x00001043","pattern":".................001.....1000011","width_bits":32}]},"encoding_kind":"L32","group":"Block 
Argument","id":"b_dim_32_4191099a5f4d","length_bits":32,"mnemonic":"B.DIM","parts":[{"segments":[{"lsb":20,"msb":31,"token":"uimm17[11:0]","width":12},{"lsb":15,"msb":19,"token":"RegSrc","width":5},{"const":{"value":1,"width":3},"lsb":12,"msb":14,"token":"3'b001","width":3},{"lsb":7,"msb":11,"token":"uimm17[16:12]","width":5},{"const":{"value":4,"width":3},"lsb":4,"msb":6,"token":"3'b100","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":32},"uid":"4191099a5f4d","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_ARGUMENT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"b.eq SrcL, SrcR, label","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"simm12","pieces":[{"insn_lsb":25,"insn_msb":31,"token":"simm12[6:0]","value_lsb":0,"value_msb":6,"width":7},{"insn_lsb":7,"insn_msb":11,"token":"simm12[11:7]","value_lsb":7,"value_msb":11,"width":5}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00000027","pattern":".................000.....0100111","width_bits":32}]},"encoding_kind":"L32","group":"Branch","id":"b_eq_32_41f00e5abd89","length_bits":32,"mnemonic":"B.EQ","parts":[{"segments":[{"lsb":25,"msb":31,"token":"simm12[6:0]","width":7},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"lsb":7,"msb":11,"token":"simm12[11:7]","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":3,"width":3},"lsb":1,"msb":3,"token":"3'b011","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1",
"width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":33},"uid":"41f00e5abd89","uop_big_kind":"BRU","uop_class":{"source":"group_rule","uop_kind":"BRU"},"uop_group":"BRU"},{"asm":"b.ge SrcL, SrcR, label","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"simm12","pieces":[{"insn_lsb":25,"insn_msb":31,"token":"simm12[6:0]","value_lsb":0,"value_msb":6,"width":7},{"insn_lsb":7,"insn_msb":11,"token":"simm12[11:7]","value_lsb":7,"value_msb":11,"width":5}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00003027","pattern":".................011.....0100111","width_bits":32}]},"encoding_kind":"L32","group":"Branch","id":"b_ge_32_7bd9050705dc","length_bits":32,"mnemonic":"B.GE","parts":[{"segments":[{"lsb":25,"msb":31,"token":"simm12[6:0]","width":7},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":3,"width":3},"lsb":12,"msb":14,"token":"3'b011","width":3},{"lsb":7,"msb":11,"token":"simm12[11:7]","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":3,"width":3},"lsb":1,"msb":3,"token":"3'b011","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":34},"uid":"7bd9050705dc","uop_big_kind":"BRU","uop_class":{"source":"group_rule","uop_kind":"BRU"},"uop_group":"BRU"},{"asm":"b.geu SrcL, SrcR, 
label","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"simm12","pieces":[{"insn_lsb":25,"insn_msb":31,"token":"simm12[6:0]","value_lsb":0,"value_msb":6,"width":7},{"insn_lsb":7,"insn_msb":11,"token":"simm12[11:7]","value_lsb":7,"value_msb":11,"width":5}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00005027","pattern":".................101.....0100111","width_bits":32}]},"encoding_kind":"L32","group":"Branch","id":"b_geu_32_43a6e57dce55","length_bits":32,"mnemonic":"B.GEU","parts":[{"segments":[{"lsb":25,"msb":31,"token":"simm12[6:0]","width":7},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":5,"width":3},"lsb":12,"msb":14,"token":"3'b101","width":3},{"lsb":7,"msb":11,"token":"simm12[11:7]","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":3,"width":3},"lsb":1,"msb":3,"token":"3'b011","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":35},"uid":"43a6e57dce55","uop_big_kind":"BRU","uop_class":{"source":"group_rule","uop_kind":"BRU"},"uop_group":"BRU"},{"asm":"B.HINT {BR.{likely, unlikely}, TEMP.{hot, warm, cool, none}, 
PRFSIZE}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"L/UL","pieces":[{"insn_lsb":16,"insn_msb":16,"token":"L/UL","width":1}],"signed":null},{"name":"V","pieces":[{"insn_lsb":15,"insn_msb":15,"token":"V","width":1}],"signed":null},{"name":"prefetch_size","pieces":[{"insn_lsb":20,"insn_msb":31,"token":"prefetch_size","width":12}],"signed":null},{"name":"temp","pieces":[{"insn_lsb":17,"insn_msb":18,"token":"temp","width":2}],"signed":null}],"index":0,"mask":"0x00087fff","match":"0x00000033","pattern":"............0....000000000110011","width_bits":32}]},"encoding_kind":"L32","group":"Block Hint","id":"b_hint_32_69d942ff1583","length_bits":32,"mnemonic":"B.HINT","parts":[{"segments":[{"lsb":20,"msb":31,"token":"prefetch_size","width":12},{"const":{"value":0,"width":1},"lsb":19,"msb":19,"token":"0","width":1},{"lsb":17,"msb":18,"token":"temp","width":2},{"lsb":16,"msb":16,"token":"L/UL","width":1},{"lsb":15,"msb":15,"token":"V","width":1},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"const":{"value":0,"width":5},"lsb":7,"msb":11,"token":"5'b0_0000","width":5},{"const":{"value":3,"width":3},"lsb":4,"msb":6,"token":"3'b011","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":36},"uid":"69d942ff1583","uop_big_kind":"CMD","uop_class":{"cmd_kind":"DESC_B_HINT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.HINT TRACE.{begin, 
end}","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"B/E","pieces":[{"insn_lsb":15,"insn_msb":15,"token":"B/E","width":1}],"signed":null},{"name":"reserve","pieces":[{"insn_lsb":16,"insn_msb":31,"token":"reserve","width":16}],"signed":null}],"index":0,"mask":"0x00007fff","match":"0x00001033","pattern":".................001000000110011","width_bits":32}]},"encoding_kind":"L32","group":"Block Hint","id":"b_hint_32_a65821182bf3","length_bits":32,"mnemonic":"B.HINT","parts":[{"segments":[{"lsb":16,"msb":31,"token":"reserve","width":16},{"lsb":15,"msb":15,"token":"B/E","width":1},{"const":{"value":1,"width":3},"lsb":12,"msb":14,"token":"3'b001","width":3},{"const":{"value":0,"width":5},"lsb":7,"msb":11,"token":"5'b0_0000","width":5},{"const":{"value":3,"width":3},"lsb":4,"msb":6,"token":"3'b011","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":37},"uid":"a65821182bf3","uop_big_kind":"CMD","uop_class":{"cmd_kind":"DESC_B_HINT","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.IOD DepSrc0, DepSrc1, DepSrc2, ->DepDst","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"DepDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"DepDst","width":5}],"signed":null},{"name":"DepSrc0","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"DepSrc0","width":5}],"signed":null},{"name":"DepSrc1","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"DepSrc1","width":5}],"signed":null},{"name":"DepSrc2","pieces":[{"insn_lsb":27,"insn_msb":31,"token":"DepSrc2","width":5}],"signed":null}],"index":0,"mask":"0x0600707f","match":"0x00001013","pattern":".....00..........001.....0010011","width_bits":32}]},"encoding_kind":"L32","group":"Block Input & 
Output","id":"b_iod_32_d4d0a426dcab","length_bits":32,"mnemonic":"B.IOD","parts":[{"segments":[{"lsb":27,"msb":31,"token":"DepSrc2","width":5},{"const":{"value":0,"width":2},"lsb":25,"msb":26,"token":"2'b00","width":2},{"lsb":20,"msb":24,"token":"DepSrc1","width":5},{"lsb":15,"msb":19,"token":"DepSrc0","width":5},{"const":{"value":1,"width":3},"lsb":12,"msb":14,"token":"3'b001","width":3},{"lsb":7,"msb":11,"token":"DepDst","width":5},{"const":{"value":1,"width":3},"lsb":4,"msb":6,"token":"3'b001","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":38},"uid":"d4d0a426dcab","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_IO","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.IOR [RegSrc0, RegSrc1, RegSrc2],[RegDst]","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"RegDst","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegDst","width":5}],"signed":null},{"name":"RegSrc0","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"RegSrc0","width":5}],"signed":null},{"name":"RegSrc1","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"RegSrc1","width":5}],"signed":null},{"name":"RegSrc2","pieces":[{"insn_lsb":27,"insn_msb":31,"token":"RegSrc2","width":5}],"signed":null}],"index":0,"mask":"0x0600707f","match":"0x00000013","pattern":".....00..........000.....0010011","width_bits":32}]},"encoding_kind":"L32","group":"Block Input & 
Output","id":"b_ior_32_c3ea71404eb3","length_bits":32,"mnemonic":"B.IOR","parts":[{"segments":[{"lsb":27,"msb":31,"token":"RegSrc2","width":5},{"const":{"value":0,"width":2},"lsb":25,"msb":26,"token":"2'b00","width":2},{"lsb":20,"msb":24,"token":"RegSrc1","width":5},{"lsb":15,"msb":19,"token":"RegSrc0","width":5},{"const":{"value":0,"width":3},"lsb":12,"msb":14,"token":"3'b000","width":3},{"lsb":7,"msb":11,"token":"RegDst","width":5},{"const":{"value":1,"width":3},"lsb":4,"msb":6,"token":"3'b001","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":39},"uid":"c3ea71404eb3","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_IO","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.IOT [SrcTile0<.reuse>, SrcTile1<.reuse>], group=1, ->DstTile","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"DstTile","pieces":[{"insn_lsb":25,"insn_msb":27,"token":"DstTile","width":3}],"signed":null},{"name":"RegSrc","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegSrc","width":5}],"signed":null},{"name":"S0R","pieces":[{"insn_lsb":30,"insn_msb":30,"token":"S0R","width":1}],"signed":null},{"name":"S0V","pieces":[{"insn_lsb":28,"insn_msb":28,"token":"S0V","width":1}],"signed":null},{"name":"S1R","pieces":[{"insn_lsb":31,"insn_msb":31,"token":"S1R","width":1}],"signed":null},{"name":"S1V","pieces":[{"insn_lsb":29,"insn_msb":29,"token":"S1V","width":1}],"signed":null},{"name":"SrcTile0","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcTile0","width":5}],"signed":null},{"name":"SrcTile1","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcTile1","width":5}],"signed":null}],"index":0,"mask":"0x0000707f","match":"0x00005013","pattern":".................101.....0010011","width_bits":32}]},"encoding_kind":"L32","group":"Block Input & 
Output","id":"b_iot_32_5537088c4f03","length_bits":32,"mnemonic":"B.IOT","parts":[{"segments":[{"lsb":31,"msb":31,"token":"S1R","width":1},{"lsb":30,"msb":30,"token":"S0R","width":1},{"lsb":29,"msb":29,"token":"S1V","width":1},{"lsb":28,"msb":28,"token":"S0V","width":1},{"lsb":25,"msb":27,"token":"DstTile","width":3},{"lsb":20,"msb":24,"token":"SrcTile1","width":5},{"lsb":15,"msb":19,"token":"SrcTile0","width":5},{"const":{"value":5,"width":3},"lsb":12,"msb":14,"token":"3'b101","width":3},{"lsb":7,"msb":11,"token":"RegSrc","width":5},{"const":{"value":1,"width":3},"lsb":4,"msb":6,"token":"3'b001","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":40},"uid":"5537088c4f03","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_IO","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.IOT [SrcTile0<.reuse>, SrcTile1<.reuse>], group=0, 
->DstTile","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"DstTile","pieces":[{"insn_lsb":25,"insn_msb":27,"token":"DstTile","width":3}],"signed":null},{"name":"RegSrc","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"RegSrc","width":5}],"signed":null},{"name":"S0R","pieces":[{"insn_lsb":30,"insn_msb":30,"token":"S0R","width":1}],"signed":null},{"name":"S0V","pieces":[{"insn_lsb":28,"insn_msb":28,"token":"S0V","width":1}],"signed":null},{"name":"S1R","pieces":[{"insn_lsb":31,"insn_msb":31,"token":"S1R","width":1}],"signed":null},{"name":"S1V","pieces":[{"insn_lsb":29,"insn_msb":29,"token":"S1V","width":1}],"signed":null},{"name":"SrcTile0","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcTile0","width":5}],"signed":null},{"name":"SrcTile1","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcTile1","width":5}],"signed":null}],"index":0,"mask":"0x0000707f","match":"0x00004013","pattern":".................100.....0010011","width_bits":32}]},"encoding_kind":"L32","group":"Block Input & Output","id":"b_iot_32_f6b1a38eb134","length_bits":32,"mnemonic":"B.IOT","parts":[{"segments":[{"lsb":31,"msb":31,"token":"S1R","width":1},{"lsb":30,"msb":30,"token":"S0R","width":1},{"lsb":29,"msb":29,"token":"S1V","width":1},{"lsb":28,"msb":28,"token":"S0V","width":1},{"lsb":25,"msb":27,"token":"DstTile","width":3},{"lsb":20,"msb":24,"token":"SrcTile1","width":5},{"lsb":15,"msb":19,"token":"SrcTile0","width":5},{"const":{"value":4,"width":3},"lsb":12,"msb":14,"token":"3'b100","width":3},{"lsb":7,"msb":11,"token":"RegSrc","width":5},{"const":{"value":1,"width":3},"lsb":4,"msb":6,"token":"3'b001","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":41},"uid":"f6b1a38eb134","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_IO","note":"User confirmed 
A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.IOTI [SrcTile0<.reuse>, SrcTile1<.reuse>], group=0, ->DstTile","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"DstTile","pieces":[{"insn_lsb":25,"insn_msb":27,"token":"DstTile","width":3}],"signed":null},{"name":"S0R","pieces":[{"insn_lsb":30,"insn_msb":30,"token":"S0R","width":1}],"signed":null},{"name":"S0V","pieces":[{"insn_lsb":28,"insn_msb":28,"token":"S0V","width":1}],"signed":null},{"name":"S1R","pieces":[{"insn_lsb":31,"insn_msb":31,"token":"S1R","width":1}],"signed":null},{"name":"S1V","pieces":[{"insn_lsb":29,"insn_msb":29,"token":"S1V","width":1}],"signed":null},{"name":"SrcTile0","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcTile0","width":5}],"signed":null},{"name":"SrcTile1","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcTile1","width":5}],"signed":null},{"name":"imm5","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"imm5","width":5}],"signed":null}],"index":0,"mask":"0x0000707f","match":"0x00006013","pattern":".................110.....0010011","width_bits":32}]},"encoding_kind":"L32","group":"Block Input & 
Output","id":"b_ioti_32_0be0ecce86bb","length_bits":32,"mnemonic":"B.IOTI","parts":[{"segments":[{"lsb":31,"msb":31,"token":"S1R","width":1},{"lsb":30,"msb":30,"token":"S0R","width":1},{"lsb":29,"msb":29,"token":"S1V","width":1},{"lsb":28,"msb":28,"token":"S0V","width":1},{"lsb":25,"msb":27,"token":"DstTile","width":3},{"lsb":20,"msb":24,"token":"SrcTile1","width":5},{"lsb":15,"msb":19,"token":"SrcTile0","width":5},{"const":{"value":6,"width":3},"lsb":12,"msb":14,"token":"3'b110","width":3},{"lsb":7,"msb":11,"token":"imm5","width":5},{"const":{"value":1,"width":3},"lsb":4,"msb":6,"token":"3'b001","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":42},"uid":"0be0ecce86bb","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_IO","note":"User confirmed A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"B.IOTI [SrcTile0<.reuse>, SrcTile1<.reuse>], group=1, 
->DstTile","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"DstTile","pieces":[{"insn_lsb":25,"insn_msb":27,"token":"DstTile","width":3}],"signed":null},{"name":"S0R","pieces":[{"insn_lsb":30,"insn_msb":30,"token":"S0R","width":1}],"signed":null},{"name":"S0V","pieces":[{"insn_lsb":28,"insn_msb":28,"token":"S0V","width":1}],"signed":null},{"name":"S1R","pieces":[{"insn_lsb":31,"insn_msb":31,"token":"S1R","width":1}],"signed":null},{"name":"S1V","pieces":[{"insn_lsb":29,"insn_msb":29,"token":"S1V","width":1}],"signed":null},{"name":"SrcTile0","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcTile0","width":5}],"signed":null},{"name":"SrcTile1","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcTile1","width":5}],"signed":null},{"name":"imm5","pieces":[{"insn_lsb":7,"insn_msb":11,"token":"imm5","width":5}],"signed":null}],"index":0,"mask":"0x0000707f","match":"0x00007013","pattern":".................111.....0010011","width_bits":32}]},"encoding_kind":"L32","group":"Block Input & Output","id":"b_ioti_32_fb045cf4149a","length_bits":32,"mnemonic":"B.IOTI","parts":[{"segments":[{"lsb":31,"msb":31,"token":"S1R","width":1},{"lsb":30,"msb":30,"token":"S0R","width":1},{"lsb":29,"msb":29,"token":"S1V","width":1},{"lsb":28,"msb":28,"token":"S0V","width":1},{"lsb":25,"msb":27,"token":"DstTile","width":3},{"lsb":20,"msb":24,"token":"SrcTile1","width":5},{"lsb":15,"msb":19,"token":"SrcTile0","width":5},{"const":{"value":7,"width":3},"lsb":12,"msb":14,"token":"3'b111","width":3},{"lsb":7,"msb":11,"token":"imm5","width":5},{"const":{"value":1,"width":3},"lsb":4,"msb":6,"token":"3'b001","width":3},{"const":{"value":1,"width":3},"lsb":1,"msb":3,"token":"3'b001","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":43},"uid":"fb045cf4149a","uop_big_kind":"CMD","uop_class":{"cmd_kind":"BLOCK_IO","note":"User confirmed 
A","source":"group_rule","uop_kind":"CMD"},"uop_group":"CMD"},{"asm":"b.lt SrcL, SrcR, label","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"simm12","pieces":[{"insn_lsb":25,"insn_msb":31,"token":"simm12[6:0]","value_lsb":0,"value_msb":6,"width":7},{"insn_lsb":7,"insn_msb":11,"token":"simm12[11:7]","value_lsb":7,"value_msb":11,"width":5}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00002027","pattern":".................010.....0100111","width_bits":32}]},"encoding_kind":"L32","group":"Branch","id":"b_lt_32_2ca5ecd25cfb","length_bits":32,"mnemonic":"B.LT","parts":[{"segments":[{"lsb":25,"msb":31,"token":"simm12[6:0]","width":7},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"lsb":7,"msb":11,"token":"simm12[11:7]","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":3,"width":3},"lsb":1,"msb":3,"token":"3'b011","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":44},"uid":"2ca5ecd25cfb","uop_big_kind":"BRU","uop_class":{"source":"group_rule","uop_kind":"BRU"},"uop_group":"BRU"},{"asm":"b.ltu SrcL, SrcR, 
label","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"simm12","pieces":[{"insn_lsb":25,"insn_msb":31,"token":"simm12[6:0]","value_lsb":0,"value_msb":6,"width":7},{"insn_lsb":7,"insn_msb":11,"token":"simm12[11:7]","value_lsb":7,"value_msb":11,"width":5}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00004027","pattern":".................100.....0100111","width_bits":32}]},"encoding_kind":"L32","group":"Branch","id":"b_ltu_32_f1ea7ad44e37","length_bits":32,"mnemonic":"B.LTU","parts":[{"segments":[{"lsb":25,"msb":31,"token":"simm12[6:0]","width":7},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":4,"width":3},"lsb":12,"msb":14,"token":"3'b100","width":3},{"lsb":7,"msb":11,"token":"simm12[11:7]","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":3,"width":3},"lsb":1,"msb":3,"token":"3'b011","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":45},"uid":"f1ea7ad44e37","uop_big_kind":"BRU","uop_class":{"source":"group_rule","uop_kind":"BRU"},"uop_group":"BRU"},{"asm":"b.ne SrcL, SrcR, 
label","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"SrcL","pieces":[{"insn_lsb":15,"insn_msb":19,"token":"SrcL","width":5}],"signed":null},{"name":"SrcR","pieces":[{"insn_lsb":20,"insn_msb":24,"token":"SrcR","width":5}],"signed":null},{"name":"simm12","pieces":[{"insn_lsb":25,"insn_msb":31,"token":"simm12[6:0]","value_lsb":0,"value_msb":6,"width":7},{"insn_lsb":7,"insn_msb":11,"token":"simm12[11:7]","value_lsb":7,"value_msb":11,"width":5}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00001027","pattern":".................001.....0100111","width_bits":32}]},"encoding_kind":"L32","group":"Branch","id":"b_ne_32_831af6a36ff4","length_bits":32,"mnemonic":"B.NE","parts":[{"segments":[{"lsb":25,"msb":31,"token":"simm12[6:0]","width":7},{"lsb":20,"msb":24,"token":"SrcR","width":5},{"lsb":15,"msb":19,"token":"SrcL","width":5},{"const":{"value":1,"width":3},"lsb":12,"msb":14,"token":"3'b001","width":3},{"lsb":7,"msb":11,"token":"simm12[11:7]","width":5},{"const":{"value":2,"width":3},"lsb":4,"msb":6,"token":"3'b010","width":3},{"const":{"value":3,"width":3},"lsb":1,"msb":3,"token":"3'b011","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":46},"uid":"831af6a36ff4","uop_big_kind":"BRU","uop_class":{"source":"group_rule","uop_kind":"BRU"},"uop_group":"BRU"},{"asm":"b.nz 
label","encoding":{"length_bits":32,"parts":[{"fields":[{"name":"simm22","pieces":[{"insn_lsb":15,"insn_msb":31,"token":"simm22[16:0]","value_lsb":0,"value_msb":16,"width":17},{"insn_lsb":7,"insn_msb":11,"token":"simm22[21:17]","value_lsb":17,"value_msb":21,"width":5}],"signed":true}],"index":0,"mask":"0x0000707f","match":"0x00002037","pattern":".................010.....0110111","width_bits":32}]},"encoding_kind":"L32","group":"Branch","id":"b_nz_32_0f583cdd8d4d","length_bits":32,"mnemonic":"B.NZ","parts":[{"segments":[{"lsb":15,"msb":31,"token":"simm22[16:0]","width":17},{"const":{"value":2,"width":3},"lsb":12,"msb":14,"token":"3'b010","width":3},{"lsb":7,"msb":11,"token":"simm22[21:17]","width":5},{"const":{"value":3,"width":3},"lsb":4,"msb":6,"token":"3'b011","width":3},{"const":{"value":3,"width":3},"lsb":1,"msb":3,"token":"3'b011","width":3},{"const":{"value":1,"width":1},"lsb":0,"msb":0,"token":"1","width":1}],"width_bits":32}],"source":{"file":"opcodes/lx_32.opc","line":47},"uid":"0f583cdd8d4d","uop_big_kind":"BRU","uop_class":{"source":"group_rule","uop_kind":"BRU"},"uop_group":"BRU"},{"asm":"B.TEXT