Merged
Commits
122 commits
5c9d239
docs(plans): smoltcp passt-pattern port — spec + Phase 0 plan
dpsoft Apr 27, 2026
8d63aaa
test(network): scaffold network_baseline pins with frame helpers
dpsoft Apr 27, 2026
bc9eefb
test(network): address review — restore reserved constants, alias IpA…
dpsoft Apr 27, 2026
21134d8
test(network): pin TCP handshake SYN-ACK emission
dpsoft Apr 27, 2026
1226986
test(network): pin TCP guest↔host data round-trip
dpsoft Apr 27, 2026
5838586
test(network): BROKEN_ON_PURPOSE pin — 256 KB to_host cliff
dpsoft Apr 28, 2026
6cc850c
test(network): hoist inline `use` statements to module scope
dpsoft Apr 28, 2026
a5b9128
test(network): pin TCP rate limit, concurrent cap, deny list
dpsoft Apr 28, 2026
cf59b33
test(network): pin ARP reply behavior for gateway and subnet
dpsoft Apr 28, 2026
3dc5309
test(network): pin DNS resolution and cache xid-rewrite
dpsoft Apr 28, 2026
40c0f7e
test(network): fix build_udp_frame payload_len double-count
dpsoft Apr 28, 2026
279af3b
test(network): BROKEN_ON_PURPOSE pin — UDP non-DNS dropped
dpsoft Apr 28, 2026
4d96ad7
test(network): BROKEN_ON_PURPOSE pin — ICMP echo dropped
dpsoft Apr 28, 2026
41c8382
bench(network): divan microbenches for SLIRP hot paths
dpsoft Apr 28, 2026
499ee35
bench(network): parametric NAT-walk scaling at 1/100/1000 flows
dpsoft Apr 28, 2026
7cca766
bench(network): DNS cache hit and miss paths
dpsoft Apr 28, 2026
7868bb2
ci(bench): include network microbenches in regression gate
dpsoft Apr 28, 2026
e1ed1e2
bench(network): voidbox-network-bench binary scaffold
dpsoft Apr 28, 2026
df898d6
bench(network): TCP throughput via busybox nc
dpsoft Apr 28, 2026
68136d1
bench(network): TCP RR/CRR latency p50/p99
dpsoft Apr 28, 2026
594190b
bench(network): UDP DNS qps and JSON report output
dpsoft Apr 28, 2026
3143e1f
docs(plans): rename SmoltcpBackend → SlirpBackend in spec + Phase 0 plan
dpsoft Apr 28, 2026
b7e426c
feat(network): introduce NetworkBackend trait
dpsoft Apr 28, 2026
046d57d
refactor(slirp): add drain_to_guest wrapper for trait fit
dpsoft Apr 28, 2026
5095d6d
refactor(slirp): move poll body into drain_to_guest, drop alloc
dpsoft Apr 28, 2026
66f007f
feat(slirp): impl NetworkBackend for SlirpStack
dpsoft Apr 28, 2026
dbe5208
refactor(virtio_net): hold dyn NetworkBackend, reuse rx buffer
dpsoft Apr 28, 2026
bf3cd6a
refactor(network): rename SlirpStack to SlirpBackend
dpsoft Apr 28, 2026
028707c
docs(plans): add Phase 1 plan (ICMP echo via SOCK_DGRAM IPPROTO_ICMP)
dpsoft Apr 28, 2026
fa48f05
feat(slirp): add IcmpEchoEntry + IPPROTO_ICMP socket helper
dpsoft Apr 28, 2026
3d2ec08
refactor(slirp): hoist FromRawFd to module scope, drop redundant use …
dpsoft Apr 28, 2026
c5112c9
feat(slirp): forward guest ICMP echo via SOCK_DGRAM IPPROTO_ICMP
dpsoft Apr 28, 2026
5180bda
feat(slirp): relay ICMP echo replies back to guest
dpsoft Apr 28, 2026
195038f
feat(slirp): warn-once + fallback when unprivileged ICMP forbidden
dpsoft Apr 28, 2026
f9330da
test(network): flip ICMP pin — assert echo reply (was BROKEN_ON_PURPOSE)
dpsoft Apr 28, 2026
8572122
bench(network): populate ICMP RR latency p50
dpsoft Apr 29, 2026
77dfc67
fix(scripts): revert setuid busybox in test image (Phase 1.6 regression)
dpsoft Apr 29, 2026
83f7dcb
docs(plans): add Phase 2 plan (generalize UDP via per-flow connected …
dpsoft Apr 29, 2026
4d46c5f
feat(slirp): add UdpFlowEntry + per-flow connected socket helper
dpsoft Apr 29, 2026
0aff7df
feat(slirp): forward non-DNS UDP via per-flow connected sockets
dpsoft Apr 29, 2026
cd41b8f
ci(bench): add strict voidbox-network-bench step (no continue-on-error)
dpsoft Apr 29, 2026
b117c13
feat(slirp): relay UDP flow replies back to guest
dpsoft Apr 29, 2026
cced8ad
feat(slirp): UDP flow idle reap (60s)
dpsoft Apr 29, 2026
b79e07f
test(network): full RTT for UDP pin (was BROKEN_ON_PURPOSE one-way)
dpsoft Apr 29, 2026
0758df1
bench(network): document DNS qps busybox-nc bottleneck (set null + WARN)
dpsoft Apr 29, 2026
0d0ab20
fix(startup-bench): require userspace vsock backend for snapshot capture
dpsoft Apr 29, 2026
c26d44c
docs(plans): add Phase 3 plan (TCP relay rewrite via MSG_PEEK + seque…
dpsoft Apr 29, 2026
ecc624a
docs(plans): lock observability as a hard non-negotiable invariant
dpsoft Apr 29, 2026
1882c33
refactor(slirp): add bytes_in_flight to TcpNatEntry (no behavior change)
dpsoft Apr 29, 2026
e143f7a
refactor(slirp): add recv_peek helper using libc::recv MSG_PEEK
dpsoft Apr 29, 2026
bc1708a
refactor(slirp): peek-based host→guest TCP relay (drops to_guest depe…
dpsoft Apr 29, 2026
ee9f8da
refactor(slirp): ACK-driven consume from kernel socket
dpsoft Apr 29, 2026
4a41f57
refactor(slirp): drop to_host buffer + 256KB cliff, use TCP backpressure
dpsoft Apr 29, 2026
03a1f59
refactor(slirp): drop to_guest/to_host/pending_ack fields and dead he…
dpsoft Apr 29, 2026
ae94859
test(network): flip 256KB cliff pin — assert >1MB throughput succeeds
dpsoft Apr 29, 2026
5fe4316
bench(network): tcp_bulk_throughput_1mb — measures post-Phase-3 backp…
dpsoft Apr 29, 2026
4471c91
bench(network): --bulk-mb mode to measure post-Phase-3 backpressure
dpsoft Apr 29, 2026
120ad73
docs(plans): add Phase 4 plan (unified flow table refactor)
dpsoft Apr 29, 2026
827135e
refactor(slirp): define FlowKey + FlowEntry enums (no callers yet)
dpsoft Apr 29, 2026
f5a2d11
fix(ci): non-Linux stubs for benches/network.rs + voidbox-network-bench
dpsoft Apr 30, 2026
ee353c5
docs(plans): add three Phase 4 benches (mixed flows, per-protocol, ta…
dpsoft Apr 30, 2026
93523ba
refactor(slirp): add flow_table field on SlirpBackend (parallel to ex…
dpsoft Apr 30, 2026
e94998c
refactor(slirp): migrate ICMP path to flow_table
dpsoft Apr 30, 2026
29206d1
refactor(slirp): migrate UDP path to flow_table
dpsoft Apr 30, 2026
9c3fac9
refactor(slirp): migrate TCP path to flow_table
dpsoft Apr 30, 2026
7cad565
refactor(slirp): update Phase 4 doc header for unified flow table
dpsoft Apr 30, 2026
f53de94
bench(network): poll_with_n_mixed_flows — mixed TCP/UDP/ICMP at scale
dpsoft Apr 30, 2026
ae9195b
bench(network): process_udp_frame + process_icmp_echo_request
dpsoft Apr 30, 2026
01ea90a
bench(network): add flow_table_insert_remove synthetic microbench
dpsoft Apr 30, 2026
8566451
docs(plans): add Phase 5 plan (stateless NAT + port forwarding)
dpsoft Apr 30, 2026
81ba8ca
feat(network): add nat.rs with stateless translate_outbound (no calle…
dpsoft Apr 30, 2026
aad628b
refactor(slirp): add nat::Rules field on SlirpBackend (parallel to de…
dpsoft Apr 30, 2026
4d622d2
refactor(slirp): TCP path uses nat::translate_outbound
dpsoft Apr 30, 2026
dbb641c
refactor(slirp): UDP path uses nat::translate_outbound, drop deny_lis…
dpsoft Apr 30, 2026
1c27145
refactor(slirp): plumb port_forwards from NetworkConfig into nat::Rules
dpsoft Apr 30, 2026
7e8d5ce
test(network): pin nat::translate_outbound (loopback, external, deny)
dpsoft Apr 30, 2026
d31a3ec
bench(network): nat_translate_outbound_hot_path — Phase 5 baseline
dpsoft Apr 30, 2026
4baaa9a
feat(slirp): TcpNatState::SynSent + handle inbound SYN-ACK
dpsoft Apr 30, 2026
a464fc1
bench(network): tcp_inbound_syn_ack_transition — Phase 5.5b.1 microbench
dpsoft Apr 30, 2026
9b077d2
feat(slirp): add synthesize_inbound_syn helper for port-forwarding
dpsoft Apr 30, 2026
473971f
bench(network): synthesize_inbound_syn pure-compute (Phase 5.5b.2.b)
dpsoft Apr 30, 2026
b2fbf58
feat(slirp): port-forward listener thread implementation (not wired yet)
dpsoft Apr 30, 2026
efbf5a9
feat(slirp): wire spawn_port_forward_listeners from with_security
dpsoft Apr 30, 2026
423fba2
test(network): tcp_port_forward_inbound — Phase 5.5b e2e contract
dpsoft Apr 30, 2026
aa60b8a
bench(network): port_forward_accept_latency — Phase 5.5b wall-clock b…
dpsoft Apr 30, 2026
5a02b14
chore(bench): add scripts/bench-compare.sh — phase comparison report
dpsoft Apr 30, 2026
9cab10e
test(network): icmp_echo_returns_reply — probe + assert, no silent skip
dpsoft Apr 30, 2026
bb64525
fix(network-bench): skip failed iterations + drop guest-ping ICMP path
dpsoft Apr 30, 2026
6a892c0
bench(network): migrate from deprecated .poll() to drain_to_guest()
dpsoft Apr 30, 2026
163bed3
chore(bench): bench-compare.sh — fall back without bench-helpers feature
dpsoft Apr 30, 2026
e6de98a
fix(network-bench): bound accept-thread lifetimes with deadlines
dpsoft Apr 30, 2026
47868f0
docs: Phase 6 overview plan — TCP lifecycle + async connect + windows…
dpsoft Apr 30, 2026
a9f9d01
docs: Phase 6.4 detailed TDD plan — epoll-driven RX
dpsoft Apr 30, 2026
3821cbc
test(network): drain_n via drain_to_guest + real retransmit in 256kb …
dpsoft Apr 30, 2026
3e47ffb
test(network): pin tcp_rx_latency_sub_5ms (BROKEN_ON_PURPOSE)
dpsoft Apr 30, 2026
131ceb9
Revert "test(network): pin tcp_rx_latency_sub_5ms (BROKEN_ON_PURPOSE)"
dpsoft Apr 30, 2026
7c2a5b5
docs(phase6.4): drop Task 2 unit-level pin — VMM-level contract instead
dpsoft Apr 30, 2026
0bdd99d
feat(network): EpollDispatch skeleton with epoll_create1
dpsoft Apr 30, 2026
07bc6a8
feat(network): EpollDispatch register/unregister
dpsoft Apr 30, 2026
2311cec
feat(network): EpollDispatch::wait_with_timeout
dpsoft Apr 30, 2026
fe92f5d
feat(network): EpollDispatch self-pipe wakeup
dpsoft Apr 30, 2026
45cba72
refactor(slirp): SlirpBackend holds Arc<Mutex<EpollDispatch>> + Waker
dpsoft May 1, 2026
085a22a
feat(slirp): register TCP flows with EpollDispatch
dpsoft May 1, 2026
f2734d5
feat(slirp): register UDP + ICMP flows with EpollDispatch
dpsoft May 1, 2026
a5600a3
feat(slirp): relay loops dispatch by epoll readiness
dpsoft May 1, 2026
11d21a6
feat(vmm): net_poll_thread driven by epoll_wait
dpsoft May 1, 2026
5d6c786
feat(slirp): rebuild epoll set on snapshot restore
dpsoft May 1, 2026
590edd0
fix(test): gate epoll_set_rebuilt smoke test on bench-helpers feature
dpsoft May 2, 2026
85f1f16
bench(network): tcp_rx_latency_one_packet — Phase 6.4 baseline
dpsoft May 2, 2026
ed048e5
perf(slirp): eliminate epoll mutex contention via event queue
dpsoft May 3, 2026
17b437b
perf(slirp): replace two-pass relay sweep with lazy close queue
dpsoft May 3, 2026
bdef4bd
perf(slirp): prefer try_lock on epoll over pending_events in drain_to…
dpsoft May 3, 2026
15231cb
perf(slirp): wake net-poll thread when process_guest_frame queues frames
dpsoft May 3, 2026
bebeb30
perf(vmm): epoll_wait timeout 50ms → 5ms — restore CRR latency
dpsoft May 3, 2026
1d3e816
chore: remove Phase-N references from inline + doc comments
dpsoft May 3, 2026
9a46865
perf(vmm): adaptive epoll_wait timeout (5 ms active / 50 ms idle)
dpsoft May 4, 2026
ec6c4e2
fix(slirp): collision-safe flow tokens via monotonic counter
dpsoft May 4, 2026
2894920
fix(slirp): EpollDispatch lock-free — register/unregister never block
dpsoft May 4, 2026
5560498
perf(slirp): O(1) dedup in idle-timeout sweep via HashSet
dpsoft May 4, 2026
d43dbc4
docs(vmm): net_poll_thread doc-comment matches adaptive timeout
dpsoft May 4, 2026
dcbf18b
style: rust-style sweep on Phase 6.4 / Copilot-fix code
dpsoft May 4, 2026
13e70dd
Merge remote-tracking branch 'origin/main' into smoltcp-passt-port-ph…
dpsoft May 6, 2026
189 changes: 164 additions & 25 deletions benches/network.rs
@@ -170,8 +170,8 @@ mod linux_benches {
///
/// The timed section is a single `poll()` call on the pre-populated stack,
/// so the measurement reflects the NAT-walk cost at that table size.
/// Today the walk is `O(n)`; the unified flow table planned for Phase 4
/// should keep the same asymptotic complexity but with smaller constants.
/// Today the walk is `O(n)`; the unified flow table keeps the same
/// asymptotic complexity but with smaller per-entry constants.
#[divan::bench(args = [1, 100, 1000])]
fn poll_with_n_flows(bencher: Bencher, n: usize) {
let mut stack = SlirpBackend::new().unwrap();
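For readers unfamiliar with divan, the measurement shape this doc comment describes looks roughly like the skeleton below: per-argument setup runs before `bench_local`, so the timed closure is only the call whose scaling is under test. The `Vec<u32>` walk is a synthetic stand-in for the NAT-table walk, not the crate's actual `SlirpBackend` state.

```rust
// Illustrative divan skeleton only; `walk_cost_at_table_size` is not a real
// bench in this PR and the Vec<u32> stands in for n NAT entries.
use divan::Bencher;

fn main() {
    divan::main();
}

#[divan::bench(args = [1, 100, 1000])]
fn walk_cost_at_table_size(bencher: Bencher, n: usize) {
    // Untimed setup: pre-populate the table at the requested size.
    let table: Vec<u32> = (0..n as u32).collect();
    bencher.bench_local(|| {
        // Timed section: one linear pass over the table, analogous to the
        // O(n) walk a single poll() performs.
        divan::black_box(table.iter().copied().sum::<u32>())
    });
}
```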
@@ -276,9 +276,9 @@ mod linux_benches {
});
}

/// Pure-compute bench for `nat::translate_outbound`. Phase 5 baseline
/// for future hasher / data-structure changes (e.g. moving deny_cidrs
/// from `Vec<Ipv4Net>` to a longest-prefix trie). Tens of nanoseconds
/// Pure-compute bench for `nat::translate_outbound`. Baseline for future
/// hasher / data-structure changes (e.g. moving deny_cidrs from
/// `Vec<Ipv4Net>` to a longest-prefix trie). Tens of nanoseconds
/// expected; microseconds would indicate an allocation in the hot path.
#[divan::bench]
fn nat_translate_outbound_hot_path(bencher: Bencher) {
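The deny-list check that dominates this hot path is, in essence, a linear longest-prefix scan. A minimal sketch of that shape follows; the `(Ipv4Addr, u8)` pairs and the `is_denied` helper are illustrative assumptions, not the crate's actual `nat::Rules` layout, and they show why a trie only pays off once the rule list grows long.

```rust
// Hypothetical shape of the deny-CIDR check; not the crate's real nat.rs code.
use std::net::Ipv4Addr;

/// Returns true if `dst` falls inside any denied CIDR. Cost is linear in the
/// number of rules, which is why the doc comment above floats a
/// longest-prefix trie as a future data-structure experiment.
fn is_denied(dst: Ipv4Addr, deny_cidrs: &[(Ipv4Addr, u8)]) -> bool {
    deny_cidrs.iter().any(|&(net, prefix)| {
        let mask = if prefix == 0 { 0 } else { u32::MAX << (32 - u32::from(prefix)) };
        (u32::from(dst) & mask) == (u32::from(net) & mask)
    })
}

fn main() {
    let rules = [(Ipv4Addr::new(10, 0, 0, 0), 8), (Ipv4Addr::new(169, 254, 0, 0), 16)];
    assert!(is_denied(Ipv4Addr::new(10, 1, 2, 3), &rules));
    assert!(!is_denied(Ipv4Addr::new(1, 1, 1, 1), &rules));
}
```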
@@ -305,13 +305,13 @@ mod linux_benches {
/// Measures TCP bulk throughput through the SLIRP relay under backpressure.
///
/// Pushes 1 MiB through the relay in 1 KiB chunks with a constrained host
/// receiver (`SO_RCVBUF=4096`) so the post-Phase-3 backpressure path is
/// exercised every iteration. Divan reports throughput in MB/s alongside
/// per-iteration latency, giving a numerical regression signal for the
/// passt-style sequence-mirroring + don't-ACK-on-EAGAIN backpressure path.
/// receiver (`SO_RCVBUF=4096`) so the backpressure path is exercised every
/// iteration. Divan reports throughput in MB/s alongside per-iteration
/// latency, giving a numerical regression signal for the passt-style
/// sequence-mirroring + don't-ACK-on-EAGAIN backpressure path.
///
/// The 95% delivery threshold mirrors `tcp_writes_more_than_256kb_succeed`
/// — the binary contract test for Phase 3.
/// — the binary contract test for TCP backpressure correctness.
#[divan::bench(sample_count = 10)]
fn tcp_bulk_throughput_1mb(bencher: Bencher) {
use smoltcp::wire::TcpControl;
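The "constrained host receiver" in this doc comment boils down to shrinking `SO_RCVBUF` on the listening socket so accepted connections inherit a tiny receive buffer and the relay hits backpressure almost immediately. A sketch of that setup, assuming the `libc` crate (already used elsewhere in this PR); the bench itself may wire this up differently.

```rust
// Sketch only: forcing a ~4 KiB receive buffer on the host side.
use std::net::TcpListener;
use std::os::unix::io::AsRawFd;

fn tiny_rcvbuf_listener() -> std::io::Result<TcpListener> {
    let listener = TcpListener::bind("127.0.0.1:0")?;
    // Set SO_RCVBUF before connections arrive; accepted sockets inherit it.
    // The kernel rounds the value and doubles it for bookkeeping overhead.
    let val: libc::c_int = 4096;
    let rc = unsafe {
        libc::setsockopt(
            listener.as_raw_fd(),
            libc::SOL_SOCKET,
            libc::SO_RCVBUF,
            &val as *const libc::c_int as *const libc::c_void,
            std::mem::size_of::<libc::c_int>() as libc::socklen_t,
        )
    };
    if rc != 0 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(listener)
}
```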
@@ -612,13 +612,12 @@ mod linux_benches {

/// Open `n/3` TCP + `n/3` UDP + `n/3` ICMP-echo flows, then time `poll()`.
///
/// Mirrors `poll_with_n_flows` (TCP-only) but exercises Phase 4's
/// unified `flow_table` with all three protocols populated. Catches
/// enum-dispatch + filter regressions at scale: each `relay_*_data`
/// loop now `filter(|k| matches!(k, FlowKey::Foo(_)))` over the unified
/// table, so per-protocol scan cost is `O(total_flows)` not
/// `O(this_protocol's_flows)`. This bench is the regression gate for
/// that change.
/// Mirrors `poll_with_n_flows` (TCP-only) but exercises the unified
/// `flow_table` with all three protocols populated. Catches enum-dispatch
/// and filter regressions at scale: each `relay_*_data` loop filters
/// by `FlowKey` variant over the unified table, so per-protocol scan cost
/// is `O(total_flows)` not `O(this_protocol's_flows)`. This bench is the
/// regression gate for that property.
#[divan::bench(args = [3, 99, 999])]
fn poll_with_n_mixed_flows(bencher: Bencher, n: usize) {
let mut stack = SlirpBackend::new().unwrap();
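The per-protocol scan cost this doc comment calls out comes from filtering one unified map by key variant. A reduced sketch of that access pattern; the real `FlowKey`/`FlowEntry` definitions in slirp.rs are richer than this.

```rust
// Illustrative only; variant shapes are assumptions, not the crate's types.
use std::collections::HashMap;

#[derive(Hash, PartialEq, Eq)]
enum FlowKey {
    Tcp { guest_port: u16 },
    Udp { guest_port: u16 },
    IcmpEcho { ident: u16 },
}

/// Each relay pass walks the whole table and keeps only its own variant,
/// so the scan is O(total_flows) even when this protocol owns few of them.
fn tcp_flow_count(table: &HashMap<FlowKey, u64>) -> usize {
    table
        .keys()
        .filter(|k| matches!(k, FlowKey::Tcp { .. }))
        .count()
}
```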
@@ -649,10 +648,10 @@ mod linux_benches {

/// Insert + remove `n` flow-table entries using synthetic data.
///
/// Pure-compute baseline for the unified `HashMap<FlowKey, FlowEntry>`
/// in Phase 4. Phase 5+ reference number for hasher experiments
/// (foldhash, ahash, SipHash) or container-shape changes (e.g.
/// hashbrown raw API). Uses synthetic `u32` values instead of real
/// Pure-compute baseline for the unified `HashMap<FlowKey, FlowEntry>`.
/// Reference number for hasher experiments (foldhash, ahash, SipHash)
/// or container-shape changes (e.g. hashbrown raw API). Uses synthetic
/// `u32` values instead of real
/// `TcpNatEntry` (which requires TcpStream) to isolate HashMap
/// mechanics from socket cloning overhead — the real cost is
/// HashMap insert/remove, not socket ops.
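The hasher experiments this baseline is meant to anchor are typically a one-line change on the map type. A sketch, assuming `ahash` as the example replacement (the code in this PR still uses std's default SipHash-based hasher).

```rust
// Illustrative only: swapping the HashMap hasher for a synthetic workload.
use std::collections::HashMap;

type FastMap<K, V> = HashMap<K, V, ahash::RandomState>;

fn synthetic_insert_remove(n: u32) {
    let mut table: FastMap<u32, u32> = FastMap::default();
    for key in 0..n {
        table.insert(key, key); // same synthetic u32 values the bench uses
    }
    for key in 0..n {
        table.remove(&key);
    }
}
```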
@@ -784,8 +783,8 @@ mod linux_benches {
}

/// Pure-compute cost of synthesizing an inbound SYN frame for
/// port-forwarding (Phase 5.5b.2). No stack allocation or guest frame
/// processing — just the `build_tcp_packet_static` wire encoding.
/// port-forwarding. No stack allocation or guest frame processing —
/// just the `build_tcp_packet_static` wire encoding.
///
/// Expected magnitude: sub-microsecond (pure packet construction).
///
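For a sense of what "just the wire encoding" means here, the TCP layer of a SYN is a fixed 20-byte header with the SYN flag set. The sketch below hand-rolls only that layer (checksum zeroed, no IPv4/Ethernet framing) and is not the crate's `build_tcp_packet_static`.

```rust
/// Illustrative minimal TCP header for an opening SYN. Real code must also
/// emit IPv4 + Ethernet layers and a valid checksum.
fn sketch_tcp_syn_header(src_port: u16, dst_port: u16, seq: u32, window: u16) -> [u8; 20] {
    let mut h = [0u8; 20];
    h[0..2].copy_from_slice(&src_port.to_be_bytes());
    h[2..4].copy_from_slice(&dst_port.to_be_bytes());
    h[4..8].copy_from_slice(&seq.to_be_bytes());
    // Bytes 8..12 (ACK number) stay zero: an opening SYN acknowledges nothing.
    h[12] = 5 << 4; // data offset: five 32-bit words, no TCP options
    h[13] = 0x02; // flags: SYN only
    h[14..16].copy_from_slice(&window.to_be_bytes());
    // Bytes 16..18 (checksum) left zero in this sketch; 18..20 (urgent ptr) unused.
    h
}
```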
@@ -843,8 +842,8 @@ mod linux_benches {
/// not a bug. Regressions in the inbound state machine or the listener
/// poll loop will shift the distribution upward beyond 50 ms.
///
/// Phase 5.5b baseline. Regressions in the inbound state machine or
/// listener-poll loop will surface numerically against this measurement.
/// Regressions in the inbound state machine or listener-poll loop will
/// surface numerically against this measurement.
#[divan::bench(sample_count = 20, sample_size = 1)]
fn port_forward_accept_latency(bencher: Bencher) {
const GUEST_PORT: u16 = 8080;
@@ -897,4 +896,144 @@ mod linux_benches {
worker.join().expect("worker thread panicked");
});
}

/// Cost of one `drain_to_guest` call when one TCP flow is `Established`
/// and the host kernel has data ready to relay.
///
/// Captures the per-packet SLIRP dispatch overhead via epoll: epoll_wait
/// (non-blocking, zero-timeout), readiness scan, peek, and Ethernet frame
/// construction. Only the flows with data ready are dispatched — flows
/// with nothing to relay are skipped.
///
/// This bench cannot exercise the `net_poll_thread` epoll cycle (adaptive
/// 5 ms active / 50 ms idle timeout; that thread does not run inside divan).
/// The wall-clock latency floor
/// is captured separately by `voidbox-network-bench`'s `tcp_rx_latency_us_p50`
/// field; see that binary's `Report` struct for the measurement shape.
///
/// Requires the `bench-helpers` feature (compile with
/// `cargo bench --features bench-helpers`).
#[cfg(feature = "bench-helpers")]
#[divan::bench(sample_count = 50, sample_size = 10)]
fn tcp_rx_latency_one_packet(bencher: Bencher) {
use smoltcp::wire::TcpControl;
use std::io::Write;
use std::net::TcpListener;

const GUEST_SRC_PORT: u16 = 49155;
const INITIAL_GUEST_SEQ: u32 = 5000;
const PAYLOAD: &[u8] = &[0xAB; 64];

// Build a fresh stack with one Established TCP flow. Setup happens
// outside the timed loop so divan only measures the relay dispatch.
let listener = TcpListener::bind("127.0.0.1:0").unwrap();
let host_port = listener.local_addr().unwrap().port();
let server_thread = thread::spawn(move || listener.accept().unwrap());

let mut stack = SlirpBackend::new().unwrap();

// 3-way handshake: guest sends SYN → stack produces SYN-ACK → guest
// sends ACK. This mirrors `tcp_bulk_throughput_1mb` setup.
let syn = build_tcp_syn_for_latency_bench(GUEST_SRC_PORT, host_port, INITIAL_GUEST_SEQ);
stack.process_guest_frame(&syn).unwrap();

// Drain for up to 200 ms to collect the SYN-ACK.
let mut drain_frames: Vec<Vec<u8>> = Vec::new();
let gateway_seq = {
let deadline = std::time::Instant::now() + Duration::from_millis(200);
loop {
drain_frames.clear();
stack.drain_to_guest(&mut drain_frames);
if let Some((seq, _, _, _)) = drain_frames
.iter()
.find_map(|f| parse_tcp_to_guest_frame(f))
{
break seq;
}
if std::time::Instant::now() > deadline {
panic!("no SYN-ACK within deadline");
}
thread::sleep(Duration::from_millis(5));
}
};

// Complete the handshake: guest sends ACK.
let ack = build_tcp_data_frame(
SLIRP_GATEWAY_IP,
GUEST_SRC_PORT,
host_port,
INITIAL_GUEST_SEQ + 1,
gateway_seq + 1,
TcpControl::None,
&[],
);
stack.process_guest_frame(&ack).unwrap();

// The server thread accepted the connection; grab the socket.
let (mut server_sock, _) = server_thread.join().unwrap();
server_sock
.set_nonblocking(true)
.expect("server non-blocking");

// Set up state for the timed loop.
let mut out: Vec<Vec<u8>> = Vec::with_capacity(8);
let guest_seq = INITIAL_GUEST_SEQ + 1;

// Prime: put one payload in the kernel buffer before the first
// iteration begins so the first measured call sees a ready event.
let _ = server_sock.write(PAYLOAD);

bencher.bench_local(|| {
out.clear();
// Refill the kernel buffer from the previous iteration's drain.
// write() may return EAGAIN if the buffer is full; that is fine —
// the previous iteration's peek left data in place.
let _ = server_sock.write(divan::black_box(PAYLOAD));

// The cost we are measuring: one non-blocking epoll_wait + relay.
divan::black_box(&mut stack).drain_to_guest(&mut out);

// Observe the drained frames so the compiler cannot optimize the relay
// call above away; `out` itself is cleared at the top of each iteration.
divan::black_box(&out);

// Keep the TCP stream happy: send an ACK for any data the relay
// fed into inject_to_guest (frame content doesn't matter for the
// bench; we just need the host stream not to stall).
for frame in &out {
if let Some((data_seq, _, _, plen)) = parse_tcp_to_guest_frame(frame) {
if plen > 0 {
let ack_back = build_tcp_data_frame(
SLIRP_GATEWAY_IP,
GUEST_SRC_PORT,
host_port,
guest_seq,
data_seq.wrapping_add(plen as u32),
TcpControl::None,
&[],
);
let _ = stack.process_guest_frame(&ack_back);
}
}
}
});
}

/// Build a SYN frame from the guest toward the host for the latency bench.
///
/// Identical to `build_tcp_data_frame` with `TcpControl::Syn` and zero
/// `ack`. Kept as a separate function to document intent: this is the
/// opening segment of the 3-way handshake used by
/// `tcp_rx_latency_one_packet`.
#[cfg(feature = "bench-helpers")]
fn build_tcp_syn_for_latency_bench(src_port: u16, dst_port: u16, seq: u32) -> Vec<u8> {
build_tcp_data_frame(
SLIRP_GATEWAY_IP,
src_port,
dst_port,
seq,
0,
smoltcp::wire::TcpControl::Syn,
&[],
)
}
} // mod linux_benches