diff --git a/src/network/slirp.rs b/src/network/slirp.rs
index 111c0916..38b92187 100644
--- a/src/network/slirp.rs
+++ b/src/network/slirp.rs
@@ -912,6 +912,7 @@ impl SlirpBackend {
             Err(_) => return Ok(()),
         };
+        let crr_trace_enter = Instant::now();
         // Track inject_to_guest growth so we can wake the net-poll
         // thread if this call queued any frames. The poll thread blocks
         // in epoll_wait waiting on FD readiness; an ACK queued during
@@ -933,17 +934,33 @@ impl SlirpBackend {
             }
         }
-        if self.inject_to_guest.len() > inject_len_before {
+        let queued = self.inject_to_guest.len() > inject_len_before;
+        if queued {
             self.epoll_waker.wake();
         }
+        if queued {
+            tracing::trace!(
+                target: "slirp_crr",
+                ethertype = ?eth.ethertype(),
+                queued = self.inject_to_guest.len() - inject_len_before,
+                process_us = crr_trace_enter.elapsed().as_micros() as u64,
+                "process_guest_frame: queued frames + waker.wake()"
+            );
+        }
         Ok(())
     }
 
     /// Drain frames destined to the guest into `out`, reusing the caller's
     /// buffer across calls and avoiding a fresh allocation on every tick.
     ///
+    /// crr-trace: emits a `slirp_crr` event when ≥1 frame is drained, so the
+    /// timeline shows when the net-poll thread actually picks the frames up
+    /// after an `epoll_waker.wake()`.
+    ///
     /// See [`crate::network::NetworkBackend::drain_to_guest`].
     pub fn drain_to_guest(&mut self, out: &mut Vec<Vec<u8>>) {
+        let crr_trace_enter = Instant::now();
+        let crr_trace_inject_before = self.inject_to_guest.len();
         // Check rx_queue size before polling.
         let rx_count = {
             let q = self.queue.lock().unwrap();
@@ -1029,7 +1046,18 @@ impl SlirpBackend {
             }
             out.append(&mut q.tx_queue);
         }
+        let inject_drained = self.inject_to_guest.len();
         out.append(&mut self.inject_to_guest);
+        if inject_drained > 0 || crr_trace_inject_before > 0 {
+            tracing::trace!(
+                target: "slirp_crr",
+                inject_drained,
+                inject_before = crr_trace_inject_before,
+                ready_events = ready.len(),
+                drain_us = crr_trace_enter.elapsed().as_micros() as u64,
+                "drain_to_guest: emitted to net-poll"
+            );
+        }
     }
 
     /// Poll the stack and return ethernet frames to send to the guest.
@@ -1693,6 +1721,13 @@ impl SlirpBackend {
                 &[],
             );
             self.inject_to_guest.push(syn_ack);
+            tracing::trace!(
+                target: "slirp_crr",
+                guest_port = key.guest_src_port,
+                dst_port = key.dst_port,
+                host_fd,
+                "syn_ack queued (immediate connect path)"
+            );
             debug!(
                 "SLIRP TCP: SYN-ACK sent for {}:{} (immediate connect)",
                 dst_ip, dst_port
diff --git a/src/vmm/mod.rs b/src/vmm/mod.rs
index 97fe2d0f..569401a8 100644
--- a/src/vmm/mod.rs
+++ b/src/vmm/mod.rs
@@ -1661,6 +1661,8 @@ fn net_poll_thread(net_dev: Arc>, vm: Arc, running: A
         // adaptive timeout in the active 5 ms cadence even though
         // `epoll_events.is_empty()`.
         let mut raw_kernel_events: usize = 0;
+        let crr_trace_wait_start = std::time::Instant::now();
+        let crr_trace_timeout_used = epoll_wait_timeout;
         if let Some(ref ep_arc) = epoll_arc {
             raw_kernel_events = ep_arc
                 .wait_with_timeout(&mut epoll_events, epoll_wait_timeout)
@@ -1668,6 +1670,16 @@ fn net_poll_thread(net_dev: Arc>, vm: Arc, running: A
                 .unwrap_or(0);
         } else {
             std::thread::sleep(FALLBACK_SLEEP);
         }
+        if raw_kernel_events > 0 {
+            tracing::trace!(
+                target: "slirp_crr",
+                raw_events = raw_kernel_events,
+                visible_events = epoll_events.len(),
+                wait_us = crr_trace_wait_start.elapsed().as_micros() as u64,
+                timeout_ms = crr_trace_timeout_used.as_millis() as u64,
+                "net_poll_thread: epoll_wait returned"
+            );
+        }
         // Adapt the next-cycle timeout based on this cycle's outcome.
         // Any kernel event (real readiness OR self-pipe wake from the
@@ -1692,6 +1704,7 @@ fn net_poll_thread(net_dev: Arc>, vm: Arc, running: A
             }
         }
+        let crr_trace_inject_start = std::time::Instant::now();
         let has_interrupt = {
             let mut guard = match net_dev.lock() {
                 Ok(g) => g,
@@ -1715,6 +1728,11 @@ fn net_poll_thread(net_dev: Arc>, vm: Arc, running: A
             unsafe {
                 libc::ioctl(vm_fd, KVM_IRQ_LINE as _, &deassert_irq);
             }
+            tracing::trace!(
+                target: "slirp_crr",
+                inject_us = crr_trace_inject_start.elapsed().as_micros() as u64,
+                "net_poll_thread: try_inject_rx + IRQ raised"
+            );
         }
     }
 