Skip to content

Refactor mock ring accessor API; add typed accessors #352

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions virtio-queue/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,14 @@

## Added

- Introduced `RingAccess` trait to handle endianness conversion of ring elements in mock tests.
- Added typed accessor methods to `SplitQueueRing` for safer and more readable load/store of ring fields (e.g., `load_idx`, `store_ring_entry`).

## Changed

- Refactored tests and mock infrastructure to use typed accessors instead of raw `Ref`/`ArrayRef` and manual endianness conversion.
- Made `SplitQueueRing` generic over `RingAccess` types.

## Fixed

# v0.16.0
Expand Down
11 changes: 11 additions & 0 deletions virtio-queue/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,17 @@ following ones:
* `AvailIter` - is a consuming iterator over all available descriptor chain
heads in the queue.

### Queue helper methods

To help with directly manipulating the memory backing a queue, the test utilities expose a set of convenience methods:

* `load_idx` / `store_idx` – read or write the ring `idx` field.
* `load_ring_entry` / `store_ring_entry` – read or write individual ring entries.
* `load_flags` / `store_flags` and `load_event` / `store_event` – access the auxiliary fields of the rings.
* `start` / `end` – get the guest address range covered by the ring.

These helpers are available on the objects returned by the `avail()` and `used()` accessors provided by `MockSplitQueue`, and they make it easy to set up a queue in tests or simple examples.

## Save/Restore Queue

The `Queue` allows saving the state through the `state` function which returns
Expand Down
4 changes: 2 additions & 2 deletions virtio-queue/src/desc/split.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,8 @@ use virtio_bindings::bindings::virtio_ring::{
/// # let desc = RawDescriptor::from(SplitDescriptor::new(0x2000, 0x1000, VRING_DESC_F_WRITE as u16, 0));
/// # vq.desc_table().store(1, desc);
/// #
/// # vq.avail().ring().ref_at(0).unwrap().store(u16::to_le(0));
/// # vq.avail().idx().store(u16::to_le(1));
/// # vq.avail().store_ring_entry(0, 0).unwrap();
/// # vq.avail().store_idx(1);
/// # q
/// # }
/// let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
Expand Down
139 changes: 131 additions & 8 deletions virtio-queue/src/mock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -117,9 +117,37 @@ impl<'a, M: GuestMemory, T: ByteValued> ArrayRef<'a, M, T> {
}
}

/// Trait for converting queue values to and from little-endian representation.
///
/// Ring fields live in guest memory in their on-wire (little-endian) form;
/// implementors define how a host-order value maps to and from that
/// representation so the mock accessors can load/store values portably.
pub trait RingAccess: ByteValued + Copy {
/// Convert a host-order value to its little-endian representation.
fn to_le(self) -> Self;
/// Convert a little-endian value back to host order.
fn from_le(val: Self) -> Self;
}

impl RingAccess for u16 {
// Delegates to the inherent `u16::to_le`: a byte swap on big-endian hosts,
// a no-op on little-endian ones.
fn to_le(self) -> Self {
u16::to_le(self)
}

fn from_le(val: Self) -> Self {
u16::from_le(val)
}
}

impl RingAccess for VirtqUsedElem {
// Identity conversions: the element is stored to guest memory as-is.
// NOTE(review): `VirtqUsedElem` has multi-byte fields, so this skips any
// byte-swap on big-endian hosts — presumably acceptable because these
// mocks target little-endian test environments; confirm if BE hosts
// ever need to be supported.
fn to_le(self) -> Self {
self
}

fn from_le(val: Self) -> Self {
val
}
}

/// Represents a virtio queue ring. The only difference between the used and available rings,
/// is the ring element type.
pub struct SplitQueueRing<'a, M, T: ByteValued> {
pub struct SplitQueueRing<'a, M, T: RingAccess> {
flags: Ref<'a, M, u16>,
// The value stored here should more precisely be a `Wrapping<u16>`, but that would require a
// `ByteValued` impl for this type, which is not provided in vm-memory. Implementing the trait
Expand All @@ -131,7 +159,7 @@ pub struct SplitQueueRing<'a, M, T: ByteValued> {
event: Ref<'a, M, u16>,
}

impl<'a, M: GuestMemory, T: ByteValued> SplitQueueRing<'a, M, T> {
impl<'a, M: GuestMemory, T: RingAccess> SplitQueueRing<'a, M, T> {
/// Create a new `SplitQueueRing` instance
pub fn new(mem: &'a M, base: GuestAddress, len: u16) -> Self {
let event_addr = base
Expand Down Expand Up @@ -165,14 +193,44 @@ impl<'a, M: GuestMemory, T: ByteValued> SplitQueueRing<'a, M, T> {
.unwrap()
}

/// Return a reference to the idx field.
pub fn idx(&self) -> &Ref<'a, M, u16> {
&self.idx
/// Load the value of the `flags` field, converting it to host order.
pub fn load_flags(&self) -> u16 {
let raw = self.flags.load();
u16::from_le(raw)
}

/// Store the `flags` field, converting the value to little-endian first.
pub fn store_flags(&self, val: u16) {
let le = u16::to_le(val);
self.flags.store(le)
}

/// Return a reference to the ring field.
pub fn ring(&self) -> &ArrayRef<'a, M, T> {
&self.ring
/// Load the value of the `idx` field, converting it to host order.
pub fn load_idx(&self) -> u16 {
let raw = self.idx.load();
u16::from_le(raw)
}

/// Store the `idx` field, converting the value to little-endian first.
pub fn store_idx(&self, val: u16) {
let le = u16::to_le(val);
self.idx.store(le)
}

/// Load a ring entry at `index`, converting it to host order.
///
/// Returns an error if `index` is outside the ring.
pub fn load_ring_entry(&self, index: usize) -> Result<T, MockError> {
let slot = self.ring.ref_at(index)?;
Ok(T::from_le(slot.load()))
}

/// Store a ring entry at `index`, converting the value to little-endian first.
///
/// Returns an error if `index` is outside the ring.
pub fn store_ring_entry(&self, index: usize, val: T) -> Result<(), MockError> {
let slot = self.ring.ref_at(index)?;
slot.store(val.to_le());
Ok(())
}

/// Load the value of the event field, converting it to host order.
pub fn load_event(&self) -> u16 {
let raw = self.event.load();
u16::from_le(raw)
}

/// Store the event field, converting the value to little-endian first.
pub fn store_event(&self, val: u16) {
let le = u16::to_le(val);
self.event.store(le)
}
}

Expand Down Expand Up @@ -523,3 +581,68 @@ impl<'a, M: GuestMemory> MockSplitQueue<'a, M> {
Ok(())
}
}

#[cfg(test)]
mod tests {
use super::*;
use vm_memory::{GuestAddress, GuestMemoryMmap};

// SplitQueueRing load/store API coverage for AvailRing (u16)
// SplitQueueRing load/store API coverage for AvailRing (u16)
#[test]
fn test_avail_ring_load_store() {
let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
let len = 8u16;
let base = GuestAddress(0x1000);
let avail: AvailRing<_> = AvailRing::new(mem, base, len);

// Write every field through its typed accessor...
avail.store_flags(0x55aa);
avail.store_idx(7);
avail.store_ring_entry(3, 0xbeef).unwrap();
avail.store_event(0x1234);

// ...then read each one back and check the round trip.
assert_eq!(avail.load_flags(), 0x55aa);
assert_eq!(avail.load_idx(), 7);
assert_eq!(avail.load_ring_entry(3).unwrap(), 0xbeef);
assert_eq!(avail.load_event(), 0x1234);

// Writing one slot past the end of the ring must error out.
assert!(matches!(
avail.store_ring_entry(len as usize, 0).unwrap_err(),
MockError::InvalidIndex
));
}

// SplitQueueRing load/store API coverage for UsedRing (VirtqUsedElem)
// SplitQueueRing load/store API coverage for UsedRing (VirtqUsedElem)
#[test]
fn test_used_ring_load_store() {
let mem = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x20000)]).unwrap();
let len = 8u16;
let base = GuestAddress(0x3000);
let ring: UsedRing<_> = UsedRing::new(mem, base, len);

// flags
ring.store_flags(0xccdd);
assert_eq!(ring.load_flags(), 0xccdd);

// idx
ring.store_idx(2);
assert_eq!(ring.load_idx(), 2);

// ring entry
let elem = VirtqUsedElem::new(42, 0x1000);
ring.store_ring_entry(0, elem).unwrap();
let read = ring.load_ring_entry(0).unwrap();
assert_eq!(read.id(), 42);
assert_eq!(read.len(), 0x1000);

// event field
ring.store_event(0xdead);
assert_eq!(ring.load_event(), 0xdead);

// Out-of-bounds accesses must error, mirroring the coverage in the
// avail-ring test above (`elem` is `Copy`, so it can be reused here).
assert!(matches!(
ring.store_ring_entry(len as usize, elem).unwrap_err(),
MockError::InvalidIndex
));
assert!(matches!(
ring.load_ring_entry(len as usize).unwrap_err(),
MockError::InvalidIndex
));
}
}
26 changes: 13 additions & 13 deletions virtio-queue/src/queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -870,19 +870,19 @@ mod tests {
let mut q: Queue = vq.create_queue().unwrap();

assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(0));
assert_eq!(u16::from_le(vq.used().idx().load()), 0);
assert_eq!(vq.used().load_idx(), 0);

// index too large
assert!(q.add_used(mem, 16, 0x1000).is_err());
assert_eq!(u16::from_le(vq.used().idx().load()), 0);
assert_eq!(vq.used().load_idx(), 0);

// should be ok
q.add_used(mem, 1, 0x1000).unwrap();
assert_eq!(q.next_used, Wrapping(1));
assert_eq!(q.used_idx(mem, Ordering::Acquire).unwrap(), Wrapping(1));
assert_eq!(u16::from_le(vq.used().idx().load()), 1);
assert_eq!(vq.used().load_idx(), 1);

let x = vq.used().ring().ref_at(0).unwrap().load();
let x = vq.used().load_ring_entry(0).unwrap();
assert_eq!(x.id(), 1);
assert_eq!(x.len(), 0x1000);
}
Expand Down Expand Up @@ -1074,7 +1074,7 @@ mod tests {
// Update the index of the chain that can be consumed to not be the last one.
// This enables us to consume chains in multiple iterations as opposed to consuming
// all the driver written chains at once.
vq.avail().idx().store(u16::to_le(2));
vq.avail().store_idx(2);
// No descriptor chains are consumed at this point.
assert_eq!(q.next_avail(), 0);

Expand Down Expand Up @@ -1107,7 +1107,7 @@ mod tests {
assert_eq!(q.next_avail(), 2);
assert_eq!(q.next_used(), 2);
// Let the device know it can consume one more chain.
vq.avail().idx().store(u16::to_le(3));
vq.avail().store_idx(3);
i = 0;

loop {
Expand All @@ -1131,7 +1131,7 @@ mod tests {
// ring. Ideally this should be done on a separate thread.
// Because of this update, the loop should be iterated again to consume the new
// available descriptor chains.
vq.avail().idx().store(u16::to_le(4));
vq.avail().store_idx(4);
if !q.enable_notification(mem).unwrap() {
break;
}
Expand All @@ -1143,7 +1143,7 @@ mod tests {

// Set an `idx` that is bigger than the number of entries added in the ring.
// This is an allowed scenario, but the indexes of the chain will have unexpected values.
vq.avail().idx().store(u16::to_le(7));
vq.avail().store_idx(7);
loop {
q.disable_notification(mem).unwrap();

Expand Down Expand Up @@ -1198,7 +1198,7 @@ mod tests {

vq.add_desc_chains(&descs, 0).unwrap();
// Let the device know it can consume chains with the index < 2.
vq.avail().idx().store(u16::to_le(3));
vq.avail().store_idx(3);
// No descriptor chains are consumed at this point.
assert_eq!(q.next_avail(), 0);
assert_eq!(q.next_used(), 0);
Expand Down Expand Up @@ -1231,7 +1231,7 @@ mod tests {

// Decrement `idx` which should be forbidden. We don't enforce this thing, but we should
// test that we don't panic in case the driver decrements it.
vq.avail().idx().store(u16::to_le(1));
vq.avail().store_idx(1);
// Invalid available ring index
assert!(q.iter(mem).is_err());
}
Expand Down Expand Up @@ -1268,16 +1268,16 @@ mod tests {
// When the number of chains exposed by the driver is equal to or less than the queue
// size, the available ring index is valid and constructs an iterator successfully.
let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size);
vq.avail().idx().store(u16::to_le(avail_idx.0));
vq.avail().store_idx(avail_idx.0);
assert!(q.iter(mem).is_ok());
let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size - 1);
vq.avail().idx().store(u16::to_le(avail_idx.0));
vq.avail().store_idx(avail_idx.0);
assert!(q.iter(mem).is_ok());

// When the number of chains exposed by the driver is larger than the queue size, the
// available ring index is invalid and produces an error from constructing an iterator.
let avail_idx = Wrapping(q.next_avail()) + Wrapping(queue_size + 1);
vq.avail().idx().store(u16::to_le(avail_idx.0));
vq.avail().store_idx(avail_idx.0);
assert!(q.iter(mem).is_err());
}

Expand Down