From 45ea8bac5c1b89d4aa40c7ecbbcb32e205c4bae2 Mon Sep 17 00:00:00 2001 From: Alexandru Agache Date: Wed, 29 Sep 2021 14:55:41 +0300 Subject: [PATCH 01/10] relax trait bounds for AvailIter & DescriptorChain We are using a `: GuestAddressSpace` trait bound for `AvailIter` and `DescriptorChain`, but the objects are actually only interested in the fact that `M` can dereference to a `GuestMemory` implementation. Signed-off-by: Alexandru Agache --- crates/devices/virtio-blk/src/request.rs | 18 +++++--- crates/virtio-queue/src/lib.rs | 52 ++++++++++++++++-------- 2 files changed, 46 insertions(+), 24 deletions(-) diff --git a/crates/devices/virtio-blk/src/request.rs b/crates/devices/virtio-blk/src/request.rs index 7da9c6af..cabd9e9d 100644 --- a/crates/devices/virtio-blk/src/request.rs +++ b/crates/devices/virtio-blk/src/request.rs @@ -24,6 +24,7 @@ //! approach. use std::fmt::{self, Display}; +use std::ops::Deref; use std::result; use crate::defs::{ @@ -32,9 +33,7 @@ use crate::defs::{ }; use virtio_queue::{Descriptor, DescriptorChain}; -use vm_memory::{ - ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryError, -}; +use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError}; /// Block request parsing errors. #[derive(Debug)] @@ -159,7 +158,10 @@ impl Request { } // Checks that a descriptor meets the minimal requirements for a valid status descriptor. - fn check_status_desc(mem: &M, desc: Descriptor) -> Result<()> { + fn check_status_desc(mem: &M, desc: Descriptor) -> Result<()> + where + M: GuestMemory + ?Sized, + { // The status MUST always be writable. if !desc.is_write_only() { return Err(Error::UnexpectedReadOnlyDescriptor); @@ -202,7 +204,11 @@ impl Request { /// # Arguments /// * `desc_chain` - A mutable reference to the descriptor chain that should point to the /// buffers of a virtio block request. 
- pub fn parse(desc_chain: &mut DescriptorChain) -> Result { + pub fn parse(desc_chain: &mut DescriptorChain) -> Result + where + M: Deref, + M::Target: GuestMemory, + { let chain_head = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?; // The head contains the request type which MUST be readable. if chain_head.is_write_only() { @@ -235,7 +241,7 @@ impl Request { } let status_desc = desc; - Request::check_status_desc::<::M>(desc_chain.memory(), status_desc)?; + Request::check_status_desc(desc_chain.memory(), status_desc)?; request.status_addr = status_desc.addr(); Ok(request) diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index 40184d92..cd89fe86 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -148,8 +148,8 @@ unsafe impl ByteValued for Descriptor {} /// A virtio descriptor chain. #[derive(Clone, Debug)] -pub struct DescriptorChain { - mem: M::T, +pub struct DescriptorChain { + mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16, @@ -158,9 +158,13 @@ pub struct DescriptorChain { is_indirect: bool, } -impl DescriptorChain { +impl DescriptorChain +where + M: Deref, + M::Target: GuestMemory, +{ fn with_ttl( - mem: M::T, + mem: M, desc_table: GuestAddress, queue_size: u16, ttl: u16, @@ -178,7 +182,7 @@ impl DescriptorChain { } /// Create a new `DescriptorChain` instance. - fn new(mem: M::T, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self { + fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self { Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index) } @@ -189,8 +193,8 @@ impl DescriptorChain { /// Return a `GuestMemory` object that can be used to access the buffers /// pointed to by the descriptor chain. - pub fn memory(&self) -> &M::M { - &*self.mem + pub fn memory(&self) -> &M::Target { + self.mem.deref() } /// Returns an iterator that only yields the readable descriptors in the chain. 
@@ -239,7 +243,11 @@ impl DescriptorChain { } } -impl Iterator for DescriptorChain { +impl Iterator for DescriptorChain +where + M: Deref, + M::Target: GuestMemory, +{ type Item = Descriptor; /// Returns the next descriptor in this descriptor chain, if there is one. @@ -284,12 +292,16 @@ impl Iterator for DescriptorChain { /// An iterator for readable or writable descriptors. #[derive(Clone)] -pub struct DescriptorChainRwIter { +pub struct DescriptorChainRwIter { chain: DescriptorChain, writable: bool, } -impl Iterator for DescriptorChainRwIter { +impl Iterator for DescriptorChainRwIter +where + M: Deref, + M::Target: GuestMemory, +{ type Item = Descriptor; /// Returns the next descriptor in this descriptor chain, if there is one. @@ -313,9 +325,9 @@ impl Iterator for DescriptorChainRwIter { // We can't derive Debug, because rustc doesn't generate the M::T: Debug // constraint -impl Debug for DescriptorChainRwIter +impl Debug for DescriptorChainRwIter where - M::T: Debug, + M: Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("DescriptorChainRwIter") @@ -327,8 +339,8 @@ where /// Consuming iterator over all available descriptor chain heads in the queue. #[derive(Debug)] -pub struct AvailIter<'b, M: GuestAddressSpace> { - mem: M::T, +pub struct AvailIter<'b, M> { + mem: M, desc_table: GuestAddress, avail_ring: GuestAddress, last_index: Wrapping, @@ -336,7 +348,7 @@ pub struct AvailIter<'b, M: GuestAddressSpace> { next_avail: &'b mut Wrapping, } -impl<'b, M: GuestAddressSpace> AvailIter<'b, M> { +impl<'b, M> AvailIter<'b, M> { /// Goes back one position in the available descriptor chain offered by the driver. /// /// Rust does not support bidirectional iterators. 
This is the only way to revert the effect @@ -349,7 +361,11 @@ impl<'b, M: GuestAddressSpace> AvailIter<'b, M> { } } -impl<'b, M: GuestAddressSpace> Iterator for AvailIter<'b, M> { +impl<'b, M> Iterator for AvailIter<'b, M> +where + M: Clone + Deref, + M::Target: GuestMemory, +{ type Item = DescriptorChain; fn next(&mut self) -> Option { @@ -563,7 +579,7 @@ pub struct QueueState { impl QueueState { /// Get a consuming iterator over all available descriptor chain heads offered by the driver. - pub fn iter(&mut self, mem: M::T) -> Result, Error> { + pub fn iter(&mut self, mem: M::T) -> Result, Error> { self.avail_idx(&mem, Ordering::Acquire) .map(move |idx| AvailIter { mem, @@ -1105,7 +1121,7 @@ impl> Queue { impl Queue> { /// A consuming iterator over all available descriptor chain heads offered by the driver. - pub fn iter(&mut self) -> Result, Error> { + pub fn iter(&mut self) -> Result, Error> { self.state.iter(self.mem.memory()) } } From e1e6ac0c7d21ed4d630a952c3d8d5f1fd51cb078 Mon Sep 17 00:00:00 2001 From: Alexandru Agache Date: Wed, 29 Sep 2021 15:03:32 +0300 Subject: [PATCH 02/10] remove generic type parameter M from QueueState Signed-off-by: Alexandru Agache --- crates/virtio-queue/src/lib.rs | 130 +++++++++++++++++++------------- crates/virtio-queue/src/mock.rs | 4 +- 2 files changed, 78 insertions(+), 56 deletions(-) diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index cd89fe86..0a260841 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -21,7 +21,6 @@ pub mod mock; use std::convert::TryFrom; use std::fmt::{self, Debug, Display}; -use std::marker::PhantomData; use std::mem::size_of; use std::num::Wrapping; use std::ops::{Deref, DerefMut}; @@ -423,15 +422,15 @@ impl VirtqUsedElem { unsafe impl ByteValued for VirtqUsedElem {} /// Struct to hold an exclusive reference to the underlying `QueueState` object. 
-pub enum QueueStateGuard<'a, M: GuestAddressSpace> { +pub enum QueueStateGuard<'a> { /// A reference to a `QueueState` object. - StateObject(&'a mut QueueState), + StateObject(&'a mut QueueState), /// A `MutexGuard` for a `QueueState` object. - MutexGuard(MutexGuard<'a, QueueState>), + MutexGuard(MutexGuard<'a, QueueState>), } -impl<'a, M: GuestAddressSpace> Deref for QueueStateGuard<'a, M> { - type Target = QueueState; +impl<'a> Deref for QueueStateGuard<'a> { + type Target = QueueState; fn deref(&self) -> &Self::Target { match self { @@ -441,7 +440,7 @@ impl<'a, M: GuestAddressSpace> Deref for QueueStateGuard<'a, M> { } } -impl<'a, M: GuestAddressSpace> DerefMut for QueueStateGuard<'a, M> { +impl<'a> DerefMut for QueueStateGuard<'a> { fn deref_mut(&mut self) -> &mut Self::Target { match self { QueueStateGuard::StateObject(v) => v, @@ -454,12 +453,12 @@ impl<'a, M: GuestAddressSpace> DerefMut for QueueStateGuard<'a, M> { /// /// To optimize for performance, different implementations of the `QueueStateT` trait may be /// provided for single-threaded context and multi-threaded context. -pub trait QueueStateT { +pub trait QueueStateT { /// Construct an empty virtio queue state object with the given `max_size`. fn new(max_size: u16) -> Self; /// Check whether the queue configuration is valid. - fn is_valid(&self, mem: &M::T) -> bool; + fn is_valid(&self, mem: &M) -> bool; /// Reset the queue to the initial state. fn reset(&mut self); @@ -468,7 +467,7 @@ pub trait QueueStateT { /// /// Logically this method will acquire the underlying lock protecting the `QueueState` Object. /// The lock will be released when the returned object gets dropped. - fn lock(&mut self) -> QueueStateGuard<'_, M>; + fn lock(&mut self) -> QueueStateGuard; /// Get the maximum size of the virtio queue. fn max_size(&self) -> u16; @@ -507,20 +506,21 @@ pub trait QueueStateT { fn set_event_idx(&mut self, enabled: bool); /// Read the `idx` field from the available ring. 
- fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result, Error>; + fn avail_idx(&self, mem: &M, order: Ordering) -> Result, Error>; /// Put a used descriptor head into the used ring. - fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error>; + fn add_used(&mut self, mem: &M, head_index: u16, len: u32) + -> Result<(), Error>; /// Enable notification events from the guest driver. /// /// Return true if one or more descriptors can be consumed from the available ring after /// notifications were enabled (and thus it's possible there will be no corresponding /// notification). - fn enable_notification(&mut self, mem: &M::T) -> Result; + fn enable_notification(&mut self, mem: &M) -> Result; /// Disable notification events from the guest driver. - fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error>; + fn disable_notification(&mut self, mem: &M) -> Result<(), Error>; /// Check whether a notification to the guest is needed. /// @@ -528,7 +528,7 @@ pub trait QueueStateT { /// driver will actually be notified, remember the associated index in the used ring, and /// won't return `true` again until the driver updates `used_event` and/or the notification /// conditions hold once more. - fn needs_notification(&mut self, mem: &M::T) -> Result; + fn needs_notification(&mut self, mem: &M) -> Result; /// Return the index for the next descriptor in the available ring. fn next_avail(&self) -> u16; @@ -543,7 +543,7 @@ const DEFAULT_USED_RING_ADDR: u64 = 0x0; /// Struct to maintain information and manipulate state of a virtio queue. 
#[derive(Clone, Debug)] -pub struct QueueState { +pub struct QueueState { /// The maximal size in elements offered by the device pub max_size: u16, @@ -573,14 +573,16 @@ pub struct QueueState { /// Guest physical address of the used ring pub used_ring: GuestAddress, - - phantom: PhantomData, } -impl QueueState { +impl QueueState { /// Get a consuming iterator over all available descriptor chain heads offered by the driver. - pub fn iter(&mut self, mem: M::T) -> Result, Error> { - self.avail_idx(&mem, Ordering::Acquire) + pub fn iter(&mut self, mem: M) -> Result, Error> + where + M: Deref, + M::Target: GuestMemory + Sized, + { + self.avail_idx(mem.deref(), Ordering::Acquire) .map(move |idx| AvailIter { mem, desc_table: self.desc_table, @@ -593,7 +595,12 @@ impl QueueState { // Helper method that writes `val` to the `avail_event` field of the used ring, using // the provided ordering. - fn set_avail_event(&self, mem: &M::T, val: u16, order: Ordering) -> Result<(), Error> { + fn set_avail_event( + &self, + mem: &M, + val: u16, + order: Ordering, + ) -> Result<(), Error> { let elem_sz = VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size); let offset = VIRTQ_USED_RING_HEADER_SIZE + elem_sz; let addr = self.used_ring.unchecked_add(offset); @@ -602,7 +609,12 @@ impl QueueState { } // Set the value of the `flags` field of the used ring, applying the specified ordering. - fn set_used_flags(&mut self, mem: &M::T, val: u16, order: Ordering) -> Result<(), Error> { + fn set_used_flags( + &mut self, + mem: &M, + val: u16, + order: Ordering, + ) -> Result<(), Error> { mem.store(val, self.used_ring, order) .map_err(Error::GuestMemory) } @@ -611,7 +623,7 @@ impl QueueState { // // Every access in this method uses `Relaxed` ordering because a fence is added by the caller // when appropriate. 
- fn set_notification(&mut self, mem: &M::T, enable: bool) -> Result<(), Error> { + fn set_notification(&mut self, mem: &M, enable: bool) -> Result<(), Error> { if enable { if self.event_idx_enabled { // We call `set_avail_event` using the `next_avail` value, instead of reading @@ -640,7 +652,7 @@ impl QueueState { /// Neither of these interrupt suppression methods are reliable, as they are not synchronized /// with the device, but they serve as useful optimizations. So we only ensure access to the /// virtq_avail.used_event is atomic, but do not need to synchronize with other memory accesses. - fn used_event(&self, mem: &M::T, order: Ordering) -> Result, Error> { + fn used_event(&self, mem: &M, order: Ordering) -> Result, Error> { // Safe because we have validated the queue and access guest // memory through GuestMemory interfaces. let elem_sz = u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE; @@ -653,7 +665,7 @@ impl QueueState { } } -impl QueueStateT for QueueState { +impl QueueStateT for QueueState { fn new(max_size: u16) -> Self { QueueState { max_size, @@ -666,11 +678,10 @@ impl QueueStateT for QueueState { next_used: Wrapping(0), event_idx_enabled: false, signalled_used: None, - phantom: PhantomData, } } - fn is_valid(&self, mem: &M::T) -> bool { + fn is_valid(&self, mem: &M) -> bool { let queue_size = self.size as u64; let desc_table = self.desc_table; let desc_table_size = size_of::() as u64 * queue_size; @@ -728,7 +739,7 @@ impl QueueStateT for QueueState { self.event_idx_enabled = false; } - fn lock(&mut self) -> QueueStateGuard<'_, M> { + fn lock(&mut self) -> QueueStateGuard { QueueStateGuard::StateObject(self) } @@ -793,7 +804,7 @@ impl QueueStateT for QueueState { self.event_idx_enabled = enabled; } - fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result, Error> { + fn avail_idx(&self, mem: &M, order: Ordering) -> Result, Error> { let addr = self.avail_ring.unchecked_add(2); mem.load(addr, order) @@ -801,7 +812,12 @@ impl QueueStateT for 
QueueState { .map_err(Error::GuestMemory) } - fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error> { + fn add_used( + &mut self, + mem: &M, + head_index: u16, + len: u32, + ) -> Result<(), Error> { if head_index >= self.size { error!( "attempted to add out of bounds descriptor to used ring: {}", @@ -847,7 +863,7 @@ impl QueueStateT for QueueState { // break; // } // } - fn enable_notification(&mut self, mem: &M::T) -> Result { + fn enable_notification(&mut self, mem: &M) -> Result { self.set_notification(mem, true)?; // Ensures the following read is not reordered before any previous write operation. fence(Ordering::SeqCst); @@ -862,11 +878,11 @@ impl QueueStateT for QueueState { .map(|idx| idx != self.next_avail) } - fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error> { + fn disable_notification(&mut self, mem: &M) -> Result<(), Error> { self.set_notification(mem, false) } - fn needs_notification(&mut self, mem: &M::T) -> Result { + fn needs_notification(&mut self, mem: &M) -> Result { let used_idx = self.next_used; // Complete all the writes in add_used() before reading the event. @@ -902,18 +918,18 @@ impl QueueStateT for QueueState { /// Struct to maintain information and manipulate state of a virtio queue for multi-threaded /// context. 
#[derive(Clone, Debug)] -pub struct QueueStateSync { - state: Arc>>, +pub struct QueueStateSync { + state: Arc>, } -impl QueueStateT for QueueStateSync { +impl QueueStateT for QueueStateSync { fn new(max_size: u16) -> Self { QueueStateSync { state: Arc::new(Mutex::new(QueueState::new(max_size))), } } - fn is_valid(&self, mem: &M::T) -> bool { + fn is_valid(&self, mem: &M) -> bool { self.state.lock().unwrap().is_valid(mem) } @@ -921,7 +937,7 @@ impl QueueStateT for QueueStateSync { self.state.lock().unwrap().reset(); } - fn lock(&mut self) -> QueueStateGuard<'_, M> { + fn lock(&mut self) -> QueueStateGuard { QueueStateGuard::MutexGuard(self.state.lock().unwrap()) } @@ -957,23 +973,28 @@ impl QueueStateT for QueueStateSync { self.state.lock().unwrap().set_event_idx(enabled); } - fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result, Error> { + fn avail_idx(&self, mem: &M, order: Ordering) -> Result, Error> { self.state.lock().unwrap().avail_idx(mem, order) } - fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error> { + fn add_used( + &mut self, + mem: &M, + head_index: u16, + len: u32, + ) -> Result<(), Error> { self.state.lock().unwrap().add_used(mem, head_index, len) } - fn enable_notification(&mut self, mem: &M::T) -> Result { + fn enable_notification(&mut self, mem: &M) -> Result { self.state.lock().unwrap().enable_notification(mem) } - fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error> { + fn disable_notification(&mut self, mem: &M) -> Result<(), Error> { self.state.lock().unwrap().disable_notification(mem) } - fn needs_notification(&mut self, mem: &M::T) -> Result { + fn needs_notification(&mut self, mem: &M) -> Result { self.state.lock().unwrap().needs_notification(mem) } @@ -988,14 +1009,14 @@ impl QueueStateT for QueueStateSync { /// A convenient wrapper struct for a virtio queue, with associated GuestMemory object. 
#[derive(Clone, Debug)] -pub struct Queue = QueueState> { +pub struct Queue { /// Guest memory object associated with the queue. pub mem: M, /// Virtio queue state. pub state: S, } -impl> Queue { +impl Queue { /// Construct an empty virtio queue with the given `max_size`. pub fn new(mem: M, max_size: u16) -> Self { Queue { @@ -1006,7 +1027,7 @@ impl> Queue { /// Check whether the queue configuration is valid. pub fn is_valid(&self) -> bool { - self.state.is_valid(&self.mem.memory()) + self.state.is_valid(self.mem.memory().deref()) } /// Reset the queue to the initial state. @@ -1018,7 +1039,7 @@ impl> Queue { /// /// Logically this method will acquire the underlying lock protecting the `QueueState` Object. /// The lock will be released when the returned object gets dropped. - pub fn lock(&mut self) -> QueueStateGuard<'_, M> { + pub fn lock(&mut self) -> QueueStateGuard { self.state.lock() } @@ -1076,12 +1097,13 @@ impl> Queue { /// Read the `idx` field from the available ring. pub fn avail_idx(&self, order: Ordering) -> Result, Error> { - self.state.avail_idx(&self.mem.memory(), order) + self.state.avail_idx(self.mem.memory().deref(), order) } /// Put a used descriptor head into the used ring. pub fn add_used(&mut self, head_index: u16, len: u32) -> Result<(), Error> { - self.state.add_used(&self.mem.memory(), head_index, len) + self.state + .add_used(self.mem.memory().deref(), head_index, len) } /// Enable notification events from the guest driver. @@ -1090,12 +1112,12 @@ impl> Queue { /// notifications were enabled (and thus it's possible there will be no corresponding /// notification). pub fn enable_notification(&mut self) -> Result { - self.state.enable_notification(&self.mem.memory()) + self.state.enable_notification(self.mem.memory().deref()) } /// Disable notification events from the guest driver. 
pub fn disable_notification(&mut self) -> Result<(), Error> { - self.state.disable_notification(&self.mem.memory()) + self.state.disable_notification(self.mem.memory().deref()) } /// Check whether a notification to the guest is needed. @@ -1105,7 +1127,7 @@ impl> Queue { /// won't return `true` again until the driver updates `used_event` and/or the notification /// conditions hold once more. pub fn needs_notification(&mut self) -> Result { - self.state.needs_notification(&self.mem.memory()) + self.state.needs_notification(self.mem.memory().deref()) } /// Return the index for the next descriptor in the available ring. @@ -1119,7 +1141,7 @@ impl> Queue { } } -impl Queue> { +impl Queue { /// A consuming iterator over all available descriptor chain heads offered by the driver. pub fn iter(&mut self) -> Result, Error> { self.state.iter(self.mem.memory()) diff --git a/crates/virtio-queue/src/mock.rs b/crates/virtio-queue/src/mock.rs index 5e4a4137..057d16b4 100644 --- a/crates/virtio-queue/src/mock.rs +++ b/crates/virtio-queue/src/mock.rs @@ -354,8 +354,8 @@ impl<'a, M: GuestMemory> MockSplitQueue<'a, M> { /// Creates a new `Queue`, using the underlying memory regions represented /// by the `MockSplitQueue`. - pub fn create_queue(&self, a: A) -> Queue> { - let mut q = Queue::>::new(a, self.len); + pub fn create_queue(&self, a: A) -> Queue { + let mut q = Queue::::new(a, self.len); q.state.size = self.len; q.state.ready = true; From 629bc27147cb829b52ede50f4d441abae62d57c7 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 26 Oct 2021 15:47:20 +0200 Subject: [PATCH 03/10] remove QueueStateT and QueueStateSync The extremely short critical sections do not make it possible to enforce any invariant on the virtqueue, making QueueStateSync more or less useless. Rip it out, the mutex abstraction will be introduced at the Queue level in the next commits. 
Signed-off-by: Paolo Bonzini --- crates/virtio-queue/src/lib.rs | 234 +------------------------------- crates/virtio-queue/src/mock.rs | 6 +- 2 files changed, 8 insertions(+), 232 deletions(-) diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index 0a260841..e5e6ad52 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -23,9 +23,8 @@ use std::convert::TryFrom; use std::fmt::{self, Debug, Display}; use std::mem::size_of; use std::num::Wrapping; -use std::ops::{Deref, DerefMut}; +use std::ops::Deref; use std::sync::atomic::{fence, Ordering}; -use std::sync::{Arc, Mutex, MutexGuard}; use log::error; use vm_memory::{ @@ -421,121 +420,6 @@ impl VirtqUsedElem { unsafe impl ByteValued for VirtqUsedElem {} -/// Struct to hold an exclusive reference to the underlying `QueueState` object. -pub enum QueueStateGuard<'a> { - /// A reference to a `QueueState` object. - StateObject(&'a mut QueueState), - /// A `MutexGuard` for a `QueueState` object. - MutexGuard(MutexGuard<'a, QueueState>), -} - -impl<'a> Deref for QueueStateGuard<'a> { - type Target = QueueState; - - fn deref(&self) -> &Self::Target { - match self { - QueueStateGuard::StateObject(v) => v, - QueueStateGuard::MutexGuard(v) => v.deref(), - } - } -} - -impl<'a> DerefMut for QueueStateGuard<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - match self { - QueueStateGuard::StateObject(v) => v, - QueueStateGuard::MutexGuard(v) => v.deref_mut(), - } - } -} - -/// Trait to access and manipulate a virtio queue. -/// -/// To optimize for performance, different implementations of the `QueueStateT` trait may be -/// provided for single-threaded context and multi-threaded context. -pub trait QueueStateT { - /// Construct an empty virtio queue state object with the given `max_size`. - fn new(max_size: u16) -> Self; - - /// Check whether the queue configuration is valid. - fn is_valid(&self, mem: &M) -> bool; - - /// Reset the queue to the initial state. 
- fn reset(&mut self); - - /// Get an exclusive reference to the underlying `QueueState` object. - /// - /// Logically this method will acquire the underlying lock protecting the `QueueState` Object. - /// The lock will be released when the returned object gets dropped. - fn lock(&mut self) -> QueueStateGuard; - - /// Get the maximum size of the virtio queue. - fn max_size(&self) -> u16; - - /// Configure the queue size for the virtio queue. - /// - /// The `size` should power of two and less than or equal to value reported by `max_size()`, - /// otherwise it will panic. - fn set_size(&mut self, size: u16); - - /// Check whether the queue is ready to be processed. - fn ready(&self) -> bool; - - /// Configure the queue to ready for processing. - fn set_ready(&mut self, ready: bool); - - /// Set descriptor table address for the queue. - /// - /// The descriptor table address is 64-bit, the corresponding part will be updated if 'low' - /// and/or `high` is valid. - fn set_desc_table_address(&mut self, low: Option, high: Option); - - /// Set available ring address for the queue. - /// - /// The available ring address is 64-bit, the corresponding part will be updated if 'low' - /// and/or `high` is valid. - fn set_avail_ring_address(&mut self, low: Option, high: Option); - - /// Set used ring address for the queue. - /// - /// The used ring address is 64-bit, the corresponding part will be updated if 'low' - /// and/or `high` is valid. - fn set_used_ring_address(&mut self, low: Option, high: Option); - - /// Enable/disable the VIRTIO_F_RING_EVENT_IDX feature for interrupt coalescing. - fn set_event_idx(&mut self, enabled: bool); - - /// Read the `idx` field from the available ring. - fn avail_idx(&self, mem: &M, order: Ordering) -> Result, Error>; - - /// Put a used descriptor head into the used ring. - fn add_used(&mut self, mem: &M, head_index: u16, len: u32) - -> Result<(), Error>; - - /// Enable notification events from the guest driver. 
- /// - /// Return true if one or more descriptors can be consumed from the available ring after - /// notifications were enabled (and thus it's possible there will be no corresponding - /// notification). - fn enable_notification(&mut self, mem: &M) -> Result; - - /// Disable notification events from the guest driver. - fn disable_notification(&mut self, mem: &M) -> Result<(), Error>; - - /// Check whether a notification to the guest is needed. - /// - /// Please note this method has side effects: once it returns `true`, it considers the - /// driver will actually be notified, remember the associated index in the used ring, and - /// won't return `true` again until the driver updates `used_event` and/or the notification - /// conditions hold once more. - fn needs_notification(&mut self, mem: &M) -> Result; - - /// Return the index for the next descriptor in the available ring. - fn next_avail(&self) -> u16; - - /// Set the index for the next descriptor in the available ring. - fn set_next_avail(&mut self, next_avail: u16); -} // Default addresses for each virtqueue part const DEFAULT_DESC_TABLE_ADDR: u64 = 0x0; const DEFAULT_AVAIL_RING_ADDR: u64 = 0x0; @@ -663,9 +547,7 @@ impl QueueState { .map(Wrapping) .map_err(Error::GuestMemory) } -} -impl QueueStateT for QueueState { fn new(max_size: u16) -> Self { QueueState { max_size, @@ -739,10 +621,6 @@ impl QueueStateT for QueueState { self.event_idx_enabled = false; } - fn lock(&mut self) -> QueueStateGuard { - QueueStateGuard::StateObject(self) - } - fn max_size(&self) -> u16 { self.max_size } @@ -915,113 +793,21 @@ impl QueueStateT for QueueState { } } -/// Struct to maintain information and manipulate state of a virtio queue for multi-threaded -/// context. 
-#[derive(Clone, Debug)] -pub struct QueueStateSync { - state: Arc>, -} - -impl QueueStateT for QueueStateSync { - fn new(max_size: u16) -> Self { - QueueStateSync { - state: Arc::new(Mutex::new(QueueState::new(max_size))), - } - } - - fn is_valid(&self, mem: &M) -> bool { - self.state.lock().unwrap().is_valid(mem) - } - - fn reset(&mut self) { - self.state.lock().unwrap().reset(); - } - - fn lock(&mut self) -> QueueStateGuard { - QueueStateGuard::MutexGuard(self.state.lock().unwrap()) - } - - fn max_size(&self) -> u16 { - self.state.lock().unwrap().max_size() - } - - fn set_size(&mut self, size: u16) { - self.state.lock().unwrap().set_size(size); - } - - fn ready(&self) -> bool { - self.state.lock().unwrap().ready - } - - fn set_ready(&mut self, ready: bool) { - self.state.lock().unwrap().set_ready(ready) - } - - fn set_desc_table_address(&mut self, low: Option, high: Option) { - self.state.lock().unwrap().set_desc_table_address(low, high); - } - - fn set_avail_ring_address(&mut self, low: Option, high: Option) { - self.state.lock().unwrap().set_avail_ring_address(low, high); - } - - fn set_used_ring_address(&mut self, low: Option, high: Option) { - self.state.lock().unwrap().set_used_ring_address(low, high); - } - - fn set_event_idx(&mut self, enabled: bool) { - self.state.lock().unwrap().set_event_idx(enabled); - } - - fn avail_idx(&self, mem: &M, order: Ordering) -> Result, Error> { - self.state.lock().unwrap().avail_idx(mem, order) - } - - fn add_used( - &mut self, - mem: &M, - head_index: u16, - len: u32, - ) -> Result<(), Error> { - self.state.lock().unwrap().add_used(mem, head_index, len) - } - - fn enable_notification(&mut self, mem: &M) -> Result { - self.state.lock().unwrap().enable_notification(mem) - } - - fn disable_notification(&mut self, mem: &M) -> Result<(), Error> { - self.state.lock().unwrap().disable_notification(mem) - } - - fn needs_notification(&mut self, mem: &M) -> Result { - self.state.lock().unwrap().needs_notification(mem) - } - - fn 
next_avail(&self) -> u16 { - self.state.lock().unwrap().next_avail() - } - - fn set_next_avail(&mut self, next_avail: u16) { - self.state.lock().unwrap().set_next_avail(next_avail); - } -} - /// A convenient wrapper struct for a virtio queue, with associated GuestMemory object. #[derive(Clone, Debug)] -pub struct Queue { +pub struct Queue { /// Guest memory object associated with the queue. pub mem: M, /// Virtio queue state. - pub state: S, + pub state: QueueState, } -impl Queue { +impl Queue { /// Construct an empty virtio queue with the given `max_size`. pub fn new(mem: M, max_size: u16) -> Self { Queue { mem, - state: S::new(max_size), + state: QueueState::new(max_size), } } @@ -1035,14 +821,6 @@ impl Queue { self.state.reset() } - /// Get an exclusive reference to the underlying `QueueState` object. - /// - /// Logically this method will acquire the underlying lock protecting the `QueueState` Object. - /// The lock will be released when the returned object gets dropped. - pub fn lock(&mut self) -> QueueStateGuard { - self.state.lock() - } - /// Get the maximum size of the virtio queue. pub fn max_size(&self) -> u16 { self.state.max_size() @@ -1139,9 +917,7 @@ impl Queue { pub fn set_next_avail(&mut self, next_avail: u16) { self.state.set_next_avail(next_avail); } -} -impl Queue { /// A consuming iterator over all available descriptor chain heads offered by the driver. pub fn iter(&mut self) -> Result, Error> { self.state.iter(self.mem.memory()) diff --git a/crates/virtio-queue/src/mock.rs b/crates/virtio-queue/src/mock.rs index 057d16b4..532fd405 100644 --- a/crates/virtio-queue/src/mock.rs +++ b/crates/virtio-queue/src/mock.rs @@ -12,7 +12,7 @@ use vm_memory::{ }; use crate::defs::{VIRTQ_DESC_F_INDIRECT, VIRTQ_DESC_F_NEXT}; -use crate::{Descriptor, Queue, QueueState, VirtqUsedElem}; +use crate::{Descriptor, Queue, VirtqUsedElem}; /// Wrapper struct used for accesing a particular address of a GuestMemory area. 
pub struct Ref<'a, M, T> { @@ -354,8 +354,8 @@ impl<'a, M: GuestMemory> MockSplitQueue<'a, M> { /// Creates a new `Queue`, using the underlying memory regions represented /// by the `MockSplitQueue`. - pub fn create_queue(&self, a: A) -> Queue { - let mut q = Queue::::new(a, self.len); + pub fn create_queue(&self, a: A) -> Queue { + let mut q = Queue::::new(a, self.len); q.state.size = self.len; q.state.ready = true; From acd527584a99c1c9921b076687c374d492caaa88 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 26 Oct 2021 15:47:20 +0200 Subject: [PATCH 04/10] introduce QueueGuard QueueGuard encapsulates Queue during a sequence of accesses under the same critical section. A QueueGuard always uses the same GuestMemory for all operations, and can be extended to be a single critical section in the case of multithreaded VMMs. Signed-off-by: Paolo Bonzini --- crates/virtio-queue/src/lib.rs | 120 ++++++++++++++++++++++++++++++++- 1 file changed, 119 insertions(+), 1 deletion(-) diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index e5e6ad52..93f24b83 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -21,9 +21,10 @@ pub mod mock; use std::convert::TryFrom; use std::fmt::{self, Debug, Display}; +use std::marker::PhantomData; use std::mem::size_of; use std::num::Wrapping; -use std::ops::Deref; +use std::ops::{Deref, DerefMut}; use std::sync::atomic::{fence, Ordering}; use log::error; @@ -794,6 +795,103 @@ impl QueueState { } /// A convenient wrapper struct for a virtio queue, with associated GuestMemory object. +pub struct QueueGuard<'a, M: 'a + Clone + Deref, S: 'a + DerefMut> +where + M::Target: GuestMemory + Sized, +{ + /// Guest memory object associated with the queue. + pub mem: M, + + /// Virtio queue state. 
+ pub state: S, + + _marker: PhantomData<&'a S::Target>, +} + +impl<'a, M: 'a + Clone + Deref, S: 'a + DerefMut> QueueGuard<'a, M, S> +where + M::Target: GuestMemory + Sized, +{ + fn new(mem: M, state: S) -> QueueGuard<'a, M, S> { + QueueGuard { + mem, + state, + _marker: PhantomData, + } + } + + /// Check whether the queue configuration is valid. + pub fn is_valid(&self) -> bool { + self.state.is_valid(self.mem.deref()) + } + + /// Read the `idx` field from the available ring. + pub fn avail_idx(&self, order: Ordering) -> Result, Error> { + self.state.avail_idx(self.mem.deref(), order) + } + + /// Put a used descriptor head into the used ring. + pub fn add_used(&mut self, head_index: u16, len: u32) -> Result<(), Error> { + self.state.add_used(self.mem.deref(), head_index, len) + } + + /// Enable notification events from the guest driver. + /// + /// Return true if one or more descriptors can be consumed from the available ring after + /// notifications were enabled (and thus it's possible there will be no corresponding + /// notification). + pub fn enable_notification(&mut self) -> Result { + self.state.enable_notification(self.mem.deref()) + } + + /// Disable notification events from the guest driver. + pub fn disable_notification(&mut self) -> Result<(), Error> { + self.state.disable_notification(self.mem.deref()) + } + + /// Check whether a notification to the guest is needed. + /// + /// Please note this method has side effects: once it returns `true`, it considers the + /// driver will actually be notified, remember the associated index in the used ring, and + /// won't return `true` again until the driver updates `used_event` and/or the notification + /// conditions hold once more. + pub fn needs_notification(&mut self) -> Result { + self.state.needs_notification(self.mem.deref()) + } + + /// A consuming iterator over all available descriptor chain heads offered by the driver. 
+ pub fn iter(&mut self) -> Result, Error> { + // FIXME: this is inefficient + self.state.iter(self.mem.clone()) + } +} + +impl<'a, M: 'a + Clone + Deref, S: 'a + DerefMut> Deref + for QueueGuard<'a, M, S> +where + M::Target: GuestMemory + Sized, +{ + type Target = QueueState; + + /// Reset the queue to the initial state. + fn deref(&self) -> &QueueState { + &self.state + } +} + +impl<'a, M: 'a + Clone + Deref, S: 'a + DerefMut> DerefMut + for QueueGuard<'a, M, S> +where + M::Target: GuestMemory + Sized, +{ + /// Reset the queue to the initial state. + fn deref_mut(&mut self) -> &mut QueueState { + &mut self.state + } +} + +/// A convenient wrapper struct for a virtio queue, with associated GuestAddressSpace +/// object. #[derive(Clone, Debug)] pub struct Queue { /// Guest memory object associated with the queue. @@ -811,6 +909,12 @@ impl Queue { } } + /// Return a QueueGuard that allows to do multiple QueueState operations + /// using the same GuestMemory. + pub fn acquire(&mut self) -> QueueGuard { + QueueGuard::new(self.mem.memory(), &mut self.state) + } + /// Check whether the queue configuration is valid. 
pub fn is_valid(&self) -> bool { self.state.is_valid(self.mem.memory().deref()) @@ -1301,6 +1405,20 @@ mod tests { assert_eq!(x.len, 0x1000); } + #[test] + fn test_queue_guard() { + let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let vq = MockSplitQueue::new(m, 16); + + let mut q = vq.create_queue(m); + let mut qstate = q.acquire(); + qstate.ready = true; + qstate.reset(); + assert_eq!(qstate.ready, false); + let mut iter = qstate.iter().unwrap(); + assert!(iter.next().is_none()); + } + #[test] fn test_reset_queue() { let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); From 85f69a4fd48fe7c86a94f1e623b7b1660f7aea9b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 26 Oct 2021 14:41:01 +0200 Subject: [PATCH 05/10] mock: add trait to build QueueState and QueueGuard This will allow to reuse the same tests for QueueState and QueueStateSync. The trait is _not_ part of the public API, which consists exclusively of acquire() and lock(). This is because with() is mostly useless for the multithreaded case, where you have a &self but not a &mut self. Signed-off-by: Paolo Bonzini --- crates/virtio-queue/src/generic.rs | 169 +++++++++++++++++++++++++++++ crates/virtio-queue/src/lib.rs | 14 ++- crates/virtio-queue/src/mock.rs | 23 ++-- 3 files changed, 191 insertions(+), 15 deletions(-) create mode 100644 crates/virtio-queue/src/generic.rs diff --git a/crates/virtio-queue/src/generic.rs b/crates/virtio-queue/src/generic.rs new file mode 100644 index 00000000..1e849e35 --- /dev/null +++ b/crates/virtio-queue/src/generic.rs @@ -0,0 +1,169 @@ +// Copyright 2021 Red Hat, Inc. +// +// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause + +use std::num::Wrapping; +use std::ops::DerefMut; + +use crate::{Error, Queue, QueueGuard, QueueState}; +use std::sync::atomic::Ordering; +use vm_memory::GuestAddressSpace; + +/// Lifetime-generic guard associated to a QueueStateT. 
In practice, +/// instead of having a `QueueStateT::Guard<'g>` generic associated type, +/// you have to write `>::Out`. +pub(crate) trait QueueStateGuard<'g> { + /// Type of the guard passed to the `with` method. + type Out: 'g + DerefMut; +} + +pub(crate) trait QueueT { + /// Lifetime-generic guard. Usually this is just `Self`, and implementors + /// of `QueueT` also implement `QueueStateGuard`. + type Guard: for<'g> QueueStateGuard<'g>; + + fn construct(mem: M, state: QueueState) -> Self; + fn with< + 'a, + 'g, + U, + F: FnOnce(QueueGuard>::Out>) -> U, + >( + &'a mut self, + f: F, + ) -> U + where + 'a: 'g; + + /// Check whether the queue configuration is valid. + fn is_valid(&mut self) -> bool { + self.with(|qstate| qstate.is_valid()) + } + + /// Reset the queue to the initial state. + fn reset(&mut self) { + self.with(|mut qstate| qstate.reset()) + } + + /// Get the maximum size of the virtio queue. + fn max_size(&mut self) -> u16 { + self.with(|qstate| qstate.max_size()) + } + + /// Configure the queue size for the virtio queue. + /// + /// The `size` should power of two and less than or equal to value reported by `max_size()`, + /// otherwise it will panic. + fn set_size(&mut self, size: u16) { + self.with(|mut qstate| qstate.set_size(size)) + } + + /// Check whether the queue is ready to be processed. + fn ready(&mut self) -> bool { + self.with(|qstate| qstate.ready()) + } + + /// Configure the queue to ready for processing. + fn set_ready(&mut self, ready: bool) { + self.with(|mut qstate| qstate.set_ready(ready)) + } + + /// Set descriptor table address for the queue. + /// + /// The descriptor table address is 64-bit, the corresponding part will be updated if 'low' + /// and/or `high` is valid. + fn set_desc_table_address(&mut self, low: Option, high: Option) { + self.with(|mut qstate| qstate.set_desc_table_address(low, high)) + } + + /// Set available ring address for the queue. 
+ /// + /// The available ring address is 64-bit, the corresponding part will be updated if 'low' + /// and/or `high` is valid. + fn set_avail_ring_address(&mut self, low: Option, high: Option) { + self.with(|mut qstate| qstate.set_avail_ring_address(low, high)) + } + + /// Set used ring address for the queue. + /// + /// The used ring address is 64-bit, the corresponding part will be updated if 'low' + /// and/or `high` is valid. + fn set_used_ring_address(&mut self, low: Option, high: Option) { + self.with(|mut qstate| qstate.set_used_ring_address(low, high)) + } + + /// Enable/disable the VIRTIO_F_RING_EVENT_IDX feature for interrupt coalescing. + fn set_event_idx(&mut self, enabled: bool) { + self.with(|mut qstate| qstate.set_event_idx(enabled)) + } + + /// Read the `idx` field from the available ring. + fn avail_idx(&mut self, order: Ordering) -> Result, Error> { + self.with(|qstate| qstate.avail_idx(order)) + } + + /// Put a used descriptor head into the used ring. + fn add_used(&mut self, head_index: u16, len: u32) -> Result<(), Error> { + self.with(|mut qstate| qstate.add_used(head_index, len)) + } + + /// Enable notification events from the guest driver. + /// + /// Return true if one or more descriptors can be consumed from the available ring after + /// notifications were enabled (and thus it's possible there will be no corresponding + /// notification). + fn enable_notification(&mut self) -> Result { + self.with(|mut qstate| qstate.enable_notification()) + } + + /// Disable notification events from the guest driver. + fn disable_notification(&mut self) -> Result<(), Error> { + self.with(|mut qstate| qstate.disable_notification()) + } + + /// Check whether a notification to the guest is needed. 
+ /// + /// Please note this method has side effects: once it returns `true`, it considers the + /// driver will actually be notified, remember the associated index in the used ring, and + /// won't return `true` again until the driver updates `used_event` and/or the notification + /// conditions hold once more. + fn needs_notification(&mut self) -> Result { + self.with(|mut qstate| qstate.needs_notification()) + } + + /// Return the index for the next descriptor in the available ring. + fn next_avail(&mut self) -> u16 { + self.with(|qstate| qstate.next_avail()) + } + + /// Set the index for the next descriptor in the available ring. + fn set_next_avail(&mut self, next_avail: u16) { + self.with(|mut qstate| qstate.set_next_avail(next_avail)) + } +} + +impl<'g, M: GuestAddressSpace> QueueStateGuard<'g> for Queue { + type Out = &'g mut QueueState; +} + +impl QueueT for Queue { + type Guard = Self; + + fn construct(mem: M, state: QueueState) -> Self { + Queue { mem, state } + } + fn with< + 'a, + 'g, + U, + F: FnOnce(QueueGuard>::Out>) -> U, + >( + &'a mut self, + f: F, + ) -> U + where + 'a: 'g, + { + f(self.acquire()) + } +} diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index 93f24b83..61197bbf 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -19,6 +19,8 @@ pub mod defs; #[cfg(any(test, feature = "test-utils"))] pub mod mock; +pub(crate) mod generic; + use std::convert::TryFrom; use std::fmt::{self, Debug, Display}; use std::marker::PhantomData; @@ -1189,7 +1191,7 @@ mod tests { let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); - let mut q = vq.create_queue(m); + let mut q: Queue<_> = vq.as_queue(m); // q is currently valid assert!(q.is_valid()); @@ -1325,7 +1327,7 @@ mod tests { let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); - let mut q = 
vq.create_queue(m); + let mut q: Queue<_> = vq.as_queue(m); // q is currently valid assert!(q.is_valid()); @@ -1387,7 +1389,7 @@ mod tests { let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); - let mut q = vq.create_queue(m); + let mut q: Queue<_> = vq.as_queue(m); assert_eq!(vq.used().idx().load(), 0); @@ -1424,7 +1426,7 @@ mod tests { let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); - let mut q = vq.create_queue(m); + let mut q: Queue<_> = vq.as_queue(m); q.state.size = 8; q.state.ready = true; q.state.reset(); @@ -1438,7 +1440,7 @@ mod tests { let qsize = 16; let vq = MockSplitQueue::new(m, qsize); - let mut q = vq.create_queue(m); + let mut q: Queue<_> = vq.as_queue(m); let avail_addr = vq.avail_addr(); // It should always return true when EVENT_IDX isn't enabled. @@ -1483,7 +1485,7 @@ mod tests { let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); - let mut q = vq.create_queue(m); + let mut q: Queue<_> = vq.as_queue(m); let used_addr = vq.used_addr(); assert_eq!(q.state.event_idx_enabled, false); diff --git a/crates/virtio-queue/src/mock.rs b/crates/virtio-queue/src/mock.rs index 532fd405..f77ae55c 100644 --- a/crates/virtio-queue/src/mock.rs +++ b/crates/virtio-queue/src/mock.rs @@ -12,7 +12,8 @@ use vm_memory::{ }; use crate::defs::{VIRTQ_DESC_F_INDIRECT, VIRTQ_DESC_F_NEXT}; -use crate::{Descriptor, Queue, VirtqUsedElem}; +use crate::generic::QueueT; +use crate::{Descriptor, Queue, QueueState, VirtqUsedElem}; /// Wrapper struct used for accesing a particular address of a GuestMemory area. pub struct Ref<'a, M, T> { @@ -352,16 +353,20 @@ impl<'a, M: GuestMemory> MockSplitQueue<'a, M> { self.update_avail_idx(head_idx); } + /// Return a QueueT implementation for this queue. 
+ pub(crate) fn as_queue>(&self, a: A) -> T { + let mut q = QueueState::new(self.len); + q.size = self.len; + q.ready = true; + q.desc_table = self.desc_table_addr; + q.avail_ring = self.avail_addr; + q.used_ring = self.used_addr; + QueueT::construct(a, q) + } + /// Creates a new `Queue`, using the underlying memory regions represented /// by the `MockSplitQueue`. pub fn create_queue(&self, a: A) -> Queue { - let mut q = Queue::::new(a, self.len); - - q.state.size = self.len; - q.state.ready = true; - q.state.desc_table = self.desc_table_addr; - q.state.avail_ring = self.avail_addr; - q.state.used_ring = self.used_addr; - q + self.as_queue(a) } } From 273b933c0b973ca1a46b40cb587328d12e3178cf Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 26 Oct 2021 16:02:26 +0200 Subject: [PATCH 06/10] tests: introduce a shortcut type AddrSpace Avoid constant repetition of "GuestMemoryMmap::<()>". Signed-off-by: Paolo Bonzini --- crates/virtio-queue/src/lib.rs | 44 ++++++++++++++++------------------ 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index 61197bbf..fa8afaa0 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -1038,6 +1038,8 @@ mod tests { use vm_memory::{GuestAddress, GuestMemoryMmap}; + type AddrSpace = GuestMemoryMmap<()>; + #[test] pub fn test_offset() { assert_eq!(offset_of!(Descriptor, addr), 0); @@ -1048,21 +1050,19 @@ mod tests { #[test] fn test_checked_new_descriptor_chain() { - let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); assert!(vq.end().0 < 0x1000); // index >= queue_size - assert!( - DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16) - .next() - .is_none() - ); + assert!(DescriptorChain::<&AddrSpace>::new(m, vq.start(), 16, 16) + .next() + .is_none()); // desc_table address 
is way off assert!( - DescriptorChain::<&GuestMemoryMmap>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0) + DescriptorChain::<&AddrSpace>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0) .next() .is_none() ); @@ -1073,7 +1073,7 @@ mod tests { let desc = Descriptor::new(0x1000, 0x1000, VIRTQ_DESC_F_NEXT, 16); vq.desc_table().store(0, desc); - let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0); + let mut c = DescriptorChain::<&AddrSpace>::new(m, vq.start(), 16, 0); c.next().unwrap(); assert!(c.next().is_none()); } @@ -1086,7 +1086,7 @@ mod tests { let desc = Descriptor::new(0x2000, 0x1000, 0, 0); vq.desc_table().store(1, desc); - let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0); + let mut c = DescriptorChain::<&AddrSpace>::new(m, vq.start(), 16, 0); assert_eq!( c.memory() as *const GuestMemoryMmap, @@ -1110,7 +1110,7 @@ mod tests { #[test] fn test_new_from_indirect_descriptor() { - let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); let dtable = vq.desc_table(); @@ -1122,7 +1122,7 @@ mod tests { let desc = Descriptor::new(0x3000, 0x1000, 0, 0); dtable.store(2, desc); - let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0); + let mut c: DescriptorChain<&AddrSpace> = DescriptorChain::new(m, vq.start(), 16, 0); // The chain logic hasn't parsed the indirect descriptor yet. 
assert!(!c.is_indirect); @@ -1158,29 +1158,27 @@ mod tests { #[test] fn test_indirect_descriptor_err() { { - let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); // create a chain with a descriptor pointing to an indirect table let desc = Descriptor::new(0x1001, 0x1000, VIRTQ_DESC_F_INDIRECT, 0); vq.desc_table().store(0, desc); - let mut c: DescriptorChain<&GuestMemoryMmap> = - DescriptorChain::new(m, vq.start(), 16, 0); + let mut c: DescriptorChain<&AddrSpace> = DescriptorChain::new(m, vq.start(), 16, 0); assert!(c.next().is_none()); } { - let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); // create a chain with a descriptor pointing to an indirect table let desc = Descriptor::new(0x1000, 0x1001, VIRTQ_DESC_F_INDIRECT, 0); vq.desc_table().store(0, desc); - let mut c: DescriptorChain<&GuestMemoryMmap> = - DescriptorChain::new(m, vq.start(), 16, 0); + let mut c: DescriptorChain<&AddrSpace> = DescriptorChain::new(m, vq.start(), 16, 0); assert!(c.next().is_none()); } @@ -1188,7 +1186,7 @@ mod tests { #[test] fn test_queue_and_iterator() { - let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); let mut q: Queue<_> = vq.as_queue(m); @@ -1324,7 +1322,7 @@ mod tests { #[test] fn test_descriptor_and_iterator() { - let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); let mut q: Queue<_> = vq.as_queue(m); @@ -1386,7 +1384,7 @@ mod tests { #[test] fn test_add_used() { - let m = 
&GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); let mut q: Queue<_> = vq.as_queue(m); @@ -1423,7 +1421,7 @@ mod tests { #[test] fn test_reset_queue() { - let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); let mut q: Queue<_> = vq.as_queue(m); @@ -1436,7 +1434,7 @@ mod tests { #[test] fn test_needs_notification() { - let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let qsize = 16; let vq = MockSplitQueue::new(m, qsize); @@ -1482,7 +1480,7 @@ mod tests { #[test] fn test_enable_disable_notification() { - let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); let vq = MockSplitQueue::new(m, 16); let mut q: Queue<_> = vq.as_queue(m); From 10306e5767e38a987de1d4cb267dce2f130b6671 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 26 Oct 2021 16:26:44 +0200 Subject: [PATCH 07/10] tests: switch to QueueT Make all accesses to the underlying QueueState go through the with() function. The function will take care of locking when testing QueueSync. 
Signed-off-by: Paolo Bonzini --- crates/virtio-queue/src/lib.rs | 206 +++++++++++++++++---------------- 1 file changed, 108 insertions(+), 98 deletions(-) diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index fa8afaa0..3b675158 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -1033,6 +1033,7 @@ impl Queue { #[cfg(test)] mod tests { use super::*; + use generic::QueueT; use memoffset::offset_of; use mock::{DescriptorTable, MockSplitQueue}; @@ -1195,96 +1196,98 @@ mod tests { assert!(q.is_valid()); // shouldn't be valid when not marked as ready - q.state.ready = false; + q.with(|mut qstate| qstate.ready = false); assert!(!q.is_valid()); - q.state.ready = true; + q.with(|mut qstate| qstate.ready = true); + + let max_size = q.with(|qstate| qstate.max_size); // shouldn't be allowed to set a size > max_size - q.set_size(q.state.max_size << 1); - assert_eq!(q.state.size, q.state.max_size); + q.with(|mut qstate| qstate.set_size(max_size << 1)); + assert!(q.with(|qstate| qstate.size) == max_size); // or set the size to 0 - q.set_size(0); - assert_eq!(q.state.size, q.state.max_size); + q.with(|mut qstate| qstate.set_size(0)); + assert!(q.with(|qstate| qstate.size) == max_size); // or set a size which is not a power of 2 - q.set_size(11); - assert_eq!(q.state.size, q.state.max_size); + q.with(|mut qstate| qstate.set_size(11)); + assert!(q.with(|qstate| qstate.size) == max_size); // but should be allowed to set a size if 0 < size <= max_size and size is a power of two - q.set_size(4); - assert_eq!(q.state.size, 4); - q.state.size = q.state.max_size; + q.with(|mut qstate| qstate.set_size(4)); + q.with(|qstate| assert_eq!(qstate.size, 4)); + q.with(|mut qstate| qstate.size = qstate.max_size); // shouldn't be allowed to set an address that breaks the alignment constraint - q.set_desc_table_address(Some(0xf), None); - assert_eq!(q.state.desc_table.0, vq.desc_table_addr().0); + q.with(|mut qstate| 
qstate.set_desc_table_address(Some(0xf), None)); + q.with(|qstate| assert_eq!(qstate.desc_table.0, vq.desc_table_addr().0)); // should be allowed to set an aligned out of bounds address - q.set_desc_table_address(Some(0xffff_fff0), None); - assert_eq!(q.state.desc_table.0, 0xffff_fff0); + q.with(|mut qstate| qstate.set_desc_table_address(Some(0xffff_fff0), None)); + q.with(|qstate| assert_eq!(qstate.desc_table.0, 0xffff_fff0)); // but shouldn't be valid assert!(!q.is_valid()); // but should be allowed to set a valid description table address - q.set_desc_table_address(Some(0x10), None); - assert_eq!(q.state.desc_table.0, 0x10); + q.with(|mut qstate| qstate.set_desc_table_address(Some(0x10), None)); + q.with(|qstate| assert_eq!(qstate.desc_table.0, 0x10)); assert!(q.is_valid()); - q.state.desc_table = vq.desc_table_addr(); + q.with(|mut qstate| qstate.desc_table = vq.desc_table_addr()); // shouldn't be allowed to set an address that breaks the alignment constraint - q.set_avail_ring_address(Some(0x1), None); - assert_eq!(q.state.avail_ring.0, vq.avail_addr().0); + q.with(|mut qstate| qstate.set_avail_ring_address(Some(0x1), None)); + q.with(|qstate| assert_eq!(qstate.avail_ring.0, vq.avail_addr().0)); // should be allowed to set an aligned out of bounds address - q.set_avail_ring_address(Some(0xffff_fffe), None); - assert_eq!(q.state.avail_ring.0, 0xffff_fffe); + q.with(|mut qstate| qstate.set_avail_ring_address(Some(0xffff_fffe), None)); + q.with(|qstate| assert_eq!(qstate.avail_ring.0, 0xffff_fffe)); // but shouldn't be valid assert!(!q.is_valid()); // but should be allowed to set a valid available ring address - q.set_avail_ring_address(Some(0x2), None); - assert_eq!(q.state.avail_ring.0, 0x2); + q.with(|mut qstate| qstate.set_avail_ring_address(Some(0x2), None)); + q.with(|qstate| assert_eq!(qstate.avail_ring.0, 0x2)); assert!(q.is_valid()); - q.state.avail_ring = vq.avail_addr(); + q.with(|mut qstate| qstate.avail_ring = vq.avail_addr()); // shouldn't be 
allowed to set an address that breaks the alignment constraint - q.set_used_ring_address(Some(0x3), None); - assert_eq!(q.state.used_ring.0, vq.used_addr().0); + q.with(|mut qstate| qstate.set_used_ring_address(Some(0x3), None)); + q.with(|qstate| assert_eq!(qstate.used_ring.0, vq.used_addr().0)); // should be allowed to set an aligned out of bounds address - q.set_used_ring_address(Some(0xffff_fffc), None); - assert_eq!(q.state.used_ring.0, 0xffff_fffc); + q.with(|mut qstate| qstate.set_used_ring_address(Some(0xffff_fffc), None)); + q.with(|qstate| assert_eq!(qstate.used_ring.0, 0xffff_fffc)); // but shouldn't be valid assert!(!q.is_valid()); // but should be allowed to set a valid used ring address - q.set_used_ring_address(Some(0x4), None); - assert_eq!(q.state.used_ring.0, 0x4); + q.with(|mut qstate| qstate.set_used_ring_address(Some(0x4), None)); + q.with(|qstate| assert_eq!(qstate.used_ring.0, 0x4)); assert!(q.is_valid()); - q.state.used_ring = vq.used_addr(); + q.with(|mut qstate| qstate.used_ring = vq.used_addr()); - { + q.with(|mut qstate| { // an invalid queue should return an iterator with no next - q.state.ready = false; - let mut i = q.iter().unwrap(); + qstate.ready = false; + let mut i = qstate.iter().unwrap(); assert!(i.next().is_none()); - } + }); - q.state.ready = true; + q.with(|mut qstate| qstate.ready = true); // now let's create two simple descriptor chains // the chains are (0, 1) and (2, 3, 4) - { - for j in 0..5u16 { - let flags = match j { - 1 | 4 => 0, - _ => VIRTQ_DESC_F_NEXT, - }; - - let desc = Descriptor::new((0x1000 * (j + 1)) as u64, 0x1000, flags, j + 1); - vq.desc_table().store(j, desc); - } + for j in 0..5u16 { + let flags = match j { + 1 | 4 => 0, + _ => VIRTQ_DESC_F_NEXT, + }; - vq.avail().ring().ref_at(0).store(0); - vq.avail().ring().ref_at(1).store(2); - vq.avail().idx().store(2); + let desc = Descriptor::new((0x1000 * (j + 1)) as u64, 0x1000, flags, j + 1); + vq.desc_table().store(j, desc); + } + + 
vq.avail().ring().ref_at(0).store(0); + vq.avail().ring().ref_at(1).store(2); + vq.avail().idx().store(2); - let mut i = q.iter().unwrap(); + q.with(|mut qstate| { + let mut i = qstate.iter().unwrap(); { let mut c = i.next().unwrap(); @@ -1311,13 +1314,13 @@ mod tests { { assert!(i.next().is_none()); i.go_to_previous_position(); - let mut c = q.iter().unwrap().next().unwrap(); + let mut c = qstate.iter().unwrap().next().unwrap(); c.next().unwrap(); c.next().unwrap(); c.next().unwrap(); assert!(c.next().is_none()); } - } + }) } #[test] @@ -1347,39 +1350,41 @@ mod tests { vq.avail().ring().ref_at(2).store(5); vq.avail().idx().store(3); - let mut i = q.iter().unwrap(); + q.with(|mut qstate| { + let mut i = qstate.iter().unwrap(); - { - let c = i.next().unwrap(); - assert_eq!(c.head_index(), 0); - - let mut iter = c; - assert!(iter.next().is_some()); - assert!(iter.next().is_some()); - assert!(iter.next().is_none()); - assert!(iter.next().is_none()); - } + { + let c = i.next().unwrap(); + assert_eq!(c.head_index(), 0); - { - let c = i.next().unwrap(); - assert_eq!(c.head_index(), 2); - - let mut iter = c.writable(); - assert!(iter.next().is_some()); - assert!(iter.next().is_some()); - assert!(iter.next().is_none()); - assert!(iter.next().is_none()); - } + let mut iter = c; + assert!(iter.next().is_some()); + assert!(iter.next().is_some()); + assert!(iter.next().is_none()); + assert!(iter.next().is_none()); + } - { - let c = i.next().unwrap(); - assert_eq!(c.head_index(), 5); + { + let c = i.next().unwrap(); + assert_eq!(c.head_index(), 2); - let mut iter = c.readable(); - assert!(iter.next().is_some()); - assert!(iter.next().is_none()); - assert!(iter.next().is_none()); - } + let mut iter = c.writable(); + assert!(iter.next().is_some()); + assert!(iter.next().is_some()); + assert!(iter.next().is_none()); + assert!(iter.next().is_none()); + } + + { + let c = i.next().unwrap(); + assert_eq!(c.head_index(), 5); + + let mut iter = c.readable(); + 
assert!(iter.next().is_some()); + assert!(iter.next().is_none()); + assert!(iter.next().is_none()); + } + }) } #[test] @@ -1397,7 +1402,7 @@ mod tests { // should be ok q.add_used(1, 0x1000).unwrap(); - assert_eq!(q.state.next_used, Wrapping(1)); + q.with(|qstate| assert_eq!(qstate.next_used, Wrapping(1))); assert_eq!(vq.used().idx().load(), 1); let x = vq.used().ring().ref_at(0).load(); @@ -1425,11 +1430,13 @@ mod tests { let vq = MockSplitQueue::new(m, 16); let mut q: Queue<_> = vq.as_queue(m); - q.state.size = 8; - q.state.ready = true; - q.state.reset(); - assert_eq!(q.state.size, 16); - assert_eq!(q.state.ready, false); + q.with(|mut qstate| { + qstate.size = 8; + qstate.ready = true; + qstate.reset(); + assert_eq!(qstate.size, 16); + assert_eq!(qstate.ready, false); + }) } #[test] @@ -1443,21 +1450,24 @@ mod tests { // It should always return true when EVENT_IDX isn't enabled. for i in 0..qsize { - q.state.next_used = Wrapping(i); + q.with(|mut qstate| qstate.next_used = Wrapping(i)); assert_eq!(q.needs_notification().unwrap(), true); } m.write_obj::(4, avail_addr.unchecked_add(4 + qsize as u64 * 2)) .unwrap(); - q.state.set_event_idx(true); + q.with(|mut qstate| qstate.set_event_idx(true)); // Incrementing up to this value causes an `u16` to wrap back to 0. let wrap = u32::from(u16::MAX) + 1; for i in 0..wrap + 12 { - q.state.next_used = Wrapping(i as u16); - // Let's test wrapping around the maximum index value as well. - let expected = i == 5 || i == (5 + wrap) || q.state.signalled_used.is_none(); + let expected = q.with(|mut qstate| { + qstate.next_used = Wrapping(i as u16); + // Let's test wrapping around the maximum index value as well. 
+ i == 5 || i == (5 + wrap) || qstate.signalled_used.is_none() + }); + assert_eq!(q.needs_notification().unwrap(), expected); } @@ -1471,9 +1481,9 @@ mod tests { .unwrap(); assert_eq!(q.needs_notification().unwrap(), false); - q.state.next_used = Wrapping(15); + q.with(|mut qstate| qstate.next_used = Wrapping(15)); assert_eq!(q.needs_notification().unwrap(), false); - q.state.next_used = Wrapping(0); + q.with(|mut qstate| qstate.next_used = Wrapping(0)); assert_eq!(q.needs_notification().unwrap(), true); assert_eq!(q.needs_notification().unwrap(), false); } @@ -1486,7 +1496,7 @@ mod tests { let mut q: Queue<_> = vq.as_queue(m); let used_addr = vq.used_addr(); - assert_eq!(q.state.event_idx_enabled, false); + q.with(|qstate| assert_eq!(qstate.event_idx_enabled, false)); q.enable_notification().unwrap(); let v = m.read_obj::(used_addr).unwrap(); @@ -1505,13 +1515,13 @@ mod tests { m.write_obj::(2, avail_addr.unchecked_add(2)).unwrap(); assert_eq!(q.enable_notification().unwrap(), true); - q.state.next_avail = Wrapping(2); + q.with(|mut qstate| qstate.next_avail = Wrapping(2)); assert_eq!(q.enable_notification().unwrap(), false); m.write_obj::(8, avail_addr.unchecked_add(2)).unwrap(); assert_eq!(q.enable_notification().unwrap(), true); - q.state.next_avail = Wrapping(8); + q.with(|mut qstate| qstate.next_avail = Wrapping(8)); assert_eq!(q.enable_notification().unwrap(), false); } } From bc7569d0ce238280009ff8e0dca6c71abb0f101a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 26 Oct 2021 15:52:19 +0200 Subject: [PATCH 08/10] introduce QueueSync Similar to Queue, QueueSync associates a GuestAddressSpace to a QueueState. The QueueState however is wrapped with reference counting and with a mutex. All access to the QueueSync object has to go through a QueueGuard, which takes care of locking the mutex. 
Signed-off-by: Paolo Bonzini --- crates/virtio-queue/src/generic.rs | 32 +++++++++++++++++++++++++++++- crates/virtio-queue/src/lib.rs | 28 ++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/crates/virtio-queue/src/generic.rs b/crates/virtio-queue/src/generic.rs index 1e849e35..0043bf88 100644 --- a/crates/virtio-queue/src/generic.rs +++ b/crates/virtio-queue/src/generic.rs @@ -5,8 +5,9 @@ use std::num::Wrapping; use std::ops::DerefMut; -use crate::{Error, Queue, QueueGuard, QueueState}; +use crate::{Error, Queue, QueueGuard, QueueState, QueueSync}; use std::sync::atomic::Ordering; +use std::sync::{Arc, Mutex, MutexGuard}; use vm_memory::GuestAddressSpace; /// Lifetime-generic guard associated to a QueueStateT. In practice, @@ -167,3 +168,32 @@ impl QueueT for Queue { f(self.acquire()) } } + +impl<'g, M: GuestAddressSpace> QueueStateGuard<'g> for QueueSync { + type Out = MutexGuard<'g, QueueState>; +} + +impl QueueT for QueueSync { + type Guard = Self; + + fn construct(mem: M, state: QueueState) -> Self { + QueueSync { + mem, + state: Arc::new(Mutex::new(state)), + } + } + fn with< + 'a, + 'g, + U, + F: FnOnce(QueueGuard>::Out>) -> U, + >( + &'a mut self, + f: F, + ) -> U + where + 'a: 'g, + { + f(self.lock()) + } +} diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index 3b675158..f40de71a 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -28,6 +28,7 @@ use std::mem::size_of; use std::num::Wrapping; use std::ops::{Deref, DerefMut}; use std::sync::atomic::{fence, Ordering}; +use std::sync::{Arc, Mutex, MutexGuard}; use log::error; use vm_memory::{ @@ -1030,6 +1031,33 @@ impl Queue { } } +/// A convenient wrapper struct for a thread-safe virtio queue, with associated GuestMemory object. +#[derive(Clone, Debug)] +pub struct QueueSync { + /// Guest memory object associated with the queue. + pub mem: M, + /// Virtio queue state. 
+ pub state: Arc>, +} + +impl QueueSync { + /// Construct an empty virtio queue with the given `max_size`. + pub fn new(mem: M, max_size: u16) -> Self { + QueueSync { + mem, + state: Arc::new(Mutex::new(QueueState::new(max_size))), + } + } + + /// Get an exclusive reference to the underlying `QueueState` object. + /// + /// Logically this method will acquire the underlying lock protecting the `QueueState` + /// object. The lock will be released when the returned object gets dropped. + pub fn lock(&self) -> QueueGuard> { + QueueGuard::new(self.mem.memory(), self.state.lock().unwrap()) + } +} + #[cfg(test)] mod tests { use super::*; From 031715e22ec5e6a62e1ac677710cf433909d45d6 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 26 Oct 2021 16:59:46 +0200 Subject: [PATCH 09/10] add tests for QueueSync The tests simply invoke the same code twice, once with a Queue and once with a QueueSync. test_queue_guard/test_queue_guard_sync are different, because one uses lock() and one uses acquire(). 
Suggested-by: Laura Loghin Signed-off-by: Paolo Bonzini --- crates/virtio-queue/src/lib.rs | 122 +++++++++++++++++++++++++++------ 1 file changed, 102 insertions(+), 20 deletions(-) diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs index f40de71a..ab4b13b9 100644 --- a/crates/virtio-queue/src/lib.rs +++ b/crates/virtio-queue/src/lib.rs @@ -1213,12 +1213,10 @@ mod tests { } } - #[test] - fn test_queue_and_iterator() { - let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + fn do_test_queue_and_iterator<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) { let vq = MockSplitQueue::new(m, 16); - let mut q: Queue<_> = vq.as_queue(m); + let mut q: Q = vq.as_queue(m); // q is currently valid assert!(q.is_valid()); @@ -1352,11 +1350,21 @@ mod tests { } #[test] - fn test_descriptor_and_iterator() { + fn test_queue_and_iterator() { + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_queue_and_iterator::>(m); + } + + #[test] + fn test_queue_and_iterator_sync() { let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_queue_and_iterator::>(m); + } + + fn do_test_descriptor_and_iterator<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) { let vq = MockSplitQueue::new(m, 16); - let mut q: Queue<_> = vq.as_queue(m); + let mut q: Q = vq.as_queue(m); // q is currently valid assert!(q.is_valid()); @@ -1416,11 +1424,21 @@ mod tests { } #[test] - fn test_add_used() { + fn test_descriptor_and_iterator() { let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_descriptor_and_iterator::>(m); + } + + #[test] + fn test_descriptor_and_iterator_sync() { + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_descriptor_and_iterator::>(m); + } + + fn do_test_add_used<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) { let vq = MockSplitQueue::new(m, 16); - let mut q: Queue<_> = vq.as_queue(m); + let mut q: Q = vq.as_queue(m); 
         assert_eq!(vq.used().idx().load(), 0);
 
@@ -1439,12 +1457,20 @@ mod tests {
     }
 
     #[test]
-    fn test_queue_guard() {
-        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
-        let vq = MockSplitQueue::new(m, 16);
+    fn test_add_used() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_add_used::<Queue<_>>(m);
+    }
 
-        let mut q = vq.create_queue(m);
-        let mut qstate = q.acquire();
+    #[test]
+    fn test_add_used_sync() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_add_used::<QueueSync<_>>(m);
+    }
+
+    fn do_test_queue_guard<S: DerefMut<Target = QueueState>>(
+        mut qstate: QueueGuard<&AddrSpace, S>,
+    ) {
         qstate.ready = true;
         qstate.reset();
         assert_eq!(qstate.ready, false);
@@ -1453,11 +1479,35 @@
     }
 
     #[test]
-    fn test_reset_queue() {
-        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+    fn test_queue_guard() {
+        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        let vq = MockSplitQueue::new(m, 16);
+
+        let mut q = vq.create_queue(m);
+        do_test_queue_guard(q.acquire());
+    }
+
+    #[test]
+    fn test_queue_guard_sync() {
+        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        let vq = MockSplitQueue::new(m, 16);
+
+        // Note how test_queue_guard above uses "let mut" while this test does not.
+        // A single-threaded queue can only be accessed by one thread, and that one
+        // needs to have an exclusive reference; a multithreaded queue can be
+        // accessed by many threads and therefore it has to support interior
+        // mutability (via Mutex). On the other hand, a multithreaded queue is
+        // reference counted and therefore it cannot be accessed via an exclusive
+        // reference *at all*. This is the reason why only Queue has the acquire()
+        // method, and only QueueSync has a lock() method.
+ let q: QueueSync<_> = vq.as_queue(m); + do_test_queue_guard(q.lock()); + } + + fn do_test_reset_queue<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) { let vq = MockSplitQueue::new(m, 16); - let mut q: Queue<_> = vq.as_queue(m); + let mut q: Q = vq.as_queue(m); q.with(|mut qstate| { qstate.size = 8; qstate.ready = true; @@ -1468,12 +1518,22 @@ mod tests { } #[test] - fn test_needs_notification() { + fn test_reset_queue() { let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_reset_queue::>(m); + } + + #[test] + fn test_reset_queue_sync() { + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_reset_queue::>(m); + } + + fn do_test_needs_notification<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) { let qsize = 16; let vq = MockSplitQueue::new(m, qsize); - let mut q: Queue<_> = vq.as_queue(m); + let mut q: Q = vq.as_queue(m); let avail_addr = vq.avail_addr(); // It should always return true when EVENT_IDX isn't enabled. @@ -1517,11 +1577,21 @@ mod tests { } #[test] - fn test_enable_disable_notification() { + fn test_needs_notification() { let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_needs_notification::>(m); + } + + #[test] + fn test_needs_notification_sync() { + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_needs_notification::>(m); + } + + fn do_test_enable_disable_notification<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) { let vq = MockSplitQueue::new(m, 16); - let mut q: Queue<_> = vq.as_queue(m); + let mut q: Q = vq.as_queue(m); let used_addr = vq.used_addr(); q.with(|qstate| assert_eq!(qstate.event_idx_enabled, false)); @@ -1552,4 +1622,16 @@ mod tests { q.with(|mut qstate| qstate.next_avail = Wrapping(8)); assert_eq!(q.enable_notification().unwrap(), false); } + + #[test] + fn test_enable_disable_notification() { + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + 
do_test_enable_disable_notification::>(m); + } + + #[test] + fn test_enable_disable_notification_sync() { + let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap(); + do_test_enable_disable_notification::>(m); + } } From ebc4db0a25a100250853a1ccfaebbf1c0bb50060 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 26 Oct 2021 18:05:56 +0200 Subject: [PATCH 10/10] update coverage Signed-off-by: Paolo Bonzini --- coverage_config_x86_64.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json index 89ddc6f9..ee2d51a6 100644 --- a/coverage_config_x86_64.json +++ b/coverage_config_x86_64.json @@ -1,5 +1,5 @@ { - "coverage_score": 90.2, + "coverage_score": 88.8, "exclude_path": "crates/virtio-queue/src/mock.rs", "crate_features": "virtio-blk/backend-stdio" }