diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json
index 89ddc6f9..ee2d51a6 100644
--- a/coverage_config_x86_64.json
+++ b/coverage_config_x86_64.json
@@ -1,5 +1,5 @@
 {
-    "coverage_score": 90.2,
+    "coverage_score": 88.8,
     "exclude_path": "crates/virtio-queue/src/mock.rs",
     "crate_features": "virtio-blk/backend-stdio"
 }
diff --git a/crates/devices/virtio-blk/src/request.rs b/crates/devices/virtio-blk/src/request.rs
index 7da9c6af..cabd9e9d 100644
--- a/crates/devices/virtio-blk/src/request.rs
+++ b/crates/devices/virtio-blk/src/request.rs
@@ -24,6 +24,7 @@
 //! approach.
 
 use std::fmt::{self, Display};
+use std::ops::Deref;
 use std::result;
 
 use crate::defs::{
@@ -32,9 +33,7 @@ use crate::defs::{
 };
 
 use virtio_queue::{Descriptor, DescriptorChain};
-use vm_memory::{
-    ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryError,
-};
+use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError};
 
 /// Block request parsing errors.
 #[derive(Debug)]
@@ -159,7 +158,10 @@ impl Request {
     }
 
     // Checks that a descriptor meets the minimal requirements for a valid status descriptor.
-    fn check_status_desc<M: GuestMemory>(mem: &M, desc: Descriptor) -> Result<()> {
+    fn check_status_desc<M>(mem: &M, desc: Descriptor) -> Result<()>
+    where
+        M: GuestMemory + ?Sized,
+    {
         // The status MUST always be writable.
         if !desc.is_write_only() {
             return Err(Error::UnexpectedReadOnlyDescriptor);
         }
@@ -202,7 +204,11 @@ impl Request {
     /// # Arguments
     /// * `desc_chain` - A mutable reference to the descriptor chain that should point to the
     ///   buffers of a virtio block request.
-    pub fn parse<M: GuestAddressSpace>(desc_chain: &mut DescriptorChain<M>) -> Result<Request> {
+    pub fn parse<M>(desc_chain: &mut DescriptorChain<M>) -> Result<Request>
+    where
+        M: Deref,
+        M::Target: GuestMemory,
+    {
         let chain_head = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
         // The head contains the request type which MUST be readable.
         if chain_head.is_write_only() {
@@ -235,7 +241,7 @@ impl Request {
         }
 
         let status_desc = desc;
-        Request::check_status_desc::<<M as GuestAddressSpace>::M>(desc_chain.memory(), status_desc)?;
+        Request::check_status_desc(desc_chain.memory(), status_desc)?;
 
         request.status_addr = status_desc.addr();
 
         Ok(request)
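With the relaxed bounds, `Request::parse` no longer requires a `GuestAddressSpace`; any handle that derefs to a `GuestMemory` implementation is enough. A minimal sketch of a call site (the `virtio_blk` import path is hypothetical, and `Queue` refers to the single-parameter `Queue<M>` introduced later in this series):

```rust
use virtio_blk::request::Request; // hypothetical import path, for illustration only
use virtio_queue::Queue;
use vm_memory::GuestMemoryMmap;

// Drain every available descriptor chain and parse it as a block request.
// Error handling is deliberately minimal for the sketch.
fn drain_requests(queue: &mut Queue<&GuestMemoryMmap<()>>) {
    for mut chain in queue.iter().expect("available ring should be readable") {
        match Request::parse(&mut chain) {
            // A real device would execute `req` and then write back a status byte.
            Ok(req) => drop(req),
            Err(e) => eprintln!("failed to parse block request: {}", e),
        }
    }
}
```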
diff --git a/crates/virtio-queue/src/generic.rs b/crates/virtio-queue/src/generic.rs
new file mode 100644
index 00000000..0043bf88
--- /dev/null
+++ b/crates/virtio-queue/src/generic.rs
@@ -0,0 +1,199 @@
+// Copyright 2021 Red Hat, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
+
+use std::num::Wrapping;
+use std::ops::DerefMut;
+
+use crate::{Error, Queue, QueueGuard, QueueState, QueueSync};
+use std::sync::atomic::Ordering;
+use std::sync::{Arc, Mutex, MutexGuard};
+use vm_memory::GuestAddressSpace;
+
+/// Lifetime-generic guard associated with a `QueueStateT`. In practice,
+/// instead of having a `QueueStateT::Guard<'g>` generic associated type,
+/// you have to write `<Self::Guard as QueueStateGuard<'g>>::Out`.
+pub(crate) trait QueueStateGuard<'g> {
+    /// Type of the guard passed to the `with` method.
+    type Out: 'g + DerefMut<Target = QueueState>;
+}
+
+pub(crate) trait QueueT<M: GuestAddressSpace> {
+    /// Lifetime-generic guard. Usually this is just `Self`, and implementors
+    /// of `QueueT` also implement `QueueStateGuard`.
+    type Guard: for<'g> QueueStateGuard<'g>;
+
+    fn construct(mem: M, state: QueueState) -> Self;
+
+    fn with<
+        'a,
+        'g,
+        U,
+        F: FnOnce(QueueGuard<M::T, <Self::Guard as QueueStateGuard<'g>>::Out>) -> U,
+    >(
+        &'a mut self,
+        f: F,
+    ) -> U
+    where
+        'a: 'g;
+
+    /// Check whether the queue configuration is valid.
+    fn is_valid(&mut self) -> bool {
+        self.with(|qstate| qstate.is_valid())
+    }
+
+    /// Reset the queue to the initial state.
+    fn reset(&mut self) {
+        self.with(|mut qstate| qstate.reset())
+    }
+
+    /// Get the maximum size of the virtio queue.
+    fn max_size(&mut self) -> u16 {
+        self.with(|qstate| qstate.max_size())
+    }
+
+    /// Configure the queue size for the virtio queue.
+    ///
+    /// The `size` should be a power of two and less than or equal to the value reported by
+    /// `max_size()`; otherwise the requested size is ignored.
+    fn set_size(&mut self, size: u16) {
+        self.with(|mut qstate| qstate.set_size(size))
+    }
+
+    /// Check whether the queue is ready to be processed.
+    fn ready(&mut self) -> bool {
+        self.with(|qstate| qstate.ready())
+    }
+
+    /// Configure whether the queue is ready for processing.
+    fn set_ready(&mut self, ready: bool) {
+        self.with(|mut qstate| qstate.set_ready(ready))
+    }
+
+    /// Set descriptor table address for the queue.
+    ///
+    /// The descriptor table address is 64-bit; the corresponding part will be updated if `low`
+    /// and/or `high` is valid.
+    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
+        self.with(|mut qstate| qstate.set_desc_table_address(low, high))
+    }
+
+    /// Set available ring address for the queue.
+    ///
+    /// The available ring address is 64-bit; the corresponding part will be updated if `low`
+    /// and/or `high` is valid.
+    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
+        self.with(|mut qstate| qstate.set_avail_ring_address(low, high))
+    }
+
+    /// Set used ring address for the queue.
+    ///
+    /// The used ring address is 64-bit; the corresponding part will be updated if `low`
+    /// and/or `high` is valid.
+    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
+        self.with(|mut qstate| qstate.set_used_ring_address(low, high))
+    }
+
+    /// Enable/disable the VIRTIO_F_RING_EVENT_IDX feature for interrupt coalescing.
+    fn set_event_idx(&mut self, enabled: bool) {
+        self.with(|mut qstate| qstate.set_event_idx(enabled))
+    }
+
+    /// Read the `idx` field from the available ring.
+    fn avail_idx(&mut self, order: Ordering) -> Result<Wrapping<u16>, Error> {
+        self.with(|qstate| qstate.avail_idx(order))
+    }
+
+    /// Put a used descriptor head into the used ring.
+    fn add_used(&mut self, head_index: u16, len: u32) -> Result<(), Error> {
+        self.with(|mut qstate| qstate.add_used(head_index, len))
+    }
+
+    /// Enable notification events from the guest driver.
+    ///
+    /// Return true if one or more descriptors can be consumed from the available ring after
+    /// notifications were enabled (and thus it's possible there will be no corresponding
+    /// notification).
+    fn enable_notification(&mut self) -> Result<bool, Error> {
+        self.with(|mut qstate| qstate.enable_notification())
+    }
+
+    /// Disable notification events from the guest driver.
+    fn disable_notification(&mut self) -> Result<(), Error> {
+        self.with(|mut qstate| qstate.disable_notification())
+    }
+
+    /// Check whether a notification to the guest is needed.
+    ///
+    /// Please note this method has side effects: once it returns `true`, it assumes the
+    /// driver will actually be notified, remembers the associated index in the used ring, and
+    /// won't return `true` again until the driver updates `used_event` and/or the notification
+    /// conditions hold once more.
+    fn needs_notification(&mut self) -> Result<bool, Error> {
+        self.with(|mut qstate| qstate.needs_notification())
+    }
+
+    /// Return the index for the next descriptor in the available ring.
+    fn next_avail(&mut self) -> u16 {
+        self.with(|qstate| qstate.next_avail())
+    }
+
+    /// Set the index for the next descriptor in the available ring.
+    fn set_next_avail(&mut self, next_avail: u16) {
+        self.with(|mut qstate| qstate.set_next_avail(next_avail))
+    }
+}
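The `for<'g> QueueStateGuard<'g>` bound is the usual workaround for generic associated types, which were not yet stable when this was written. The shape is easier to see on a toy example; every name below is invented purely for illustration:

```rust
use std::ops::DerefMut;

struct State(u32);

// Stand-in for QueueStateGuard: a lifetime-parameterized "guard family".
trait Guard<'g> {
    type Out: 'g + DerefMut<Target = State>;
}

// Stand-in for QueueT. `with` hands the closure a guard whose concrete type
// may borrow from `self`, without needing a `Guard<'g>` GAT on this trait.
trait Holder {
    type Guard: for<'g> Guard<'g>;

    fn with<'a, 'g, U, F: FnOnce(<Self::Guard as Guard<'g>>::Out) -> U>(&'a mut self, f: F) -> U
    where
        'a: 'g;
}

struct Direct(State);

impl<'g> Guard<'g> for Direct {
    type Out = &'g mut State;
}

impl Holder for Direct {
    type Guard = Self;

    fn with<'a, 'g, U, F: FnOnce(<Self::Guard as Guard<'g>>::Out) -> U>(&'a mut self, f: F) -> U
    where
        'a: 'g,
    {
        f(&mut self.0)
    }
}

fn main() {
    let mut d = Direct(State(7));
    // The closure sees a &mut State that borrows from `d`.
    let doubled = d.with(|s| {
        s.0 += 1;
        s.0 * 2
    });
    assert_eq!(doubled, 16);
}
```

A `QueueSync`-style implementor would instead set `Out = MutexGuard<'g, State>`, exactly as the real impls below do.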
+
+impl<'g, M: GuestAddressSpace> QueueStateGuard<'g> for Queue<M> {
+    type Out = &'g mut QueueState;
+}
+
+impl<M: GuestAddressSpace> QueueT<M> for Queue<M> {
+    type Guard = Self;
+
+    fn construct(mem: M, state: QueueState) -> Self {
+        Queue { mem, state }
+    }
+
+    fn with<
+        'a,
+        'g,
+        U,
+        F: FnOnce(QueueGuard<M::T, <Self::Guard as QueueStateGuard<'g>>::Out>) -> U,
+    >(
+        &'a mut self,
+        f: F,
+    ) -> U
+    where
+        'a: 'g,
+    {
+        f(self.acquire())
+    }
+}
+
+impl<'g, M: GuestAddressSpace> QueueStateGuard<'g> for QueueSync<M> {
+    type Out = MutexGuard<'g, QueueState>;
+}
+
+impl<M: GuestAddressSpace> QueueT<M> for QueueSync<M> {
+    type Guard = Self;
+
+    fn construct(mem: M, state: QueueState) -> Self {
+        QueueSync {
+            mem,
+            state: Arc::new(Mutex::new(state)),
+        }
+    }
+
+    fn with<
+        'a,
+        'g,
+        U,
+        F: FnOnce(QueueGuard<M::T, <Self::Guard as QueueStateGuard<'g>>::Out>) -> U,
+    >(
+        &'a mut self,
+        f: F,
+    ) -> U
+    where
+        'a: 'g,
+    {
+        f(self.lock())
+    }
+}
diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs
index 40184d92..ab4b13b9 100644
--- a/crates/virtio-queue/src/lib.rs
+++ b/crates/virtio-queue/src/lib.rs
@@ -19,6 +19,8 @@ pub mod defs;
 #[cfg(any(test, feature = "test-utils"))]
 pub mod mock;
 
+pub(crate) mod generic;
+
 use std::convert::TryFrom;
 use std::fmt::{self, Debug, Display};
 use std::marker::PhantomData;
@@ -148,8 +150,8 @@ unsafe impl ByteValued for Descriptor {}
 
 /// A virtio descriptor chain.
 #[derive(Clone, Debug)]
-pub struct DescriptorChain<M: GuestAddressSpace> {
-    mem: M::T,
+pub struct DescriptorChain<M> {
+    mem: M,
     desc_table: GuestAddress,
     queue_size: u16,
     head_index: u16,
@@ -158,9 +160,13 @@ pub struct DescriptorChain<M: GuestAddressSpace> {
     is_indirect: bool,
 }
 
-impl<M: GuestAddressSpace> DescriptorChain<M> {
+impl<M> DescriptorChain<M>
+where
+    M: Deref,
+    M::Target: GuestMemory,
+{
     fn with_ttl(
-        mem: M::T,
+        mem: M,
         desc_table: GuestAddress,
         queue_size: u16,
         ttl: u16,
@@ -178,7 +184,7 @@ impl<M: GuestAddressSpace> DescriptorChain<M> {
     }
 
     /// Create a new `DescriptorChain` instance.
-    fn new(mem: M::T, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
+    fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
         Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
     }
 
@@ -189,8 +195,8 @@ impl<M: GuestAddressSpace> DescriptorChain<M> {
 
     /// Return a `GuestMemory` object that can be used to access the buffers
     /// pointed to by the descriptor chain.
-    pub fn memory(&self) -> &M::M {
-        &*self.mem
+    pub fn memory(&self) -> &M::Target {
+        self.mem.deref()
     }
 
     /// Returns an iterator that only yields the readable descriptors in the chain.
@@ -239,7 +245,11 @@ impl<M: GuestAddressSpace> DescriptorChain<M> {
     }
 }
 
-impl<M: GuestAddressSpace> Iterator for DescriptorChain<M> {
+impl<M> Iterator for DescriptorChain<M>
+where
+    M: Deref,
+    M::Target: GuestMemory,
+{
     type Item = Descriptor;
 
     /// Returns the next descriptor in this descriptor chain, if there is one.
@@ -284,12 +294,16 @@ impl<M: GuestAddressSpace> Iterator for DescriptorChain<M> {
 
 /// An iterator for readable or writable descriptors.
 #[derive(Clone)]
-pub struct DescriptorChainRwIter<M: GuestAddressSpace> {
+pub struct DescriptorChainRwIter<M> {
     chain: DescriptorChain<M>,
     writable: bool,
 }
 
-impl<M: GuestAddressSpace> Iterator for DescriptorChainRwIter<M> {
+impl<M> Iterator for DescriptorChainRwIter<M>
+where
+    M: Deref,
+    M::Target: GuestMemory,
+{
     type Item = Descriptor;
 
     /// Returns the next descriptor in this descriptor chain, if there is one.
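The new `M: Deref, M::Target: GuestMemory` bounds are what let `DescriptorChain` work both with plain references and with the snapshot guard returned by `GuestMemoryAtomic::memory()`. A small sketch of the idea, assuming vm-memory's `backend-mmap` and `backend-atomic` features:

```rust
use std::ops::Deref;
use vm_memory::{
    GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
};

// Any memory handle that derefs to a GuestMemory implementation is accepted.
fn last_addr<M>(mem: M) -> GuestAddress
where
    M: Deref,
    M::Target: GuestMemory,
{
    mem.deref().last_addr()
}

fn main() {
    let mmap = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();

    // A plain reference is a valid handle...
    assert_eq!(last_addr(&mmap), GuestAddress(0xfff));

    // ...and so is the guard handed out by GuestMemoryAtomic::memory().
    let atomic = GuestMemoryAtomic::new(mmap);
    assert_eq!(last_addr(atomic.memory()), GuestAddress(0xfff));
}
```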
@@ -313,9 +327,9 @@ impl<M: GuestAddressSpace> Iterator for DescriptorChainRwIter<M> {
 
 // We can't derive Debug, because rustc doesn't generate the M::T: Debug
 // constraint
-impl<M: GuestAddressSpace> Debug for DescriptorChainRwIter<M>
+impl<M> Debug for DescriptorChainRwIter<M>
 where
-    M::T: Debug,
+    M: Debug,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("DescriptorChainRwIter")
@@ -327,8 +341,8 @@ where
 
 /// Consuming iterator over all available descriptor chain heads in the queue.
 #[derive(Debug)]
-pub struct AvailIter<'b, M: GuestAddressSpace> {
-    mem: M::T,
+pub struct AvailIter<'b, M> {
+    mem: M,
     desc_table: GuestAddress,
     avail_ring: GuestAddress,
     last_index: Wrapping<u16>,
@@ -336,7 +350,7 @@ pub struct AvailIter<'b, M: GuestAddressSpace> {
     next_avail: &'b mut Wrapping<u16>,
 }
 
-impl<'b, M: GuestAddressSpace> AvailIter<'b, M> {
+impl<'b, M> AvailIter<'b, M> {
     /// Goes back one position in the available descriptor chain offered by the driver.
     ///
     /// Rust does not support bidirectional iterators. This is the only way to revert the effect
     /// of an `iter().next()`.
@@ -349,7 +363,11 @@ impl<'b, M: GuestAddressSpace> AvailIter<'b, M> {
     }
 }
 
-impl<'b, M: GuestAddressSpace> Iterator for AvailIter<'b, M> {
+impl<'b, M> Iterator for AvailIter<'b, M>
+where
+    M: Clone + Deref,
+    M::Target: GuestMemory,
+{
     type Item = DescriptorChain<M>;
 
     fn next(&mut self) -> Option<Self::Item> {
@@ -406,120 +424,6 @@ impl VirtqUsedElem {
 
 unsafe impl ByteValued for VirtqUsedElem {}
 
-/// Struct to hold an exclusive reference to the underlying `QueueState` object.
-pub enum QueueStateGuard<'a, M: GuestAddressSpace> {
-    /// A reference to a `QueueState` object.
-    StateObject(&'a mut QueueState<M>),
-    /// A `MutexGuard` for a `QueueState` object.
-    MutexGuard(MutexGuard<'a, QueueState<M>>),
-}
-
-impl<'a, M: GuestAddressSpace> Deref for QueueStateGuard<'a, M> {
-    type Target = QueueState<M>;
-
-    fn deref(&self) -> &Self::Target {
-        match self {
-            QueueStateGuard::StateObject(v) => v,
-            QueueStateGuard::MutexGuard(v) => v.deref(),
-        }
-    }
-}
-
-impl<'a, M: GuestAddressSpace> DerefMut for QueueStateGuard<'a, M> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        match self {
-            QueueStateGuard::StateObject(v) => v,
-            QueueStateGuard::MutexGuard(v) => v.deref_mut(),
-        }
-    }
-}
-
-/// Trait to access and manipulate a virtio queue.
-///
-/// To optimize for performance, different implementations of the `QueueStateT` trait may be
-/// provided for single-threaded context and multi-threaded context.
-pub trait QueueStateT<M: GuestAddressSpace> {
-    /// Construct an empty virtio queue state object with the given `max_size`.
-    fn new(max_size: u16) -> Self;
-
-    /// Check whether the queue configuration is valid.
-    fn is_valid(&self, mem: &M::T) -> bool;
-
-    /// Reset the queue to the initial state.
-    fn reset(&mut self);
-
-    /// Get an exclusive reference to the underlying `QueueState` object.
-    ///
-    /// Logically this method will acquire the underlying lock protecting the `QueueState` Object.
-    /// The lock will be released when the returned object gets dropped.
-    fn lock(&mut self) -> QueueStateGuard<'_, M>;
-
-    /// Get the maximum size of the virtio queue.
-    fn max_size(&self) -> u16;
-
-    /// Configure the queue size for the virtio queue.
-    ///
-    /// The `size` should power of two and less than or equal to value reported by `max_size()`,
-    /// otherwise it will panic.
-    fn set_size(&mut self, size: u16);
-
-    /// Check whether the queue is ready to be processed.
-    fn ready(&self) -> bool;
-
-    /// Configure the queue to ready for processing.
-    fn set_ready(&mut self, ready: bool);
-
-    /// Set descriptor table address for the queue.
-    ///
-    /// The descriptor table address is 64-bit, the corresponding part will be updated if 'low'
-    /// and/or `high` is valid.
-    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>);
-
-    /// Set available ring address for the queue.
-    ///
-    /// The available ring address is 64-bit, the corresponding part will be updated if 'low'
-    /// and/or `high` is valid.
-    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>);
-
-    /// Set used ring address for the queue.
-    ///
-    /// The used ring address is 64-bit, the corresponding part will be updated if 'low'
-    /// and/or `high` is valid.
-    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>);
-
-    /// Enable/disable the VIRTIO_F_RING_EVENT_IDX feature for interrupt coalescing.
-    fn set_event_idx(&mut self, enabled: bool);
-
-    /// Read the `idx` field from the available ring.
-    fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result<Wrapping<u16>, Error>;
-
-    /// Put a used descriptor head into the used ring.
-    fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error>;
-
-    /// Enable notification events from the guest driver.
-    ///
-    /// Return true if one or more descriptors can be consumed from the available ring after
-    /// notifications were enabled (and thus it's possible there will be no corresponding
-    /// notification).
-    fn enable_notification(&mut self, mem: &M::T) -> Result<bool, Error>;
-
-    /// Disable notification events from the guest driver.
-    fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error>;
-
-    /// Check whether a notification to the guest is needed.
-    ///
-    /// Please note this method has side effects: once it returns `true`, it considers the
-    /// driver will actually be notified, remember the associated index in the used ring, and
-    /// won't return `true` again until the driver updates `used_event` and/or the notification
-    /// conditions hold once more.
-    fn needs_notification(&mut self, mem: &M::T) -> Result<bool, Error>;
-
-    /// Return the index for the next descriptor in the available ring.
-    fn next_avail(&self) -> u16;
-
-    /// Set the index for the next descriptor in the available ring.
-    fn set_next_avail(&mut self, next_avail: u16);
-}
 
 // Default addresses for each virtqueue part
 const DEFAULT_DESC_TABLE_ADDR: u64 = 0x0;
 const DEFAULT_AVAIL_RING_ADDR: u64 = 0x0;
@@ -527,7 +431,7 @@ const DEFAULT_USED_RING_ADDR: u64 = 0x0;
 
 /// Struct to maintain information and manipulate state of a virtio queue.
 #[derive(Clone, Debug)]
-pub struct QueueState<M: GuestAddressSpace> {
+pub struct QueueState {
     /// The maximal size in elements offered by the device
     pub max_size: u16,
 
@@ -557,14 +461,16 @@ pub struct QueueState<M: GuestAddressSpace> {
 
     /// Guest physical address of the used ring
     pub used_ring: GuestAddress,
-
-    phantom: PhantomData<M>,
 }
 
-impl<M: GuestAddressSpace> QueueState<M> {
+impl QueueState {
     /// Get a consuming iterator over all available descriptor chain heads offered by the driver.
-    pub fn iter(&mut self, mem: M::T) -> Result<AvailIter<'_, M>, Error> {
-        self.avail_idx(&mem, Ordering::Acquire)
+    pub fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
+    where
+        M: Deref,
+        M::Target: GuestMemory + Sized,
+    {
+        self.avail_idx(mem.deref(), Ordering::Acquire)
             .map(move |idx| AvailIter {
                 mem,
                 desc_table: self.desc_table,
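As an aside, the consumer-side pattern this enables combines `iter()`, `add_used()`, and the notification calls. An illustrative sketch against the new single-parameter `Queue` (not part of the patch; the queue setup and lengths are assumed):

```rust
use virtio_queue::{Error, Queue};
use vm_memory::GuestMemoryMmap;

// Drain the available ring once, retire every chain, and loop again if new
// buffers raced in while notifications were being re-enabled.
fn drain(queue: &mut Queue<&GuestMemoryMmap<()>>) -> Result<(), Error> {
    loop {
        // Collect (head_index, total_len) first: iterating borrows the queue.
        let mut done = Vec::new();
        for chain in queue.iter()? {
            let head = chain.head_index();
            let len: u32 = chain.clone().map(|d| d.len()).sum();
            done.push((head, len));
        }
        for (head, len) in done {
            queue.add_used(head, len)?;
        }
        // enable_notification() returns true if more descriptors showed up.
        if !queue.enable_notification()? {
            return Ok(());
        }
        queue.disable_notification()?;
    }
}
```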
@@ -577,7 +483,12 @@ impl<M: GuestAddressSpace> QueueState<M> {
 
     // Helper method that writes `val` to the `avail_event` field of the used ring, using
     // the provided ordering.
-    fn set_avail_event(&self, mem: &M::T, val: u16, order: Ordering) -> Result<(), Error> {
+    fn set_avail_event<M: GuestMemory>(
+        &self,
+        mem: &M,
+        val: u16,
+        order: Ordering,
+    ) -> Result<(), Error> {
         let elem_sz = VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
         let offset = VIRTQ_USED_RING_HEADER_SIZE + elem_sz;
         let addr = self.used_ring.unchecked_add(offset);
@@ -586,7 +497,12 @@ impl<M: GuestAddressSpace> QueueState<M> {
     }
 
     // Set the value of the `flags` field of the used ring, applying the specified ordering.
-    fn set_used_flags(&mut self, mem: &M::T, val: u16, order: Ordering) -> Result<(), Error> {
+    fn set_used_flags<M: GuestMemory>(
+        &mut self,
+        mem: &M,
+        val: u16,
+        order: Ordering,
+    ) -> Result<(), Error> {
         mem.store(val, self.used_ring, order)
             .map_err(Error::GuestMemory)
     }
@@ -595,7 +511,7 @@ impl<M: GuestAddressSpace> QueueState<M> {
     //
     // Every access in this method uses `Relaxed` ordering because a fence is added by the caller
     // when appropriate.
-    fn set_notification(&mut self, mem: &M::T, enable: bool) -> Result<(), Error> {
+    fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
         if enable {
             if self.event_idx_enabled {
                 // We call `set_avail_event` using the `next_avail` value, instead of reading
@@ -624,7 +540,7 @@ impl<M: GuestAddressSpace> QueueState<M> {
     /// Neither of these interrupt suppression methods are reliable, as they are not synchronized
     /// with the device, but they serve as useful optimizations. So we only ensure access to the
     /// virtq_avail.used_event is atomic, but do not need to synchronize with other memory accesses.
-    fn used_event(&self, mem: &M::T, order: Ordering) -> Result<Wrapping<u16>, Error> {
+    fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
         // Safe because we have validated the queue and access guest
         // memory through GuestMemory interfaces.
         let elem_sz = u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
@@ -635,9 +551,7 @@ impl<M: GuestAddressSpace> QueueState<M> {
             .map(Wrapping)
             .map_err(Error::GuestMemory)
     }
-}
 
-impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
     fn new(max_size: u16) -> Self {
         QueueState {
             max_size,
@@ -650,11 +564,10 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
             next_used: Wrapping(0),
             event_idx_enabled: false,
             signalled_used: None,
-            phantom: PhantomData,
         }
     }
 
-    fn is_valid(&self, mem: &M::T) -> bool {
+    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
         let queue_size = self.size as u64;
         let desc_table = self.desc_table;
         let desc_table_size = size_of::<Descriptor>() as u64 * queue_size;
@@ -712,10 +625,6 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
         self.event_idx_enabled = false;
     }
 
-    fn lock(&mut self) -> QueueStateGuard<'_, M> {
-        QueueStateGuard::StateObject(self)
-    }
-
     fn max_size(&self) -> u16 {
         self.max_size
     }
@@ -777,7 +686,7 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
         self.event_idx_enabled = enabled;
     }
 
-    fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result<Wrapping<u16>, Error> {
+    fn avail_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
         let addr = self.avail_ring.unchecked_add(2);
 
         mem.load(addr, order)
             .map(Wrapping)
             .map_err(Error::GuestMemory)
     }
 
-    fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error> {
+    fn add_used<M: GuestMemory>(
+        &mut self,
+        mem: &M,
+        head_index: u16,
+        len: u32,
+    ) -> Result<(), Error> {
         if head_index >= self.size {
             error!(
                 "attempted to add out of bounds descriptor to used ring: {}",
                 head_index
             );
@@ -831,7 +745,7 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
     //     break;
     //   }
     // }
-    fn enable_notification(&mut self, mem: &M::T) -> Result<bool, Error> {
+    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
         self.set_notification(mem, true)?;
         // Ensures the following read is not reordered before any previous write operation.
         fence(Ordering::SeqCst);
@@ -846,11 +760,11 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
             .map(|idx| idx != self.next_avail)
     }
 
-    fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error> {
+    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
         self.set_notification(mem, false)
     }
 
-    fn needs_notification(&mut self, mem: &M::T) -> Result<bool, Error> {
+    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
         let used_idx = self.next_used;
 
         // Complete all the writes in add_used() before reading the event.
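The `used_event` comparison inside `needs_notification` implements the virtio spec's `vring_need_event` rule: notify if and only if the new used index has moved past the driver's `used_event` mark since the last notification. A worked example of the wrapping arithmetic (illustrative only, separate from the patch):

```rust
use std::num::Wrapping;

// vring_need_event from the virtio spec, on Wrapping<u16> arithmetic.
fn need_event(used_event: Wrapping<u16>, new: Wrapping<u16>, old: Wrapping<u16>) -> bool {
    (new - used_event - Wrapping(1)) < (new - old)
}

fn main() {
    // Driver asked to be woken once entry 5 is used; we moved from 5 to 7.
    assert!(need_event(Wrapping(5), Wrapping(7), Wrapping(5)));
    // Already past the mark and the driver has not updated it: stay quiet.
    assert!(!need_event(Wrapping(5), Wrapping(7), Wrapping(6)));
    // The comparison stays correct across the u16 wrap-around boundary.
    assert!(need_event(Wrapping(2), Wrapping(3), Wrapping(65530)));
}
```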
@@ -883,114 +797,130 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
     }
 }
 
-/// Struct to maintain information and manipulate state of a virtio queue for multi-threaded
-/// context.
-#[derive(Clone, Debug)]
-pub struct QueueStateSync<M: GuestAddressSpace> {
-    state: Arc<Mutex<QueueState<M>>>,
-}
-
-impl<M: GuestAddressSpace> QueueStateT<M> for QueueStateSync<M> {
-    fn new(max_size: u16) -> Self {
-        QueueStateSync {
-            state: Arc::new(Mutex::new(QueueState::new(max_size))),
-        }
-    }
-
-    fn is_valid(&self, mem: &M::T) -> bool {
-        self.state.lock().unwrap().is_valid(mem)
-    }
-
-    fn reset(&mut self) {
-        self.state.lock().unwrap().reset();
-    }
-
-    fn lock(&mut self) -> QueueStateGuard<'_, M> {
-        QueueStateGuard::MutexGuard(self.state.lock().unwrap())
-    }
-
-    fn max_size(&self) -> u16 {
-        self.state.lock().unwrap().max_size()
-    }
-
-    fn set_size(&mut self, size: u16) {
-        self.state.lock().unwrap().set_size(size);
-    }
+/// A convenient wrapper struct for a virtio queue, with associated GuestMemory object.
+pub struct QueueGuard<'a, M: 'a + Clone + Deref, S: 'a + DerefMut<Target = QueueState>>
+where
+    M::Target: GuestMemory + Sized,
+{
+    /// Guest memory object associated with the queue.
+    pub mem: M,
 
-    fn ready(&self) -> bool {
-        self.state.lock().unwrap().ready
-    }
+    /// Virtio queue state.
+    pub state: S,
 
-    fn set_ready(&mut self, ready: bool) {
-        self.state.lock().unwrap().set_ready(ready)
-    }
+    _marker: PhantomData<&'a S::Target>,
+}
 
-    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
-        self.state.lock().unwrap().set_desc_table_address(low, high);
+impl<'a, M: 'a + Clone + Deref, S: 'a + DerefMut<Target = QueueState>> QueueGuard<'a, M, S>
+where
+    M::Target: GuestMemory + Sized,
+{
+    fn new(mem: M, state: S) -> QueueGuard<'a, M, S> {
+        QueueGuard {
+            mem,
+            state,
+            _marker: PhantomData,
+        }
     }
 
-    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
-        self.state.lock().unwrap().set_avail_ring_address(low, high);
+    /// Check whether the queue configuration is valid.
+    pub fn is_valid(&self) -> bool {
+        self.state.is_valid(self.mem.deref())
     }
 
-    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
-        self.state.lock().unwrap().set_used_ring_address(low, high);
+    /// Read the `idx` field from the available ring.
+    pub fn avail_idx(&self, order: Ordering) -> Result<Wrapping<u16>, Error> {
+        self.state.avail_idx(self.mem.deref(), order)
     }
 
-    fn set_event_idx(&mut self, enabled: bool) {
-        self.state.lock().unwrap().set_event_idx(enabled);
+    /// Put a used descriptor head into the used ring.
+    pub fn add_used(&mut self, head_index: u16, len: u32) -> Result<(), Error> {
+        self.state.add_used(self.mem.deref(), head_index, len)
     }
 
-    fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result<Wrapping<u16>, Error> {
-        self.state.lock().unwrap().avail_idx(mem, order)
+    /// Enable notification events from the guest driver.
+    ///
+    /// Return true if one or more descriptors can be consumed from the available ring after
+    /// notifications were enabled (and thus it's possible there will be no corresponding
+    /// notification).
+    pub fn enable_notification(&mut self) -> Result<bool, Error> {
+        self.state.enable_notification(self.mem.deref())
    }
 
-    fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error> {
-        self.state.lock().unwrap().add_used(mem, head_index, len)
+    /// Disable notification events from the guest driver.
+    pub fn disable_notification(&mut self) -> Result<(), Error> {
+        self.state.disable_notification(self.mem.deref())
     }
 
-    fn enable_notification(&mut self, mem: &M::T) -> Result<bool, Error> {
-        self.state.lock().unwrap().enable_notification(mem)
+    /// Check whether a notification to the guest is needed.
+    ///
+    /// Please note this method has side effects: once it returns `true`, it assumes the
+    /// driver will actually be notified, remembers the associated index in the used ring, and
+    /// won't return `true` again until the driver updates `used_event` and/or the notification
+    /// conditions hold once more.
+    pub fn needs_notification(&mut self) -> Result<bool, Error> {
+        self.state.needs_notification(self.mem.deref())
     }
 
-    fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error> {
-        self.state.lock().unwrap().disable_notification(mem)
+    /// A consuming iterator over all available descriptor chain heads offered by the driver.
+    pub fn iter(&mut self) -> Result<AvailIter<'_, M>, Error> {
+        // FIXME: this is inefficient
+        self.state.iter(self.mem.clone())
     }
+}
 
-    fn needs_notification(&mut self, mem: &M::T) -> Result<bool, Error> {
-        self.state.lock().unwrap().needs_notification(mem)
-    }
+impl<'a, M: 'a + Clone + Deref, S: 'a + DerefMut<Target = QueueState>> Deref
+    for QueueGuard<'a, M, S>
+where
+    M::Target: GuestMemory + Sized,
+{
+    type Target = QueueState;
 
-    fn next_avail(&self) -> u16 {
-        self.state.lock().unwrap().next_avail()
+    /// Access the underlying `QueueState` object.
+    fn deref(&self) -> &QueueState {
+        &self.state
     }
+}
 
-    fn set_next_avail(&mut self, next_avail: u16) {
-        self.state.lock().unwrap().set_next_avail(next_avail);
+impl<'a, M: 'a + Clone + Deref, S: 'a + DerefMut<Target = QueueState>> DerefMut
+    for QueueGuard<'a, M, S>
+where
+    M::Target: GuestMemory + Sized,
+{
+    /// Mutably access the underlying `QueueState` object.
+    fn deref_mut(&mut self) -> &mut QueueState {
+        &mut self.state
     }
 }
 
-/// A convenient wrapper struct for a virtio queue, with associated GuestMemory object.
+/// A convenient wrapper struct for a virtio queue, with associated GuestAddressSpace
+/// object.
 #[derive(Clone, Debug)]
-pub struct Queue<M: GuestAddressSpace, S: QueueStateT<M> = QueueState<M>> {
+pub struct Queue<M: GuestAddressSpace> {
     /// Guest memory object associated with the queue.
     pub mem: M,
 
     /// Virtio queue state.
-    pub state: S,
+    pub state: QueueState,
 }
 
-impl<M: GuestAddressSpace, S: QueueStateT<M>> Queue<M, S> {
+impl<M: GuestAddressSpace> Queue<M> {
     /// Construct an empty virtio queue with the given `max_size`.
     pub fn new(mem: M, max_size: u16) -> Self {
         Queue {
             mem,
-            state: S::new(max_size),
+            state: QueueState::new(max_size),
         }
     }
 
+    /// Return a QueueGuard that allows to do multiple QueueState operations
+    /// using the same GuestMemory.
+    pub fn acquire(&mut self) -> QueueGuard<M::T, &mut QueueState> {
+        QueueGuard::new(self.mem.memory(), &mut self.state)
+    }
+
     /// Check whether the queue configuration is valid.
     pub fn is_valid(&self) -> bool {
-        self.state.is_valid(&self.mem.memory())
+        self.state.is_valid(self.mem.memory().deref())
     }
 
     /// Reset the queue to the initial state.
@@ -998,14 +928,6 @@ impl<M: GuestAddressSpace> Queue<M> {
         self.state.reset()
     }
 
-    /// Get an exclusive reference to the underlying `QueueState` object.
-    ///
-    /// Logically this method will acquire the underlying lock protecting the `QueueState` Object.
-    /// The lock will be released when the returned object gets dropped.
-    pub fn lock(&mut self) -> QueueStateGuard<'_, M> {
-        self.state.lock()
-    }
-
     /// Get the maximum size of the virtio queue.
     pub fn max_size(&self) -> u16 {
         self.state.max_size()
     }
@@ -1060,12 +982,13 @@ impl<M: GuestAddressSpace> Queue<M> {
 
     /// Read the `idx` field from the available ring.
     pub fn avail_idx(&self, order: Ordering) -> Result<Wrapping<u16>, Error> {
-        self.state.avail_idx(&self.mem.memory(), order)
+        self.state.avail_idx(self.mem.memory().deref(), order)
     }
 
     /// Put a used descriptor head into the used ring.
     pub fn add_used(&mut self, head_index: u16, len: u32) -> Result<(), Error> {
-        self.state.add_used(&self.mem.memory(), head_index, len)
+        self.state
+            .add_used(self.mem.memory().deref(), head_index, len)
     }
 
     /// Enable notification events from the guest driver.
@@ -1074,12 +997,12 @@ impl<M: GuestAddressSpace> Queue<M> {
     /// notifications were enabled (and thus it's possible there will be no corresponding
     /// notification).
     pub fn enable_notification(&mut self) -> Result<bool, Error> {
-        self.state.enable_notification(&self.mem.memory())
+        self.state.enable_notification(self.mem.memory().deref())
     }
 
     /// Disable notification events from the guest driver.
     pub fn disable_notification(&mut self) -> Result<(), Error> {
-        self.state.disable_notification(&self.mem.memory())
+        self.state.disable_notification(self.mem.memory().deref())
     }
 
     /// Check whether a notification to the guest is needed.
@@ -1089,7 +1012,7 @@ impl<M: GuestAddressSpace> Queue<M> {
     /// won't return `true` again until the driver updates `used_event` and/or the notification
     /// conditions hold once more.
     pub fn needs_notification(&mut self) -> Result<bool, Error> {
-        self.state.needs_notification(&self.mem.memory())
+        self.state.needs_notification(self.mem.memory().deref())
     }
 
     /// Return the index for the next descriptor in the available ring.
@@ -1101,23 +1024,51 @@ impl<M: GuestAddressSpace> Queue<M> {
     pub fn set_next_avail(&mut self, next_avail: u16) {
         self.state.set_next_avail(next_avail);
     }
-}
 
-impl<M: GuestAddressSpace> Queue<M, QueueState<M>> {
     /// A consuming iterator over all available descriptor chain heads offered by the driver.
-    pub fn iter(&mut self) -> Result<AvailIter<'_, M>, Error> {
+    pub fn iter(&mut self) -> Result<AvailIter<'_, M::T>, Error> {
         self.state.iter(self.mem.memory())
     }
 }
 
+/// A convenient wrapper struct for a thread-safe virtio queue, with associated
+/// GuestAddressSpace object.
+#[derive(Clone, Debug)]
+pub struct QueueSync<M: GuestAddressSpace> {
+    /// Guest memory object associated with the queue.
+    pub mem: M,
+
+    /// Virtio queue state.
+    pub state: Arc<Mutex<QueueState>>,
+}
+
+impl<M: GuestAddressSpace> QueueSync<M> {
+    /// Construct an empty virtio queue with the given `max_size`.
+    pub fn new(mem: M, max_size: u16) -> Self {
+        QueueSync {
+            mem,
+            state: Arc::new(Mutex::new(QueueState::new(max_size))),
+        }
+    }
+
+    /// Get an exclusive reference to the underlying `QueueState` object.
+    ///
+    /// Logically this method will acquire the underlying lock protecting the `QueueState`
+    /// object. The lock will be released when the returned object gets dropped.
+    pub fn lock(&self) -> QueueGuard<M::T, MutexGuard<QueueState>> {
+        QueueGuard::new(self.mem.memory(), self.state.lock().unwrap())
+    }
+}
+
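`Queue::acquire()` and `QueueSync::lock()` both hand out a `QueueGuard`, so batched operations look identical for the two wrappers. A sketch under the assumption of a `GuestMemoryAtomic` address space (head indices are placeholders; a real device takes them from the available ring):

```rust
use virtio_queue::{Error, Queue, QueueSync};
use vm_memory::{GuestMemoryAtomic, GuestMemoryMmap};

type Mem = GuestMemoryAtomic<GuestMemoryMmap<()>>;

// acquire() borrows the state mutably; lock() takes the mutex. Either way the
// guard snapshots mem.memory() once and reuses it for every call.
fn retire(q: &mut Queue<Mem>, qs: &QueueSync<Mem>) -> Result<(), Error> {
    let mut g = q.acquire();
    g.add_used(0, 0x100)?;
    let _ = g.needs_notification()?;
    drop(g);

    let mut g = qs.lock();
    g.add_used(1, 0x100)?;
    let _ = g.needs_notification()?;
    Ok(())
}
```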
 #[cfg(test)]
 mod tests {
     use super::*;
+    use generic::QueueT;
     use memoffset::offset_of;
     use mock::{DescriptorTable, MockSplitQueue};
     use vm_memory::{GuestAddress, GuestMemoryMmap};
 
+    type AddrSpace = GuestMemoryMmap<()>;
+
     #[test]
     pub fn test_offset() {
         assert_eq!(offset_of!(Descriptor, addr), 0);
@@ -1128,21 +1079,19 @@ mod tests {
 
     #[test]
     fn test_checked_new_descriptor_chain() {
-        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
         let vq = MockSplitQueue::new(m, 16);
 
         assert!(vq.end().0 < 0x1000);
 
         // index >= queue_size
-        assert!(
-            DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 16)
-                .next()
-                .is_none()
-        );
+        assert!(DescriptorChain::<&AddrSpace>::new(m, vq.start(), 16, 16)
+            .next()
+            .is_none());
 
         // desc_table address is way off
         assert!(
-            DescriptorChain::<&GuestMemoryMmap>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0)
+            DescriptorChain::<&AddrSpace>::new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0)
                 .next()
                 .is_none()
         );
@@ -1153,7 +1102,7 @@ mod tests {
         let desc = Descriptor::new(0x1000, 0x1000, VIRTQ_DESC_F_NEXT, 16);
         vq.desc_table().store(0, desc);
 
-        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
+        let mut c = DescriptorChain::<&AddrSpace>::new(m, vq.start(), 16, 0);
         c.next().unwrap();
         assert!(c.next().is_none());
     }
@@ -1166,7 +1115,7 @@ mod tests {
         let desc = Descriptor::new(0x2000, 0x1000, 0, 0);
         vq.desc_table().store(1, desc);
 
-        let mut c = DescriptorChain::<&GuestMemoryMmap>::new(m, vq.start(), 16, 0);
+        let mut c = DescriptorChain::<&AddrSpace>::new(m, vq.start(), 16, 0);
 
         assert_eq!(
             c.memory() as *const GuestMemoryMmap,
@@ -1190,7 +1139,7 @@ mod tests {
 
     #[test]
     fn test_new_from_indirect_descriptor() {
-        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
         let vq = MockSplitQueue::new(m, 16);
         let dtable = vq.desc_table();
@@ -1202,7 +1151,7 @@ mod tests {
         let desc = Descriptor::new(0x3000, 0x1000, 0, 0);
         dtable.store(2, desc);
 
-        let mut c: DescriptorChain<&GuestMemoryMmap> = DescriptorChain::new(m, vq.start(), 16, 0);
+        let mut c: DescriptorChain<&AddrSpace> = DescriptorChain::new(m, vq.start(), 16, 0);
 
         // The chain logic hasn't parsed the indirect descriptor yet.
         assert!(!c.is_indirect);
@@ -1238,135 +1187,133 @@ mod tests {
 
     #[test]
     fn test_indirect_descriptor_err() {
         {
-            let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+            let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
             let vq = MockSplitQueue::new(m, 16);
 
             // create a chain with a descriptor pointing to an indirect table
             let desc = Descriptor::new(0x1001, 0x1000, VIRTQ_DESC_F_INDIRECT, 0);
             vq.desc_table().store(0, desc);
 
-            let mut c: DescriptorChain<&GuestMemoryMmap> =
-                DescriptorChain::new(m, vq.start(), 16, 0);
+            let mut c: DescriptorChain<&AddrSpace> = DescriptorChain::new(m, vq.start(), 16, 0);
 
             assert!(c.next().is_none());
         }
 
         {
-            let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+            let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
             let vq = MockSplitQueue::new(m, 16);
 
             // create a chain with a descriptor pointing to an indirect table
             let desc = Descriptor::new(0x1000, 0x1001, VIRTQ_DESC_F_INDIRECT, 0);
             vq.desc_table().store(0, desc);
 
-            let mut c: DescriptorChain<&GuestMemoryMmap> =
-                DescriptorChain::new(m, vq.start(), 16, 0);
+            let mut c: DescriptorChain<&AddrSpace> = DescriptorChain::new(m, vq.start(), 16, 0);
 
             assert!(c.next().is_none());
         }
     }
 
-    #[test]
-    fn test_queue_and_iterator() {
-        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+    fn do_test_queue_and_iterator<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) {
         let vq = MockSplitQueue::new(m, 16);
 
-        let mut q = vq.create_queue(m);
+        let mut q: Q = vq.as_queue(m);
 
         // q is currently valid
         assert!(q.is_valid());
 
         // shouldn't be valid when not marked as ready
-        q.state.ready = false;
+        q.with(|mut qstate| qstate.ready = false);
         assert!(!q.is_valid());
-        q.state.ready = true;
+        q.with(|mut qstate| qstate.ready = true);
+
+        let max_size = q.with(|qstate| qstate.max_size);
 
         // shouldn't be allowed to set a size > max_size
-        q.set_size(q.state.max_size << 1);
-        assert_eq!(q.state.size, q.state.max_size);
+        q.with(|mut qstate| qstate.set_size(max_size << 1));
+        assert!(q.with(|qstate| qstate.size) == max_size);
 
         // or set the size to 0
-        q.set_size(0);
-        assert_eq!(q.state.size, q.state.max_size);
+        q.with(|mut qstate| qstate.set_size(0));
+        assert!(q.with(|qstate| qstate.size) == max_size);
 
         // or set a size which is not a power of 2
-        q.set_size(11);
-        assert_eq!(q.state.size, q.state.max_size);
+        q.with(|mut qstate| qstate.set_size(11));
+        assert!(q.with(|qstate| qstate.size) == max_size);
 
         // but should be allowed to set a size if 0 < size <= max_size and size is a power of two
-        q.set_size(4);
-        assert_eq!(q.state.size, 4);
-        q.state.size = q.state.max_size;
+        q.with(|mut qstate| qstate.set_size(4));
+        q.with(|qstate| assert_eq!(qstate.size, 4));
+        q.with(|mut qstate| qstate.size = qstate.max_size);
 
         // shouldn't be allowed to set an address that breaks the alignment constraint
-        q.set_desc_table_address(Some(0xf), None);
-        assert_eq!(q.state.desc_table.0, vq.desc_table_addr().0);
+        q.with(|mut qstate| qstate.set_desc_table_address(Some(0xf), None));
+        q.with(|qstate| assert_eq!(qstate.desc_table.0, vq.desc_table_addr().0));
 
         // should be allowed to set an aligned out of bounds address
-        q.set_desc_table_address(Some(0xffff_fff0), None);
-        assert_eq!(q.state.desc_table.0, 0xffff_fff0);
+        q.with(|mut qstate| qstate.set_desc_table_address(Some(0xffff_fff0), None));
+        q.with(|qstate| assert_eq!(qstate.desc_table.0, 0xffff_fff0));
 
         // but shouldn't be valid
         assert!(!q.is_valid());
 
        // but should be allowed to set a valid descriptor table address
-        q.set_desc_table_address(Some(0x10), None);
-        assert_eq!(q.state.desc_table.0, 0x10);
+        q.with(|mut qstate| qstate.set_desc_table_address(Some(0x10), None));
+        q.with(|qstate| assert_eq!(qstate.desc_table.0, 0x10));
         assert!(q.is_valid());
-        q.state.desc_table = vq.desc_table_addr();
+        q.with(|mut qstate| qstate.desc_table = vq.desc_table_addr());
 
         // shouldn't be allowed to set an address that breaks the alignment constraint
-        q.set_avail_ring_address(Some(0x1), None);
-        assert_eq!(q.state.avail_ring.0, vq.avail_addr().0);
+        q.with(|mut qstate| qstate.set_avail_ring_address(Some(0x1), None));
+        q.with(|qstate| assert_eq!(qstate.avail_ring.0, vq.avail_addr().0));
 
         // should be allowed to set an aligned out of bounds address
-        q.set_avail_ring_address(Some(0xffff_fffe), None);
-        assert_eq!(q.state.avail_ring.0, 0xffff_fffe);
+        q.with(|mut qstate| qstate.set_avail_ring_address(Some(0xffff_fffe), None));
+        q.with(|qstate| assert_eq!(qstate.avail_ring.0, 0xffff_fffe));
 
         // but shouldn't be valid
         assert!(!q.is_valid());
 
         // but should be allowed to set a valid available ring address
-        q.set_avail_ring_address(Some(0x2), None);
-        assert_eq!(q.state.avail_ring.0, 0x2);
+        q.with(|mut qstate| qstate.set_avail_ring_address(Some(0x2), None));
+        q.with(|qstate| assert_eq!(qstate.avail_ring.0, 0x2));
         assert!(q.is_valid());
-        q.state.avail_ring = vq.avail_addr();
+        q.with(|mut qstate| qstate.avail_ring = vq.avail_addr());
 
         // shouldn't be allowed to set an address that breaks the alignment constraint
-        q.set_used_ring_address(Some(0x3), None);
-        assert_eq!(q.state.used_ring.0, vq.used_addr().0);
+        q.with(|mut qstate| qstate.set_used_ring_address(Some(0x3), None));
+        q.with(|qstate| assert_eq!(qstate.used_ring.0, vq.used_addr().0));
 
         // should be allowed to set an aligned out of bounds address
-        q.set_used_ring_address(Some(0xffff_fffc), None);
-        assert_eq!(q.state.used_ring.0, 0xffff_fffc);
+        q.with(|mut qstate| qstate.set_used_ring_address(Some(0xffff_fffc), None));
+        q.with(|qstate| assert_eq!(qstate.used_ring.0, 0xffff_fffc));
 
         // but shouldn't be valid
         assert!(!q.is_valid());
 
         // but should be allowed to set a valid used ring address
-        q.set_used_ring_address(Some(0x4), None);
-        assert_eq!(q.state.used_ring.0, 0x4);
+        q.with(|mut qstate| qstate.set_used_ring_address(Some(0x4), None));
+        q.with(|qstate| assert_eq!(qstate.used_ring.0, 0x4));
         assert!(q.is_valid());
-        q.state.used_ring = vq.used_addr();
+        q.with(|mut qstate| qstate.used_ring = vq.used_addr());
 
-        {
+        q.with(|mut qstate| {
             // an invalid queue should return an iterator with no next
-            q.state.ready = false;
-            let mut i = q.iter().unwrap();
+            qstate.ready = false;
+            let mut i = qstate.iter().unwrap();
             assert!(i.next().is_none());
-        }
+        });
 
-        q.state.ready = true;
+        q.with(|mut qstate| qstate.ready = true);
 
         // now let's create two simple descriptor chains
         // the chains are (0, 1) and (2, 3, 4)
-        {
-            for j in 0..5u16 {
-                let flags = match j {
-                    1 | 4 => 0,
-                    _ => VIRTQ_DESC_F_NEXT,
-                };
-
-                let desc = Descriptor::new((0x1000 * (j + 1)) as u64, 0x1000, flags, j + 1);
-                vq.desc_table().store(j, desc);
-            }
+        for j in 0..5u16 {
+            let flags = match j {
+                1 | 4 => 0,
+                _ => VIRTQ_DESC_F_NEXT,
+            };
+
+            let desc = Descriptor::new((0x1000 * (j + 1)) as u64, 0x1000, flags, j + 1);
+            vq.desc_table().store(j, desc);
+        }
 
-            vq.avail().ring().ref_at(0).store(0);
-            vq.avail().ring().ref_at(1).store(2);
-            vq.avail().idx().store(2);
+        vq.avail().ring().ref_at(0).store(0);
+        vq.avail().ring().ref_at(1).store(2);
+        vq.avail().idx().store(2);
 
-        let mut i = q.iter().unwrap();
+        q.with(|mut qstate| {
+            let mut i = qstate.iter().unwrap();
 
             {
                 let mut c = i.next().unwrap();
@@ -1393,21 +1340,31 @@ mod tests {
 
             {
                 assert!(i.next().is_none());
                 i.go_to_previous_position();
-                let mut c = q.iter().unwrap().next().unwrap();
+                let mut c = qstate.iter().unwrap().next().unwrap();
                 c.next().unwrap();
                 c.next().unwrap();
                 c.next().unwrap();
                 assert!(c.next().is_none());
             }
-        }
+        })
     }
 
     #[test]
-    fn test_descriptor_and_iterator() {
-        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+    fn test_queue_and_iterator() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_queue_and_iterator::<Queue<&AddrSpace>>(m);
+    }
+
+    #[test]
+    fn test_queue_and_iterator_sync() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_queue_and_iterator::<QueueSync<&AddrSpace>>(m);
+    }
+
+    fn do_test_descriptor_and_iterator<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) {
         let vq = MockSplitQueue::new(m, 16);
 
-        let mut q = vq.create_queue(m);
+        let mut q: Q = vq.as_queue(m);
 
         // q is currently valid
         assert!(q.is_valid());
@@ -1429,47 +1386,59 @@ mod tests {
         vq.avail().ring().ref_at(2).store(5);
         vq.avail().idx().store(3);
 
-        let mut i = q.iter().unwrap();
+        q.with(|mut qstate| {
+            let mut i = qstate.iter().unwrap();
 
-        {
-            let c = i.next().unwrap();
-            assert_eq!(c.head_index(), 0);
-
-            let mut iter = c;
-            assert!(iter.next().is_some());
-            assert!(iter.next().is_some());
-            assert!(iter.next().is_none());
-            assert!(iter.next().is_none());
-        }
+            {
+                let c = i.next().unwrap();
+                assert_eq!(c.head_index(), 0);
 
-        {
-            let c = i.next().unwrap();
-            assert_eq!(c.head_index(), 2);
-
-            let mut iter = c.writable();
-            assert!(iter.next().is_some());
-            assert!(iter.next().is_some());
-            assert!(iter.next().is_none());
-            assert!(iter.next().is_none());
-        }
+                let mut iter = c;
+                assert!(iter.next().is_some());
+                assert!(iter.next().is_some());
+                assert!(iter.next().is_none());
+                assert!(iter.next().is_none());
+            }
 
-        {
-            let c = i.next().unwrap();
-            assert_eq!(c.head_index(), 5);
+            {
+                let c = i.next().unwrap();
+                assert_eq!(c.head_index(), 2);
 
-            let mut iter = c.readable();
-            assert!(iter.next().is_some());
-            assert!(iter.next().is_none());
-            assert!(iter.next().is_none());
-        }
+                let mut iter = c.writable();
+                assert!(iter.next().is_some());
+                assert!(iter.next().is_some());
+                assert!(iter.next().is_none());
+                assert!(iter.next().is_none());
+            }
+
+            {
+                let c = i.next().unwrap();
+                assert_eq!(c.head_index(), 5);
+
+                let mut iter = c.readable();
+                assert!(iter.next().is_some());
+                assert!(iter.next().is_none());
+                assert!(iter.next().is_none());
+            }
+        })
     }
 
     #[test]
-    fn test_add_used() {
-        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+    fn test_descriptor_and_iterator() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_descriptor_and_iterator::<Queue<&AddrSpace>>(m);
+    }
+
+    #[test]
+    fn test_descriptor_and_iterator_sync() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_descriptor_and_iterator::<QueueSync<&AddrSpace>>(m);
+    }
+
+    fn do_test_add_used<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) {
         let vq = MockSplitQueue::new(m, 16);
 
-        let mut q = vq.create_queue(m);
+        let mut q: Q = vq.as_queue(m);
 
         assert_eq!(vq.used().idx().load(), 0);
@@ -1479,7 +1448,7 @@ mod tests {
 
         // should be ok
         q.add_used(1, 0x1000).unwrap();
-        assert_eq!(q.state.next_used, Wrapping(1));
+        q.with(|qstate| assert_eq!(qstate.next_used, Wrapping(1)));
         assert_eq!(vq.used().idx().load(), 1);
 
         let x = vq.used().ring().ref_at(0).load();
         assert_eq!(x.id, 1);
         assert_eq!(x.len, 0x1000);
     }
 
@@ -1488,44 +1457,105 @@ mod tests {
     #[test]
-    fn test_reset_queue() {
+    fn test_add_used() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_add_used::<Queue<&AddrSpace>>(m);
+    }
+
+    #[test]
+    fn test_add_used_sync() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_add_used::<QueueSync<&AddrSpace>>(m);
+    }
+
+    fn do_test_queue_guard<S: DerefMut<Target = QueueState>>(
+        mut qstate: QueueGuard<&AddrSpace, S>,
+    ) {
+        qstate.ready = true;
+        qstate.reset();
+        assert_eq!(qstate.ready, false);
+        let mut iter = qstate.iter().unwrap();
+        assert!(iter.next().is_none());
+    }
+
+    #[test]
+    fn test_queue_guard() {
         let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
         let vq = MockSplitQueue::new(m, 16);
 
         let mut q = vq.create_queue(m);
-        q.state.size = 8;
-        q.state.ready = true;
-        q.state.reset();
-        assert_eq!(q.state.size, 16);
-        assert_eq!(q.state.ready, false);
+        do_test_queue_guard(q.acquire());
     }
 
     #[test]
-    fn test_needs_notification() {
+    fn test_queue_guard_sync() {
         let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        let vq = MockSplitQueue::new(m, 16);
+
+        // Note how test_queue_guard above needs "let mut", while a plain "let"
+        // suffices here. A single-threaded queue can only be accessed by one
+        // thread, and that thread needs to have an exclusive reference; a
+        // multi-threaded queue can be accessed by many threads and therefore it
+        // has to support interior mutability (via Mutex). On the other hand, a
+        // multi-threaded queue is reference counted and therefore it cannot be
+        // accessed via an exclusive reference *at all*. This is the reason why
+        // only Queue has the acquire() method, and only QueueSync has a lock()
+        // method.
+        let q: QueueSync<_> = vq.as_queue(m);
+        do_test_queue_guard(q.lock());
+    }
+
+    fn do_test_reset_queue<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) {
+        let vq = MockSplitQueue::new(m, 16);
+
+        let mut q: Q = vq.as_queue(m);
+        q.with(|mut qstate| {
+            qstate.size = 8;
+            qstate.ready = true;
+            qstate.reset();
+            assert_eq!(qstate.size, 16);
+            assert_eq!(qstate.ready, false);
+        })
+    }
+
+    #[test]
+    fn test_reset_queue() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_reset_queue::<Queue<&AddrSpace>>(m);
+    }
+
+    #[test]
+    fn test_reset_queue_sync() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_reset_queue::<QueueSync<&AddrSpace>>(m);
+    }
+
+    fn do_test_needs_notification<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) {
         let qsize = 16;
         let vq = MockSplitQueue::new(m, qsize);
 
-        let mut q = vq.create_queue(m);
+        let mut q: Q = vq.as_queue(m);
         let avail_addr = vq.avail_addr();
 
         // It should always return true when EVENT_IDX isn't enabled.
         for i in 0..qsize {
-            q.state.next_used = Wrapping(i);
+            q.with(|mut qstate| qstate.next_used = Wrapping(i));
            assert_eq!(q.needs_notification().unwrap(), true);
         }
 
         m.write_obj::<u16>(4, avail_addr.unchecked_add(4 + qsize as u64 * 2))
             .unwrap();
-        q.state.set_event_idx(true);
+        q.with(|mut qstate| qstate.set_event_idx(true));
 
         // Incrementing up to this value causes an `u16` to wrap back to 0.
         let wrap = u32::from(u16::MAX) + 1;
 
         for i in 0..wrap + 12 {
-            q.state.next_used = Wrapping(i as u16);
-            // Let's test wrapping around the maximum index value as well.
-            let expected = i == 5 || i == (5 + wrap) || q.state.signalled_used.is_none();
+            let expected = q.with(|mut qstate| {
+                qstate.next_used = Wrapping(i as u16);
+                // Let's test wrapping around the maximum index value as well.
+                i == 5 || i == (5 + wrap) || qstate.signalled_used.is_none()
+            });
+
             assert_eq!(q.needs_notification().unwrap(), expected);
         }
@@ -1539,22 +1569,32 @@ mod tests {
             .unwrap();
         assert_eq!(q.needs_notification().unwrap(), false);
 
-        q.state.next_used = Wrapping(15);
+        q.with(|mut qstate| qstate.next_used = Wrapping(15));
         assert_eq!(q.needs_notification().unwrap(), false);
 
-        q.state.next_used = Wrapping(0);
+        q.with(|mut qstate| qstate.next_used = Wrapping(0));
         assert_eq!(q.needs_notification().unwrap(), true);
         assert_eq!(q.needs_notification().unwrap(), false);
     }
 
     #[test]
-    fn test_enable_disable_notification() {
-        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+    fn test_needs_notification() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_needs_notification::<Queue<&AddrSpace>>(m);
+    }
+
+    #[test]
+    fn test_needs_notification_sync() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_needs_notification::<QueueSync<&AddrSpace>>(m);
+    }
+
+    fn do_test_enable_disable_notification<'a, Q: QueueT<&'a AddrSpace>>(m: &'a AddrSpace) {
         let vq = MockSplitQueue::new(m, 16);
 
-        let mut q = vq.create_queue(m);
+        let mut q: Q = vq.as_queue(m);
         let used_addr = vq.used_addr();
 
-        assert_eq!(q.state.event_idx_enabled, false);
+        q.with(|qstate| assert_eq!(qstate.event_idx_enabled, false));
 
         q.enable_notification().unwrap();
         let v = m.read_obj::<u16>(used_addr).unwrap();
@@ -1573,13 +1613,25 @@ mod tests {
         m.write_obj::<u16>(2, avail_addr.unchecked_add(2)).unwrap();
 
         assert_eq!(q.enable_notification().unwrap(), true);
-        q.state.next_avail = Wrapping(2);
+        q.with(|mut qstate| qstate.next_avail = Wrapping(2));
         assert_eq!(q.enable_notification().unwrap(), false);
 
         m.write_obj::<u16>(8, avail_addr.unchecked_add(2)).unwrap();
 
         assert_eq!(q.enable_notification().unwrap(), true);
-        q.state.next_avail = Wrapping(8);
+        q.with(|mut qstate| qstate.next_avail = Wrapping(8));
         assert_eq!(q.enable_notification().unwrap(), false);
     }
+
+    #[test]
+    fn test_enable_disable_notification() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_enable_disable_notification::<Queue<&AddrSpace>>(m);
+    }
+
+    #[test]
+    fn test_enable_disable_notification_sync() {
+        let m = &AddrSpace::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
+        do_test_enable_disable_notification::<QueueSync<&AddrSpace>>(m);
+    }
 }
diff --git a/crates/virtio-queue/src/mock.rs b/crates/virtio-queue/src/mock.rs
index 5e4a4137..f77ae55c 100644
--- a/crates/virtio-queue/src/mock.rs
+++ b/crates/virtio-queue/src/mock.rs
@@ -12,6 +12,7 @@ use vm_memory::{
 };
 
 use crate::defs::{VIRTQ_DESC_F_INDIRECT, VIRTQ_DESC_F_NEXT};
+use crate::generic::QueueT;
 use crate::{Descriptor, Queue, QueueState, VirtqUsedElem};
 
 /// Wrapper struct used for accessing a particular address of a GuestMemory area.
@@ -352,16 +353,20 @@ impl<'a, M: GuestMemory> MockSplitQueue<'a, M> {
         self.update_avail_idx(head_idx);
     }
 
+    /// Return a QueueT implementation for this queue.
+    pub(crate) fn as_queue<A: GuestAddressSpace, T: QueueT<A>>(&self, a: A) -> T {
+        let mut q = QueueState::new(self.len);
+        q.size = self.len;
+        q.ready = true;
+        q.desc_table = self.desc_table_addr;
+        q.avail_ring = self.avail_addr;
+        q.used_ring = self.used_addr;
+        QueueT::construct(a, q)
+    }
+
     /// Creates a new `Queue`, using the underlying memory regions represented
     /// by the `MockSplitQueue`.
-    pub fn create_queue<A: GuestAddressSpace>(&self, a: A) -> Queue<A, QueueState<A>> {
-        let mut q = Queue::<A, QueueState<A>>::new(a, self.len);
-
-        q.state.size = self.len;
-        q.state.ready = true;
-        q.state.desc_table = self.desc_table_addr;
-        q.state.avail_ring = self.avail_addr;
-        q.state.used_ring = self.used_addr;
-        q
+    pub fn create_queue<A: GuestAddressSpace>(&self, a: A) -> Queue<A> {
+        self.as_queue(a)
     }
 }
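After this change, test and example code gets a fully set-up queue with one call and no `QueueState` type parameter to spell out. A quick sketch using the public mock API (requires the crate's `test-utils` feature for the `mock` module):

```rust
use virtio_queue::mock::MockSplitQueue;
use virtio_queue::Queue;
use vm_memory::{GuestAddress, GuestMemoryMmap};

fn main() {
    let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vq = MockSplitQueue::new(m, 16);

    // create_queue now returns Queue<&GuestMemoryMmap<()>> directly; the same
    // mock hands out a QueueSync internally via the crate-private as_queue.
    let mut q: Queue<&GuestMemoryMmap<()>> = vq.create_queue(m);
    assert!(q.is_valid());
    q.add_used(0, 0x100).unwrap();
}
```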