diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json
index 698edece..ab6803c9 100644
--- a/coverage_config_x86_64.json
+++ b/coverage_config_x86_64.json
@@ -1,5 +1,5 @@
 {
-  "coverage_score": 90.4,
+  "coverage_score": 83.6,
   "exclude_path": "crates/virtio-queue/src/mock.rs",
   "crate_features": "virtio-blk/backend-stdio"
 }
diff --git a/crates/devices/virtio-blk/src/request.rs b/crates/devices/virtio-blk/src/request.rs
index 7da9c6af..cabd9e9d 100644
--- a/crates/devices/virtio-blk/src/request.rs
+++ b/crates/devices/virtio-blk/src/request.rs
@@ -24,6 +24,7 @@
 //! approach.
 
 use std::fmt::{self, Display};
+use std::ops::Deref;
 use std::result;
 
 use crate::defs::{
@@ -32,9 +33,7 @@ use crate::defs::{
 };
 
 use virtio_queue::{Descriptor, DescriptorChain};
-use vm_memory::{
-    ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryError,
-};
+use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError};
 
 /// Block request parsing errors.
 #[derive(Debug)]
@@ -159,7 +158,10 @@ impl Request {
     }
 
     // Checks that a descriptor meets the minimal requirements for a valid status descriptor.
-    fn check_status_desc<M: GuestMemory>(mem: &M, desc: Descriptor) -> Result<()> {
+    fn check_status_desc<M>(mem: &M, desc: Descriptor) -> Result<()>
+    where
+        M: GuestMemory + ?Sized,
+    {
         // The status MUST always be writable.
         if !desc.is_write_only() {
             return Err(Error::UnexpectedReadOnlyDescriptor);
@@ -202,7 +204,11 @@ impl Request {
     /// # Arguments
     /// * `desc_chain` - A mutable reference to the descriptor chain that should point to the
     ///                  buffers of a virtio block request.
-    pub fn parse<M: GuestAddressSpace>(desc_chain: &mut DescriptorChain<M>) -> Result<Request> {
+    pub fn parse<M>(desc_chain: &mut DescriptorChain<M>) -> Result<Request>
+    where
+        M: Deref,
+        M::Target: GuestMemory,
+    {
         let chain_head = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
         // The head contains the request type which MUST be readable.
         if chain_head.is_write_only() {
@@ -235,7 +241,7 @@ impl Request {
         }
 
         let status_desc = desc;
-        Request::check_status_desc::<<M as GuestAddressSpace>::M>(desc_chain.memory(), status_desc)?;
+        Request::check_status_desc(desc_chain.memory(), status_desc)?;
 
         request.status_addr = status_desc.addr();
         Ok(request)
diff --git a/crates/virtio-queue/src/lib.rs b/crates/virtio-queue/src/lib.rs
index 5dec1bdf..6504e58f 100644
--- a/crates/virtio-queue/src/lib.rs
+++ b/crates/virtio-queue/src/lib.rs
@@ -21,7 +21,6 @@ pub mod mock;
 
 use std::convert::TryFrom;
 use std::fmt::{self, Debug, Display};
-use std::marker::PhantomData;
 use std::mem::size_of;
 use std::num::Wrapping;
 use std::ops::{Deref, DerefMut};
@@ -146,8 +145,8 @@ unsafe impl ByteValued for Descriptor {}
 
 /// A virtio descriptor chain.
 #[derive(Clone, Debug)]
-pub struct DescriptorChain<M: GuestAddressSpace> {
-    mem: M::T,
+pub struct DescriptorChain<M> {
+    mem: M,
     desc_table: GuestAddress,
     queue_size: u16,
     head_index: u16,
@@ -156,9 +155,13 @@ pub struct DescriptorChain<M: GuestAddressSpace> {
     is_indirect: bool,
 }
 
-impl<M: GuestAddressSpace> DescriptorChain<M> {
+impl<M> DescriptorChain<M>
+where
+    M: Deref,
+    M::Target: GuestMemory,
+{
     fn with_ttl(
-        mem: M::T,
+        mem: M,
         desc_table: GuestAddress,
         queue_size: u16,
         ttl: u16,
@@ -176,7 +179,7 @@ impl<M: GuestAddressSpace> DescriptorChain<M> {
     }
 
     /// Create a new `DescriptorChain` instance.
-    fn new(mem: M::T, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
+    fn new(mem: M, desc_table: GuestAddress, queue_size: u16, head_index: u16) -> Self {
         Self::with_ttl(mem, desc_table, queue_size, queue_size, head_index)
     }
 
@@ -187,8 +190,8 @@ impl<M: GuestAddressSpace> DescriptorChain<M> {
 
     /// Return a `GuestMemory` object that can be used to access the buffers
     /// pointed to by the descriptor chain.
-    pub fn memory(&self) -> &M::M {
-        &*self.mem
+    pub fn memory(&self) -> &M::Target {
+        self.mem.deref()
     }
 
     /// Returns an iterator that only yields the readable descriptors in the chain.
@@ -237,7 +240,11 @@ impl<M: GuestAddressSpace> DescriptorChain<M> {
     }
 }
 
-impl<M: GuestAddressSpace> Iterator for DescriptorChain<M> {
+impl<M> Iterator for DescriptorChain<M>
+where
+    M: Deref,
+    M::Target: GuestMemory,
+{
     type Item = Descriptor;
 
     /// Returns the next descriptor in this descriptor chain, if there is one.
@@ -282,12 +289,16 @@ impl<M: GuestAddressSpace> Iterator for DescriptorChain<M> {
 
 /// An iterator for readable or writable descriptors.
 #[derive(Clone)]
-pub struct DescriptorChainRwIter<M: GuestAddressSpace> {
+pub struct DescriptorChainRwIter<M> {
     chain: DescriptorChain<M>,
     writable: bool,
 }
 
-impl<M: GuestAddressSpace> Iterator for DescriptorChainRwIter<M> {
+impl<M> Iterator for DescriptorChainRwIter<M>
+where
+    M: Deref,
+    M::Target: GuestMemory,
+{
     type Item = Descriptor;
 
     /// Returns the next descriptor in this descriptor chain, if there is one.
@@ -311,9 +322,9 @@ impl<M: GuestAddressSpace> Iterator for DescriptorChainRwIter<M> {
 
 // We can't derive Debug, because rustc doesn't generate the M::T: Debug
 // constraint
-impl<M: GuestAddressSpace> Debug for DescriptorChainRwIter<M>
+impl<M> Debug for DescriptorChainRwIter<M>
 where
-    M::T: Debug,
+    M: Debug,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("DescriptorChainRwIter")
@@ -325,8 +336,8 @@ where
 
 /// Consuming iterator over all available descriptor chain heads in the queue.
 #[derive(Debug)]
-pub struct AvailIter<'b, M: GuestAddressSpace> {
-    mem: M::T,
+pub struct AvailIter<'b, M> {
+    mem: M,
     desc_table: GuestAddress,
     avail_ring: GuestAddress,
     last_index: Wrapping<u16>,
@@ -334,7 +345,7 @@ pub struct AvailIter<'b, M: GuestAddressSpace> {
     next_avail: &'b mut Wrapping<u16>,
 }
 
-impl<'b, M: GuestAddressSpace> AvailIter<'b, M> {
+impl<'b, M> AvailIter<'b, M> {
     /// Goes back one position in the available descriptor chain offered by the driver.
     ///
     /// Rust does not support bidirectional iterators. This is the only way to revert the effect
@@ -347,7 +358,11 @@ impl<'b, M: GuestAddressSpace> AvailIter<'b, M> {
     pub fn go_to_previous_position(&mut self) {
         self.next_avail -= Wrapping(1);
     }
 }
 
-impl<'b, M: GuestAddressSpace> Iterator for AvailIter<'b, M> {
+impl<'b, M> Iterator for AvailIter<'b, M>
+where
+    M: Clone + Deref,
+    M::Target: GuestMemory,
+{
     type Item = DescriptorChain<M>;
 
     fn next(&mut self) -> Option<Self::Item> {
@@ -405,15 +420,15 @@ impl VirtqUsedElem {
 
 unsafe impl ByteValued for VirtqUsedElem {}
 
 /// Struct to hold an exclusive reference to the underlying `QueueState` object.
-pub enum QueueStateGuard<'a, M: GuestAddressSpace> {
+pub enum QueueStateGuard<'a> {
     /// A reference to a `QueueState` object.
-    StateObject(&'a mut QueueState<M>),
+    StateObject(&'a mut QueueState),
     /// A `MutexGuard` for a `QueueState` object.
-    MutexGuard(MutexGuard<'a, QueueState<M>>),
+    MutexGuard(MutexGuard<'a, QueueState>),
 }
 
-impl<'a, M: GuestAddressSpace> Deref for QueueStateGuard<'a, M> {
-    type Target = QueueState<M>;
+impl<'a> Deref for QueueStateGuard<'a> {
+    type Target = QueueState;
 
     fn deref(&self) -> &Self::Target {
         match self {
@@ -423,7 +438,7 @@ impl<'a, M: GuestAddressSpace> Deref for QueueStateGuard<'a, M> {
     }
 }
 
-impl<'a, M: GuestAddressSpace> DerefMut for QueueStateGuard<'a, M> {
+impl<'a> DerefMut for QueueStateGuard<'a> {
     fn deref_mut(&mut self) -> &mut Self::Target {
         match self {
             QueueStateGuard::StateObject(v) => v,
@@ -436,12 +451,12 @@ impl<'a, M: GuestAddressSpace> DerefMut for QueueStateGuard<'a, M> {
 ///
 /// To optimize for performance, different implementations of the `QueueStateT` trait may be
 /// provided for single-threaded context and multi-threaded context.
-pub trait QueueStateT<M: GuestAddressSpace> {
+pub trait QueueStateT {
     /// Construct an empty virtio queue state object with the given `max_size`.
     fn new(max_size: u16) -> Self;
 
     /// Check whether the queue configuration is valid.
-    fn is_valid(&self, mem: &M::T) -> bool;
+    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool;
 
     /// Reset the queue to the initial state.
     fn reset(&mut self);
@@ -450,7 +465,7 @@ pub trait QueueStateT<M: GuestAddressSpace> {
     ///
     /// Logically this method will acquire the underlying lock protecting the `QueueState` Object.
     /// The lock will be released when the returned object gets dropped.
-    fn lock(&mut self) -> QueueStateGuard<'_, M>;
+    fn lock(&mut self) -> QueueStateGuard;
 
     /// Get the maximum size of the virtio queue.
     fn max_size(&self) -> u16;
@@ -489,20 +504,21 @@ pub trait QueueStateT<M: GuestAddressSpace> {
     fn set_event_idx(&mut self, enabled: bool);
 
     /// Read the `idx` field from the available ring.
-    fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result<Wrapping<u16>, Error>;
+    fn avail_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>;
 
     /// Put a used descriptor head into the used ring.
-    fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error>;
+    fn add_used<M: GuestMemory>(&mut self, mem: &M, head_index: u16, len: u32)
+        -> Result<(), Error>;
 
     /// Enable notification events from the guest driver.
     ///
     /// Return true if one or more descriptors can be consumed from the available ring after
     /// notifications were enabled (and thus it's possible there will be no corresponding
     /// notification).
-    fn enable_notification(&mut self, mem: &M::T) -> Result<bool, Error>;
+    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error>;
 
     /// Disable notification events from the guest driver.
-    fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error>;
+    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error>;
 
     /// Check whether a notification to the guest is needed.
     ///
@@ -510,7 +526,7 @@ pub trait QueueStateT<M: GuestAddressSpace> {
     /// driver will actually be notified, remember the associated index in the used ring, and
     /// won't return `true` again until the driver updates `used_event` and/or the notification
     /// conditions hold once more.
-    fn needs_notification(&mut self, mem: &M::T) -> Result<bool, Error>;
+    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error>;
 
     /// Return the index for the next descriptor in the available ring.
     fn next_avail(&self) -> u16;
@@ -525,7 +541,7 @@
 const DEFAULT_USED_RING_ADDR: u64 = 0x0;
 
 /// Struct to maintain information and manipulate state of a virtio queue.
 #[derive(Clone, Debug)]
-pub struct QueueState<M: GuestAddressSpace> {
+pub struct QueueState {
     /// The maximal size in elements offered by the device
     pub max_size: u16,
 
@@ -555,14 +571,16 @@ pub struct QueueState<M: GuestAddressSpace> {
     /// Guest physical address of the used ring
     pub used_ring: GuestAddress,
-
-    phantom: PhantomData<M>,
 }
 
-impl<M: GuestAddressSpace> QueueState<M> {
+impl QueueState {
     /// Get a consuming iterator over all available descriptor chain heads offered by the driver.
-    pub fn iter(&mut self, mem: M::T) -> Result<AvailIter<'_, M>, Error> {
-        self.avail_idx(&mem, Ordering::Acquire)
+    pub fn iter<M>(&mut self, mem: M) -> Result<AvailIter<'_, M>, Error>
+    where
+        M: Deref,
+        M::Target: GuestMemory + Sized,
+    {
+        self.avail_idx(mem.deref(), Ordering::Acquire)
             .map(move |idx| AvailIter {
                 mem,
                 desc_table: self.desc_table,
@@ -575,7 +593,12 @@ impl<M: GuestAddressSpace> QueueState<M> {
 
     // Helper method that writes `val` to the `avail_event` field of the used ring, using
     // the provided ordering.
-    fn set_avail_event(&self, mem: &M::T, val: u16, order: Ordering) -> Result<(), Error> {
+    fn set_avail_event<M: GuestMemory>(
+        &self,
+        mem: &M,
+        val: u16,
+        order: Ordering,
+    ) -> Result<(), Error> {
         let elem_sz = VIRTQ_USED_ELEMENT_SIZE * u64::from(self.size);
         let offset = VIRTQ_USED_RING_HEADER_SIZE + elem_sz;
         let addr = self.used_ring.unchecked_add(offset);
@@ -584,7 +607,12 @@ impl<M: GuestAddressSpace> QueueState<M> {
     }
 
     // Set the value of the `flags` field of the used ring, applying the specified ordering.
-    fn set_used_flags(&mut self, mem: &M::T, val: u16, order: Ordering) -> Result<(), Error> {
+    fn set_used_flags<M: GuestMemory>(
+        &mut self,
+        mem: &M,
+        val: u16,
+        order: Ordering,
+    ) -> Result<(), Error> {
         mem.store(val, self.used_ring, order)
             .map_err(Error::GuestMemory)
     }
@@ -593,7 +621,7 @@ impl<M: GuestAddressSpace> QueueState<M> {
     //
     // Every access in this method uses `Relaxed` ordering because a fence is added by the caller
     // when appropriate.
-    fn set_notification(&mut self, mem: &M::T, enable: bool) -> Result<(), Error> {
+    fn set_notification<M: GuestMemory>(&mut self, mem: &M, enable: bool) -> Result<(), Error> {
         if enable {
             if self.event_idx_enabled {
                 // We call `set_avail_event` using the `next_avail` value, instead of reading
@@ -622,7 +650,7 @@ impl<M: GuestAddressSpace> QueueState<M> {
     /// Neither of these interrupt suppression methods are reliable, as they are not synchronized
     /// with the device, but they serve as useful optimizations. So we only ensure access to the
     /// virtq_avail.used_event is atomic, but do not need to synchronize with other memory accesses.
-    fn used_event(&self, mem: &M::T, order: Ordering) -> Result<Wrapping<u16>, Error> {
+    fn used_event<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
         // Safe because we have validated the queue and access guest
         // memory through GuestMemory interfaces.
         let elem_sz = u64::from(self.size) * VIRTQ_AVAIL_ELEMENT_SIZE;
@@ -635,7 +663,7 @@
     }
 }
 
-impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
+impl QueueStateT for QueueState {
     fn new(max_size: u16) -> Self {
         QueueState {
             max_size,
@@ -648,11 +676,10 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
             next_used: Wrapping(0),
             event_idx_enabled: false,
             signalled_used: None,
-            phantom: PhantomData,
         }
     }
 
-    fn is_valid(&self, mem: &M::T) -> bool {
+    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
         let queue_size = self.size as u64;
         let desc_table = self.desc_table;
         let desc_table_size = size_of::<Descriptor>() as u64 * queue_size;
@@ -710,7 +737,7 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
         self.event_idx_enabled = false;
     }
 
-    fn lock(&mut self) -> QueueStateGuard<'_, M> {
+    fn lock(&mut self) -> QueueStateGuard {
         QueueStateGuard::StateObject(self)
     }
 
@@ -775,7 +802,7 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
         self.event_idx_enabled = enabled;
     }
 
-    fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result<Wrapping<u16>, Error> {
+    fn avail_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
         let addr = self.avail_ring.unchecked_add(2);
 
         mem.load(addr, order)
@@ -783,7 +810,12 @@
             .map_err(Error::GuestMemory)
     }
 
-    fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error> {
+    fn add_used<M: GuestMemory>(
+        &mut self,
+        mem: &M,
+        head_index: u16,
+        len: u32,
+    ) -> Result<(), Error> {
         if head_index >= self.size {
             error!(
                 "attempted to add out of bounds descriptor to used ring: {}",
@@ -829,7 +861,7 @@
     //         break;
     //     }
     // }
-    fn enable_notification(&mut self, mem: &M::T) -> Result<bool, Error> {
+    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
         self.set_notification(mem, true)?;
         // Ensures the following read is not reordered before any previous write operation.
         fence(Ordering::SeqCst);
@@ -844,11 +876,11 @@
             .map(|idx| idx != self.next_avail)
     }
 
-    fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error> {
+    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
         self.set_notification(mem, false)
     }
 
-    fn needs_notification(&mut self, mem: &M::T) -> Result<bool, Error> {
+    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
         let used_idx = self.next_used;
 
         // Complete all the writes in add_used() before reading the event.
@@ -884,18 +916,18 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueState<M> {
 
 /// Struct to maintain information and manipulate state of a virtio queue for multi-threaded
 /// context.
 #[derive(Clone, Debug)]
-pub struct QueueStateSync<M: GuestAddressSpace> {
-    state: Arc<Mutex<QueueState<M>>>,
+pub struct QueueStateSync {
+    state: Arc<Mutex<QueueState>>,
 }
 
-impl<M: GuestAddressSpace> QueueStateT<M> for QueueStateSync<M> {
+impl QueueStateT for QueueStateSync {
     fn new(max_size: u16) -> Self {
         QueueStateSync {
             state: Arc::new(Mutex::new(QueueState::new(max_size))),
         }
     }
 
-    fn is_valid(&self, mem: &M::T) -> bool {
+    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
         self.state.lock().unwrap().is_valid(mem)
     }
 
@@ -903,7 +935,7 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueStateSync<M> {
         self.state.lock().unwrap().reset();
     }
 
-    fn lock(&mut self) -> QueueStateGuard<'_, M> {
+    fn lock(&mut self) -> QueueStateGuard {
         QueueStateGuard::MutexGuard(self.state.lock().unwrap())
     }
 
@@ -939,23 +971,28 @@ impl<M: GuestAddressSpace> QueueStateT<M> for QueueStateSync<M> {
         self.state.lock().unwrap().set_event_idx(enabled);
     }
 
-    fn avail_idx(&self, mem: &M::T, order: Ordering) -> Result<Wrapping<u16>, Error> {
+    fn avail_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
         self.state.lock().unwrap().avail_idx(mem, order)
     }
 
-    fn add_used(&mut self, mem: &M::T, head_index: u16, len: u32) -> Result<(), Error> {
+    fn add_used<M: GuestMemory>(
+        &mut self,
+        mem: &M,
+        head_index: u16,
+        len: u32,
+    ) -> Result<(), Error> {
         self.state.lock().unwrap().add_used(mem, head_index, len)
     }
 
-    fn enable_notification(&mut self, mem: &M::T) -> Result<bool, Error> {
+    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
         self.state.lock().unwrap().enable_notification(mem)
     }
 
-    fn disable_notification(&mut self, mem: &M::T) -> Result<(), Error> {
+    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
         self.state.lock().unwrap().disable_notification(mem)
     }
 
-    fn needs_notification(&mut self, mem: &M::T) -> Result<bool, Error> {
+    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
         self.state.lock().unwrap().needs_notification(mem)
     }
 
@@ -970,14 +1007,14 @@
 
 /// A convenient wrapper struct for a virtio queue, with associated GuestMemory object.
 #[derive(Clone, Debug)]
-pub struct Queue<M: GuestAddressSpace, S: QueueStateT<M> = QueueState<M>> {
+pub struct Queue<M: GuestAddressSpace, S: QueueStateT = QueueState> {
     /// Guest memory object associated with the queue.
     pub mem: M,
     /// Virtio queue state.
     pub state: S,
 }
 
-impl<M: GuestAddressSpace, S: QueueStateT<M>> Queue<M, S> {
+impl<M: GuestAddressSpace, S: QueueStateT> Queue<M, S> {
     /// Construct an empty virtio queue with the given `max_size`.
     pub fn new(mem: M, max_size: u16) -> Self {
         Queue {
@@ -988,7 +1025,7 @@ impl<M: GuestAddressSpace, S: QueueStateT<M>> Queue<M, S> {
 
     /// Check whether the queue configuration is valid.
     pub fn is_valid(&self) -> bool {
-        self.state.is_valid(&self.mem.memory())
+        self.state.is_valid(self.mem.memory().deref())
     }
 
     /// Reset the queue to the initial state.
@@ -1000,7 +1037,7 @@ impl<M: GuestAddressSpace, S: QueueStateT<M>> Queue<M, S> {
     ///
     /// Logically this method will acquire the underlying lock protecting the `QueueState` Object.
     /// The lock will be released when the returned object gets dropped.
-    pub fn lock(&mut self) -> QueueStateGuard<'_, M> {
+    pub fn lock(&mut self) -> QueueStateGuard {
         self.state.lock()
     }
 
@@ -1058,12 +1095,13 @@ impl<M: GuestAddressSpace, S: QueueStateT<M>> Queue<M, S> {
 
     /// Read the `idx` field from the available ring.
     pub fn avail_idx(&self, order: Ordering) -> Result<Wrapping<u16>, Error> {
-        self.state.avail_idx(&self.mem.memory(), order)
+        self.state.avail_idx(self.mem.memory().deref(), order)
     }
 
     /// Put a used descriptor head into the used ring.
     pub fn add_used(&mut self, head_index: u16, len: u32) -> Result<(), Error> {
-        self.state.add_used(&self.mem.memory(), head_index, len)
+        self.state
+            .add_used(self.mem.memory().deref(), head_index, len)
     }
 
     /// Enable notification events from the guest driver.
     ///
@@ -1072,12 +1110,12 @@ impl<M: GuestAddressSpace, S: QueueStateT<M>> Queue<M, S> {
     /// Return true if one or more descriptors can be consumed from the available ring after
     /// notifications were enabled (and thus it's possible there will be no corresponding
     /// notification).
     pub fn enable_notification(&mut self) -> Result<bool, Error> {
-        self.state.enable_notification(&self.mem.memory())
+        self.state.enable_notification(self.mem.memory().deref())
     }
 
     /// Disable notification events from the guest driver.
     pub fn disable_notification(&mut self) -> Result<(), Error> {
-        self.state.disable_notification(&self.mem.memory())
+        self.state.disable_notification(self.mem.memory().deref())
     }
 
     /// Check whether a notification to the guest is needed.
@@ -1087,7 +1125,7 @@ impl<M: GuestAddressSpace, S: QueueStateT<M>> Queue<M, S> {
     /// won't return `true` again until the driver updates `used_event` and/or the notification
     /// conditions hold once more.
     pub fn needs_notification(&mut self) -> Result<bool, Error> {
-        self.state.needs_notification(&self.mem.memory())
+        self.state.needs_notification(self.mem.memory().deref())
     }
 
     /// Return the index for the next descriptor in the available ring.
@@ -1101,9 +1139,9 @@ impl<M: GuestAddressSpace, S: QueueStateT<M>> Queue<M, S> {
     }
 }
 
-impl<M: GuestAddressSpace> Queue<M, QueueState<M>> {
+impl<M: GuestAddressSpace> Queue<M, QueueState> {
     /// A consuming iterator over all available descriptor chain heads offered by the driver.
-    pub fn iter(&mut self) -> Result<AvailIter<'_, M>, Error> {
+    pub fn iter(&mut self) -> Result<AvailIter<'_, M::T>, Error> {
         self.state.iter(self.mem.memory())
     }
 }
diff --git a/crates/virtio-queue/src/mock.rs b/crates/virtio-queue/src/mock.rs
index 5e4a4137..057d16b4 100644
--- a/crates/virtio-queue/src/mock.rs
+++ b/crates/virtio-queue/src/mock.rs
@@ -354,8 +354,8 @@ impl<'a, M: GuestMemory> MockSplitQueue<'a, M> {
 
     /// Creates a new `Queue`, using the underlying memory regions represented
     /// by the `MockSplitQueue`.
-    pub fn create_queue<A: GuestAddressSpace>(&self, a: A) -> Queue<A, QueueState<A>> {
-        let mut q = Queue::<A, QueueState<A>>::new(a, self.len);
+    pub fn create_queue<A: GuestAddressSpace>(&self, a: A) -> Queue<A, QueueState> {
+        let mut q = Queue::<A, QueueState>::new(a, self.len);
         q.state.size = self.len;
         q.state.ready = true;
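
The sketch below is not part of the patch; it illustrates what the relaxed bounds buy. After this change a `DescriptorChain<M>` only requires `M: Deref` with `M::Target: GuestMemory`, so the same parsing code can run over a plain reference or an `Arc` handle without any `GuestAddressSpace` bound. The helper names (`parse_one`, `parse_from_ref`, `parse_from_arc`) and the `virtio_blk::request` module path are assumptions made for illustration; `GuestMemoryMmap` needs vm-memory's `backend-mmap` feature.

// Hypothetical usage sketch, not code from the patch.
use std::ops::Deref;
use std::sync::Arc;

use virtio_blk::request::Request; // module path assumed for the virtio-blk crate
use virtio_queue::DescriptorChain;
use vm_memory::{GuestMemory, GuestMemoryMmap};

// Generic over the memory handle, mirroring the bounds `Request::parse` uses
// after this patch: any `M` that derefs to a `GuestMemory` is accepted.
fn parse_one<M>(chain: &mut DescriptorChain<M>) -> Option<Request>
where
    M: Deref,
    M::Target: GuestMemory,
{
    Request::parse(chain).ok()
}

// A chain borrowed over a plain `&GuestMemoryMmap` reference now works ...
fn parse_from_ref(chain: &mut DescriptorChain<&GuestMemoryMmap>) -> Option<Request> {
    parse_one(chain)
}

// ... and so does one that owns an `Arc<GuestMemoryMmap>`, with no
// `GuestAddressSpace` implementation required on the handle type.
fn parse_from_arc(chain: &mut DescriptorChain<Arc<GuestMemoryMmap>>) -> Option<Request> {
    parse_one(chain)
}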