 use std::num::Wrapping;
 use std::sync::atomic::{Ordering, fence};

+use crate::arch::host_page_size;
 use crate::logger::error;
+use crate::utils::u64_to_usize;
 use crate::vstate::memory::{Address, Bitmap, ByteValued, GuestAddress, GuestMemory};

 pub const VIRTQ_DESC_F_NEXT: u16 = 0x1;
@@ -32,7 +34,7 @@ pub enum QueueError {
     /// Failed to write value into the virtio queue used ring: {0}
     MemoryError(#[from] vm_memory::GuestMemoryError),
     /// Pointer is not aligned properly: {0:#x} not {1}-byte aligned.
-    PointerNotAligned(usize, u8),
+    PointerNotAligned(usize, usize),
 }

 /// Error type indicating the guest configured a virtio queue such that the avail_idx field would
@@ -310,31 +312,32 @@ impl Queue {
             + std::mem::size_of::<u16>()
     }

-    fn get_slice_ptr<M: GuestMemory>(
+    fn get_aligned_slice_ptr<T, M: GuestMemory>(
         &self,
         mem: &M,
         addr: GuestAddress,
         len: usize,
-    ) -> Result<*mut u8, QueueError> {
+        alignment: usize,
+    ) -> Result<*mut T, QueueError> {
+        // Guest memory base address is page aligned, so as long as the alignment divides the
+        // page size, it suffices to check that the GPA is properly aligned (i.e. we don't need
+        // to recheck the HVA).
+        if addr.0 & (alignment as u64 - 1) != 0 {
+            return Err(QueueError::PointerNotAligned(
+                u64_to_usize(addr.0),
+                alignment,
+            ));
+        }
+
         let slice = mem.get_slice(addr, len).map_err(QueueError::MemoryError)?;
         slice.bitmap().mark_dirty(0, len);
-        Ok(slice.ptr_guard_mut().as_ptr())
+        Ok(slice.ptr_guard_mut().as_ptr().cast())
     }

     /// Set up pointers to the queue objects in the guest memory
     /// and mark memory dirty for those objects
     pub fn initialize<M: GuestMemory>(&mut self, mem: &M) -> Result<(), QueueError> {
-        self.desc_table_ptr = self
-            .get_slice_ptr(mem, self.desc_table_address, self.desc_table_size())?
-            .cast();
-        self.avail_ring_ptr = self
-            .get_slice_ptr(mem, self.avail_ring_address, self.avail_ring_size())?
-            .cast();
-        self.used_ring_ptr = self
-            .get_slice_ptr(mem, self.used_ring_address, self.used_ring_size())?
-            .cast();
-
-        // All the above pointers are expected to be aligned properly; otherwise some methods (e.g.
+        // All the below pointers are verified to be aligned properly; otherwise some methods (e.g.
         // `read_volatile()`) will panic. Such an unalignment is possible when restored from a
         // broken/fuzzed snapshot.
         //
@@ -347,24 +350,12 @@ impl Queue {
         // > Available Ring   2
         // > Used Ring        4
         // > ================ ==========
-        if !self.desc_table_ptr.cast::<u128>().is_aligned() {
-            return Err(QueueError::PointerNotAligned(
-                self.desc_table_ptr as usize,
-                16,
-            ));
-        }
-        if !self.avail_ring_ptr.is_aligned() {
-            return Err(QueueError::PointerNotAligned(
-                self.avail_ring_ptr as usize,
-                2,
-            ));
-        }
-        if !self.used_ring_ptr.cast::<u32>().is_aligned() {
-            return Err(QueueError::PointerNotAligned(
-                self.used_ring_ptr as usize,
-                4,
-            ));
-        }
+        self.desc_table_ptr =
+            self.get_aligned_slice_ptr(mem, self.desc_table_address, self.desc_table_size(), 16)?;
+        self.avail_ring_ptr =
+            self.get_aligned_slice_ptr(mem, self.avail_ring_address, self.avail_ring_size(), 2)?;
+        self.used_ring_ptr =
+            self.get_aligned_slice_ptr(mem, self.used_ring_address, self.used_ring_size(), 4)?;

         Ok(())
     }
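
The change replaces the per-pointer HVA alignment checks with a single mask test on the guest physical address. A minimal, standalone sketch of that reasoning, assuming a 4096-byte page size and a page-aligned guest memory mapping; the `is_aligned` helper and the addresses below are illustrative and not part of the diff:

```rust
/// Mask-based alignment test: for a power-of-two `alignment`,
/// `addr & (alignment - 1) == 0` holds exactly when `addr` is a
/// multiple of `alignment`.
fn is_aligned(addr: u64, alignment: u64) -> bool {
    debug_assert!(alignment.is_power_of_two());
    addr & (alignment - 1) == 0
}

fn main() {
    // Virtio requires the descriptor table to be 16-byte aligned, the available
    // ring 2-byte aligned, and the used ring 4-byte aligned.
    assert!(is_aligned(0x1000, 16));
    assert!(!is_aligned(0x1008, 16));

    // Guest memory is mapped at a page-aligned host address, so translating a
    // GPA to an HVA adds a multiple of the page size. Any alignment that divides
    // the page size (16, 4, and 2 all divide 4096) is therefore preserved by the
    // translation, which is why checking the GPA alone is sufficient.
    let host_base: u64 = 0x7f00_0000_0000; // hypothetical page-aligned mmap base
    let gpa: u64 = 0x1_0010; // 16-byte aligned guest physical address
    let hva = host_base + gpa; // offset within a single region
    assert!(is_aligned(hva, 16));
}
```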