@@ -271,6 +271,261 @@ impl Queue {
     }
 }
 
+#[cfg(kani)]
+#[allow(dead_code)]
+mod verification {
+    use std::mem::ManuallyDrop;
+    use vm_memory::MmapRegion;
+
+    use std::num::Wrapping;
+    use vm_memory::FileOffset;
+
+    use vm_memory::guest_memory::GuestMemoryIterator;
+    use vm_memory::{GuestMemoryRegion, MemoryRegionAddress};
+
+    use super::*;
+
+    /// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
+    /// `GuestMemoryMmap`, which manages a list of regions and then does a binary
+    /// search to determine which region a specific read or write request goes to,
+    /// this only uses a single region. Eliminating this binary search significantly
+    /// speeds up all queue proofs, because it eliminates the only loop contained herein,
+    /// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
+    /// it works identically to `GuestMemoryMmap` with only a single contained region.
+    pub struct ProofGuestMemory {
+        the_region: vm_memory::GuestRegionMmap,
+    }
+
+    impl<'a> GuestMemoryIterator<'a, vm_memory::GuestRegionMmap> for ProofGuestMemory {
+        type Iter = std::iter::Once<&'a vm_memory::GuestRegionMmap>;
+    }
+
+    impl GuestMemory for ProofGuestMemory {
+        type R = vm_memory::GuestRegionMmap;
+        type I = Self;
+
+        fn num_regions(&self) -> usize {
+            1
+        }
+
+        fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
+            self.the_region
+                .to_region_addr(addr)
+                .map(|_| &self.the_region)
+        }
+
+        fn iter(&self) -> <Self::I as GuestMemoryIterator<Self::R>>::Iter {
+            std::iter::once(&self.the_region)
+        }
+
+        fn try_access<F>(
+            &self,
+            count: usize,
+            addr: GuestAddress,
+            mut f: F,
+        ) -> vm_memory::guest_memory::Result<usize>
+        where
+            F: FnMut(
+                usize,
+                usize,
+                MemoryRegionAddress,
+                &Self::R,
+            ) -> vm_memory::guest_memory::Result<usize>,
+        {
+            // We only have a single region, meaning a lot of the complications of the default
+            // try_access implementation for dealing with reads/writes across multiple
+            // regions do not apply.
+            let region_addr = self
+                .the_region
+                .to_region_addr(addr)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            self.the_region
+                .checked_offset(region_addr, count)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            f(0, count, region_addr, &self.the_region)
+        }
+    }
+
+    pub struct ProofContext(pub Queue, pub ProofGuestMemory);
+
+    pub struct MmapRegionStub {
+        addr: *mut u8,
+        size: usize,
+        bitmap: (),
+        file_offset: Option<FileOffset>,
+        prot: i32,
+        flags: i32,
+        owned: bool,
+        hugetlbfs: Option<bool>,
+    }
+
+    /// We start the first guest memory region at an offset so that harnesses using
+    /// Queue::any() will be exposed to queue segments both before and after valid guest memory.
+    /// This conforms to MockSplitQueue::new(), which uses `0` as the starting address of the
+    /// virtqueue. Also, QUEUE_END is the size only if GUEST_MEMORY_BASE is `0`.
+    const GUEST_MEMORY_BASE: u64 = 0;
+
+    // We size our guest memory to fit a properly aligned queue, plus some wiggle bytes
+    // to make sure we do not only test queues where all segments are consecutively aligned.
+    // We need to give at least 16 bytes of buffer space for the descriptor table to be
+    // able to change its address, as it is 16-byte aligned.
+    const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;
+
+    fn guest_memory(memory: *mut u8) -> ProofGuestMemory {
+        // Ideally, we'd want to do
+        // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
+        //    .with_raw_mmap_pointer(bytes.as_mut_ptr())
+        //    .build()
+        //    .unwrap()};
+        // However, .build() calls .build_raw(), which contains a call to libc::sysconf.
+        // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the Rust
+        // standard library using a special version of the libc crate, it runs into some problems
+        // [1]. Even if we work around those problems, we run into performance problems [2].
+        // Therefore, for now we stick to this ugly transmute hack (which only works because
+        // the kani compiler will never re-order fields, so we can treat repr(Rust) as repr(C)).
+        //
+        // [1]: https://github.com/model-checking/kani/issues/2673
+        // [2]: https://github.com/model-checking/kani/issues/2538
+        let region_stub = MmapRegionStub {
+            addr: memory,
+            size: GUEST_MEMORY_SIZE,
+            bitmap: Default::default(),
+            file_offset: None,
+            prot: 0,
+            flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
+            owned: false,
+            hugetlbfs: None,
+        };
+
+        let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
+
+        let guest_region =
+            vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
+
+        // Use a single memory region, just as firecracker does for guests of size < 2GB.
+        // For larger guests, firecracker uses two regions (due to the MMIO gap being
+        // at the top of the 32-bit address space).
+        ProofGuestMemory {
+            the_region: guest_region,
+        }
+    }
+
+    // can't implement kani::Arbitrary for the relevant types due to orphan rules
+    fn setup_kani_guest_memory() -> ProofGuestMemory {
+        // Non-deterministic Vec that will be used as the guest memory. We use `exact_vec` for now
+        // as `any_vec` will likely result in worse performance. We do not lose much from
+        // `exact_vec`, as our proofs do not make any assumptions about "filling" guest
+        // memory: since everything is placed at non-deterministic addresses with
+        // non-deterministic lengths, we still closely cover all scenarios that would be
+        // covered by smaller guest memory. We leak the memory allocated here, so that it
+        // doesn't get deallocated at the end of this function. We do not explicitly
+        // de-allocate, but since this is a kani proof, that does not matter.
+        guest_memory(
+            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
+        )
+    }
+
+    const MAX_QUEUE_SIZE: u16 = 256;
+
+    // Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
+    // at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2
+    // specification.
+    const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;
+
+    /// descriptor table has 16 bytes per entry, avail ring starts right after
+    const AVAIL_RING_BASE_ADDRESS: u64 =
+        QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;
+
+    /// Used ring starts after avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE),
+    /// and needs 2 bytes of padding
+    const USED_RING_BASE_ADDRESS: u64 =
+        AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;
+
+    /// The address of the first byte after the queue. Since our queue starts at guest physical
+    /// address 0, this is also the size of the memory area occupied by the queue.
+    /// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE
+    const QUEUE_END: u64 = USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
+
+    impl kani::Arbitrary for ProofContext {
+        fn any() -> Self {
+            let mem = setup_kani_guest_memory();
+
+            let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
+
+            queue.ready = true;
+
+            queue.set_desc_table_address(
+                Some(QUEUE_BASE_ADDRESS as u32),
+                Some((QUEUE_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_avail_ring_address(
+                Some(AVAIL_RING_BASE_ADDRESS as u32),
+                Some((AVAIL_RING_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_used_ring_address(
+                Some(USED_RING_BASE_ADDRESS as u32),
+                Some((USED_RING_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_next_avail(kani::any());
+            queue.set_next_used(kani::any());
+            queue.set_event_idx(kani::any());
+            queue.num_added = Wrapping(kani::any());
+
+            kani::assume(queue.is_valid(&mem));
+
+            ProofContext(queue, mem)
+        }
+    }
+
+
+    #[kani::proof]
+    #[kani::unwind(0)] // There are no loops anywhere, but kani really enjoys getting stuck in std::ptr::drop_in_place.
+    // This is a compiler intrinsic that has a "dummy" implementation in stdlib that just
+    // recursively calls itself. Kani will generally unwind this recursion infinitely.
+    fn verify_spec_2_7_7_2() {
+        // Section 2.7.7.2 deals with device-to-driver notification suppression.
+        // It describes a mechanism by which the driver can tell the device that it does not
+        // want notifications (IRQs) about the device finishing processing individual buffers
+        // (descriptor chain heads) from the avail ring until a specific number of descriptors
+        // has been processed. This is done by the driver
+        // defining a "used_event" index, which tells the device "please do not notify me until
+        // used.ring[used_event] has been written to by you".
+        let ProofContext(mut queue, mem) = kani::any();
+
+        let num_added_old = queue.num_added.0;
+        let needs_notification = queue.needs_notification(&mem);
+
+        // event_idx_enabled is equivalent to VIRTIO_F_EVENT_IDX having been negotiated
+        if !queue.event_idx_enabled {
+            // The specification here says:
+            // After the device writes a descriptor index into the used ring:
+            // – If flags is 1, the device SHOULD NOT send a notification.
+            // – If flags is 0, the device MUST send a notification.
+            // flags is the first field in the avail_ring_address, which we completely ignore. We
+            // always send a notification, and as there is only a SHOULD NOT, that is okay.
+            assert!(needs_notification.unwrap());
+        } else {
+            // next_used - 1 is where the previous descriptor was placed
+            if Wrapping(queue.used_event(&mem, Ordering::Relaxed).unwrap()) == std::num::Wrapping(queue.next_used - Wrapping(1))
+                && num_added_old > 0
+            {
+                // If the idx field in the used ring (which determined where that descriptor index
+                // was placed) was equal to used_event, the device MUST send a
+                // notification.
+                assert!(needs_notification.unwrap());
+
+                kani::cover!();
+            }
+
+            // The other case is handled by a "SHOULD NOT send a notification" in the spec,
+            // so we do not care.
+        }
+    }
+}
+
 impl<'a> QueueGuard<'a> for Queue {
     type G = &'a mut Self;
 }
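
For reference, here is a standalone sketch (not part of the patch itself) spelling out the split-queue layout arithmetic behind `AVAIL_RING_BASE_ADDRESS`, `USED_RING_BASE_ADDRESS`, and `QUEUE_END` for the queue size the proofs use (`MAX_QUEUE_SIZE = 256`). The field sizes come from Section 2.7 of the VirtIO 1.2 specification, which the module cites; the expected values are just the constants above evaluated by hand.

```rust
/// Split-queue layout for a queue of 256 entries with the descriptor table
/// placed at guest physical address 0, mirroring the verification constants.
fn main() {
    const QUEUE_SIZE: u64 = 256;

    // Descriptor table: 16 bytes per descriptor.
    let desc_table = 0u64;
    let avail_ring = desc_table + QUEUE_SIZE * 16;

    // Available ring: flags (2) + idx (2) + ring (2 * QUEUE_SIZE) + used_event (2),
    // i.e. 6 + 2 * QUEUE_SIZE bytes, followed by 2 bytes of padding.
    let used_ring = avail_ring + 6 + 2 * QUEUE_SIZE + 2;

    // Used ring: flags (2) + idx (2) + ring (8 * QUEUE_SIZE) + avail_event (2),
    // i.e. 6 + 8 * QUEUE_SIZE bytes.
    let queue_end = used_ring + 6 + 8 * QUEUE_SIZE;

    assert_eq!(avail_ring, 4096); // AVAIL_RING_BASE_ADDRESS
    assert_eq!(used_ring, 4616); // USED_RING_BASE_ADDRESS
    assert_eq!(queue_end, 6670); // QUEUE_END, so GUEST_MEMORY_SIZE = 6670 + 30 = 6700
}
```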