@@ -123,6 +123,9 @@ impl<F, T, S, M> RawTask<F, T, S, M> {
123
123
let offset_r = offset_union;
124
124
125
125
TaskLayout {
126
+ // SAFETY: layout came from a Layout::extend call, which dynamically checks the
127
+ // invariants for StdLayout and returns None if they are not met. The leap_unwrap!
128
+ // would have panicked before this point.
126
129
layout : unsafe { layout. into_std ( ) } ,
127
130
offset_s,
128
131
offset_f,
@@ -163,10 +166,14 @@ where
163
166
unsafe {
164
167
// Allocate enough space for the entire task.
165
168
let ptr = match NonNull :: new ( alloc:: alloc:: alloc ( task_layout. layout ) as * mut ( ) ) {
169
+ // SAFETY: task_layout.layout definitely has non-zero size because it's the layout
170
+ // for a struct with a field that is AtomicUsize.
166
171
None => abort ( ) ,
167
172
Some ( p) => p,
168
173
} ;
169
174
175
+ // SAFETY: task_layout.layout has the correct layout for a C-style struct of Header
176
+ // followed by S followed by union { F, T }.
170
177
let raw = Self :: from_ptr ( ptr. as_ptr ( ) ) ;
171
178
172
179
let crate :: Builder {
@@ -176,6 +183,10 @@ where
176
183
} = builder;
177
184
178
185
// Write the header as the first field of the task.
186
+ // SAFETY: This write is OK because it's through a mutable pointer to a Header<M> that
187
+ // is definitely properly aligned and points to enough memory for a Header<M>. We
188
+ // didn't pass our pointer through any const references or other const-ifying
189
+ // operations so the provenance is good.
179
190
( raw. header as * mut Header < M > ) . write ( Header {
180
191
state : AtomicUsize :: new ( SCHEDULED | TASK | REFERENCE ) ,
181
192
awaiter : UnsafeCell :: new ( None ) ,
@@ -195,12 +206,19 @@ where
195
206
} ) ;
196
207
197
208
// Write the schedule function as the third field of the task.
209
+ // SAFETY: raw.schedule is also non-null, properly aligned, valid for writes of size
210
+ // size_of::<Schedule>().
198
211
( raw. schedule as * mut S ) . write ( schedule) ;
199
212
200
213
// Generate the future, now that the metadata has been pinned in place.
214
+ // SAFETY: Dereferencing raw.header is OK because it's properly initialized
215
+ // since we wrote to it.
201
216
let future = abort_on_panic ( || future ( & ( * raw. header ) . metadata ) ) ;
202
217
203
218
// Write the future as the fourth field of the task.
219
+ // SAFETY: This write is OK because raw.future is non-null, properly-aligned, and valid
220
+ // for writes of size F. Because we're not casting anything here we know it's the right
221
+ // type.
204
222
raw. future . write ( future) ;
205
223
206
224
ptr
@@ -210,10 +228,15 @@ where
210
228
/// Creates a `RawTask` from a raw task pointer.
211
229
#[ inline]
212
230
pub ( crate ) fn from_ptr ( ptr : * const ( ) ) -> Self {
231
+ // TODO: This function technically should be unsafe, since ptr must point to a region that
232
+ // has a size and alignment matching task layout, since doing pointer arithmetic that
233
+ // leaves the region or creating unaligned pointers is UB.
213
234
let task_layout = Self :: task_layout ( ) ;
214
235
let p = ptr as * const u8 ;
215
236
216
237
unsafe {
238
+ // SAFETY: We're just picking apart the given pointer into its constituent fields.
239
+ // These do correctly correspond to the fields as laid out in task_layout.
217
240
Self {
218
241
header : p as * const Header < M > ,
219
242
schedule : p. add ( task_layout. offset_s ) as * const S ,
@@ -232,6 +255,8 @@ where
232
255
unsafe fn wake ( ptr : * const ( ) ) {
233
256
// This is just an optimization. If the schedule function has captured variables, then
234
257
// we'll do less reference counting if we wake the waker by reference and then drop it.
258
+ // TODO: Add safety docs here. What requirements does ptr have to meet? We can probably
259
+ // assume that it has to be a pointer to a properly-allocated task.
235
260
if mem:: size_of :: < S > ( ) > 0 {
236
261
Self :: wake_by_ref ( ptr) ;
237
262
Self :: drop_waker ( ptr) ;
@@ -240,6 +265,8 @@ where
240
265
241
266
let raw = Self :: from_ptr ( ptr) ;
242
267
268
+ // SAFETY: This is just loading the state. Note that this does implicitly create an
269
+ // &AtomicUsize, which is intentional.
243
270
let mut state = ( * raw. header ) . state . load ( Ordering :: Acquire ) ;
244
271
245
272
loop {
@@ -296,6 +323,8 @@ where
296
323
297
324
/// Wakes a waker by reference.
298
325
unsafe fn wake_by_ref ( ptr : * const ( ) ) {
326
+ // TODO: Add safety docs, presumably ptr needs to be alive and point to a
327
+ // correctly-allocated task.
299
328
let raw = Self :: from_ptr ( ptr) ;
300
329
301
330
let mut state = ( * raw. header ) . state . load ( Ordering :: Acquire ) ;
@@ -346,6 +375,8 @@ where
346
375
// because the schedule function cannot be destroyed while the waker is
347
376
// still alive.
348
377
let task = Runnable :: from_raw ( NonNull :: new_unchecked ( ptr as * mut ( ) ) ) ;
378
+ // SAFETY: The task is still alive, so we can call its schedule
379
+ // function.
349
380
( * raw. schedule ) . schedule ( task, ScheduleInfo :: new ( false ) ) ;
350
381
}
351
382
@@ -367,6 +398,8 @@ where
367
398
368
399
// If the reference count overflowed, abort.
369
400
if state > isize:: MAX as usize {
401
+ // NOTE: isize::MAX definitely has more than 1 << 8 numbers between it and usize::MAX
402
+ // so we're guaranteed to hit this.
370
403
abort ( ) ;
371
404
}
372
405
@@ -394,9 +427,17 @@ where
394
427
( * raw. header )
395
428
. state
396
429
. store ( SCHEDULED | CLOSED | REFERENCE , Ordering :: Release ) ;
430
+ // SAFETY: ptr still points to a valid task even though its refcount has dropped
431
+ // to zero.
432
+ // NOTE: We should make sure that the executor is properly dropping scheduled tasks
433
+ // with a refcount of zero.
397
434
Self :: schedule ( ptr, ScheduleInfo :: new ( false ) ) ;
398
435
} else {
399
436
// Otherwise, destroy the task right away.
437
+ // NOTE: This isn't going to drop the output/result from the future. We have to
438
+ // have already dealt with it, so whoever is calling drop_waker needs to be
439
+ // checked. It looks like whoever sets the TASK bit to zero is affirming that they
440
+ // have moved or dropped the output/result.
400
441
Self :: destroy ( ptr) ;
401
442
}
402
443
}
@@ -416,6 +457,8 @@ where
416
457
// If this was the last reference to the task and the `Task` has been dropped too,
417
458
// then destroy the task.
418
459
if new & !( REFERENCE - 1 ) == 0 && new & TASK == 0 {
460
+ // SAFETY: This is safe as long as ptr obeys the same invariants we need everywhere
461
+ // else.
419
462
Self :: destroy ( ptr) ;
420
463
}
421
464
}
@@ -435,6 +478,8 @@ where
435
478
}
436
479
437
480
let task = Runnable :: from_raw ( NonNull :: new_unchecked ( ptr as * mut ( ) ) ) ;
481
+ // NOTE: The schedule function has to drop tasks with a refcount of zero. That's not
482
+ // happening in this function, so it has to be happening in the schedule member function.
438
483
( * raw. schedule ) . schedule ( task, info) ;
439
484
}
440
485
@@ -459,6 +504,9 @@ where
459
504
///
460
505
/// The schedule function will be dropped, and the task will then get deallocated.
461
506
/// The task must be closed before this function is called.
507
+ ///
508
+ /// NOTE: Whoever calls this function has to have already dealt with the return value of the
509
+ /// future or its error if it failed. We are not going to drop it!
462
510
#[ inline]
463
511
unsafe fn destroy ( ptr : * const ( ) ) {
464
512
let raw = Self :: from_ptr ( ptr) ;
@@ -467,13 +515,18 @@ where
467
515
// We need a safeguard against panics because destructors can panic.
468
516
abort_on_panic ( || {
469
517
// Drop the header along with the metadata.
518
+ // SAFETY: This points to a valid Header<M> that we have permission to move out of and
519
+ // drop.
470
520
( raw. header as * mut Header < M > ) . drop_in_place ( ) ;
471
521
472
522
// Drop the schedule function.
523
+ // SAFETY: This points to a valid S that we have permission to move out of and drop.
473
524
( raw. schedule as * mut S ) . drop_in_place ( ) ;
474
525
} ) ;
475
526
476
527
// Finally, deallocate the memory reserved by the task.
528
+ // SAFETY: We know that ptr was allocated with layout task_layout.layout, so deallocating
529
+ // it with the same layout is correct.
477
530
alloc:: alloc:: dealloc ( ptr as * mut u8 , task_layout. layout ) ;
478
531
}
479
532
@@ -482,9 +535,11 @@ where
482
535
/// If polling its future panics, the task will be closed and the panic will be propagated into
483
536
/// the caller.
484
537
unsafe fn run ( ptr : * const ( ) ) -> bool {
538
+ // SAFETY: As long as it's a pointer to a valid task, we can get the raw form of it.
485
539
let raw = Self :: from_ptr ( ptr) ;
486
540
487
541
// Create a context from the raw task pointer and the vtable inside the its header.
542
+ // SAFETY: The implementation of RAW_WAKER_VTABLE is correct.
488
543
let waker = ManuallyDrop :: new ( Waker :: from_raw ( RawWaker :: new ( ptr, & Self :: RAW_WAKER_VTABLE ) ) ) ;
489
544
let cx = & mut Context :: from_waker ( & waker) ;
490
545
@@ -507,6 +562,9 @@ where
507
562
}
508
563
509
564
// Drop the task reference.
565
+ // SAFETY: This pointer is definitely alive because we hold a reference to it.
566
+ // TODO: But who holds the reference to it? The queue in general? The fact that
567
+ // it's scheduled?
510
568
Self :: drop_ref ( ptr) ;
511
569
512
570
// Notify the awaiter that the future has been dropped.
@@ -563,7 +621,10 @@ where
563
621
match poll {
564
622
Poll :: Ready ( out) => {
565
623
// Replace the future with its output.
624
+ // SAFETY: We have exclusive access to the task so we can drop the future for it.
566
625
Self :: drop_future ( ptr) ;
626
+ // SAFETY: raw.output definitely points to a valid memory location to hold the
627
+ // Output type of the future.
567
628
raw. output . write ( out) ;
568
629
569
630
// The task is now completed.
@@ -593,10 +654,12 @@ where
593
654
// Take the awaiter out.
594
655
let mut awaiter = None ;
595
656
if state & AWAITER != 0 {
657
+ // SAFETY: This is safe for the same reasons as we said earlier.
596
658
awaiter = ( * raw. header ) . take ( None ) ;
597
659
}
598
660
599
661
// Drop the task reference.
662
+ // SAFETY: We "own" the ref to this task and are allowed to drop it.
600
663
Self :: drop_ref ( ptr) ;
601
664
602
665
// Notify the awaiter that the future has been dropped.
@@ -625,6 +688,9 @@ where
625
688
if state & CLOSED != 0 && !future_dropped {
626
689
// The thread that closed the task didn't drop the future because it was
627
690
// running so now it's our responsibility to do so.
691
+ // SAFETY: This is corroborated by header.rs where they state that closing
692
+ // a task doesn't drop the future, it just marks it closed and puts it back
693
+ // in the polling queue so a poller can drop it.
628
694
Self :: drop_future ( ptr) ;
629
695
future_dropped = true ;
630
696
}
@@ -648,6 +714,8 @@ where
648
714
}
649
715
650
716
// Drop the task reference.
717
+ // SAFETY: We're allowed to drop the ref as stated earlier. We
718
+ // checked that it won't accidentally be double-dropped.
651
719
Self :: drop_ref ( ptr) ;
652
720
653
721
// Notify the awaiter that the future has been dropped.
@@ -657,10 +725,13 @@ where
657
725
} else if state & SCHEDULED != 0 {
658
726
// The thread that woke the task up didn't reschedule it because
659
727
// it was running so now it's our responsibility to do so.
728
+ // SAFETY: ptr definitely points to a valid task that hasn't been
729
+ // dropped. It has its SCHEDULED bit set.
660
730
Self :: schedule ( ptr, ScheduleInfo :: new ( true ) ) ;
661
731
return true ;
662
732
} else {
663
733
// Drop the task reference.
734
+ // SAFETY: We're still allowed.
664
735
Self :: drop_ref ( ptr) ;
665
736
}
666
737
break ;
@@ -697,6 +768,7 @@ where
697
768
if state & CLOSED != 0 {
698
769
// The thread that closed the task didn't drop the future because it
699
770
// was running so now it's our responsibility to do so.
771
+ // SAFETY: If poll panicked then the thread didn't drop the future.
700
772
RawTask :: < F , T , S , M > :: drop_future ( ptr) ;
701
773
702
774
// Mark the task as not running and not scheduled.
@@ -711,6 +783,7 @@ where
711
783
}
712
784
713
785
// Drop the task reference.
786
+ // SAFETY: We still have permission to drop a ref.
714
787
RawTask :: < F , T , S , M > :: drop_ref ( ptr) ;
715
788
716
789
// Notify the awaiter that the future has been dropped.
@@ -729,6 +802,8 @@ where
729
802
) {
730
803
Ok ( state) => {
731
804
// Drop the future because the task is now closed.
805
+ // SAFETY: This is effectively the same situation as earlier.
806
+ // TODO: DRY this up by refactoring this.
732
807
RawTask :: < F , T , S , M > :: drop_future ( ptr) ;
733
808
734
809
// Take the awaiter out.
0 commit comments