@@ -5,11 +5,11 @@
 //! This also includes code for pattern bindings in `let` statements and
 //! function parameters.
 
-use std::assert_matches::assert_matches;
 use std::borrow::Borrow;
 use std::mem;
 use std::sync::Arc;
 
+use itertools::{Itertools, Position};
 use rustc_abi::VariantIdx;
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::stack::ensure_sufficient_stack;
@@ -561,16 +561,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         // return: it isn't bound by move until right before entering the arm.
         // To handle this we instead unschedule its drop after each time
         // we lower the guard.
+        // As a result, we end up with the drop order of the last sub-branch we lower. To use
+        // the drop order for the first sub-branch, we lower sub-branches in reverse (#142163).
+        // TODO: I'm saving the breaking change for the next commit. For now, a stopgap:
+        let sub_branch_to_use_the_drops_from =
+            if arm_match_scope.is_some() { Position::Last } else { Position::First };
         let target_block = self.cfg.start_new_block();
-        let mut schedule_drops = ScheduleDrops::Yes;
-        let arm = arm_match_scope.unzip().0;
-        // We keep a stack of all of the bindings and type ascriptions
-        // from the parent candidates that we visit, that also need to
-        // be bound for each candidate.
-        for sub_branch in branch.sub_branches {
-            if let Some(arm) = arm {
-                self.clear_top_scope(arm.scope);
-            }
+        for (pos, sub_branch) in branch.sub_branches.into_iter().with_position() {
+            debug_assert!(pos != Position::Only);
+            let schedule_drops = if pos == sub_branch_to_use_the_drops_from {
+                ScheduleDrops::Yes
+            } else {
+                ScheduleDrops::No
+            };
             let binding_end = self.bind_and_guard_matched_candidate(
                 sub_branch,
                 fake_borrow_temps,
@@ -579,9 +582,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 schedule_drops,
                 emit_storage_live,
             );
-            if arm.is_none() {
-                schedule_drops = ScheduleDrops::No;
-            }
             self.cfg.goto(binding_end, outer_source_info, target_block);
         }
 
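[Reviewer note, not part of the patch] The rewritten loop hinges on `Itertools::with_position`, which pairs each item with a `Position` tag (`First`, `Middle`, `Last`, or `Only`). A minimal standalone sketch of the same pick-one-iteration pattern, assuming itertools 0.11+ (where `with_position` yields `(Position, item)` tuples) and hypothetical stand-ins for the real sub-branches:

    use itertools::{Itertools, Position};

    fn main() {
        // Stand-ins for `branch.sub_branches`; with a guard present, the
        // stopgap above keeps the drops scheduled by the *last* sub-branch.
        let sub_branches = ["sub-branch 1", "sub-branch 2", "sub-branch 3"];
        let use_the_drops_from = Position::Last;
        for (pos, name) in sub_branches.into_iter().with_position() {
            let schedule_drops = pos == use_the_drops_from;
            println!("{name}: schedule drops = {schedule_drops}");
        }
    }

This prints `schedule drops = true` only for the final element, mirroring how only one sub-branch's drop schedule is allowed to survive.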
@@ -2453,11 +2453,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 
             // Bindings for guards require some extra handling to automatically
             // insert implicit references/dereferences.
-            self.bind_matched_candidate_for_guard(
-                block,
-                schedule_drops,
-                sub_branch.bindings.iter(),
-            );
+            // This always schedules storage drops, so we may need to unschedule them below.
+            self.bind_matched_candidate_for_guard(block, sub_branch.bindings.iter());
             let guard_frame = GuardFrame {
                 locals: sub_branch
                     .bindings
@@ -2489,6 +2486,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     )
                 });
 
+            // If this isn't the final sub-branch being lowered, we need to unschedule drops of
+            // bindings and temporaries created for and by the guard. As a result, the drop order
+            // for the arm will correspond to the binding order of the final sub-branch lowered.
+            if matches!(schedule_drops, ScheduleDrops::No) {
+                self.clear_top_scope(arm.scope);
+            }
+
             let source_info = self.source_info(guard_span);
             let guard_end = self.source_info(tcx.sess.source_map().end_point(guard_span));
             let guard_frame = self.guard_context.pop().unwrap();
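[Reviewer note, not part of the patch] The drop order being juggled here is observable from plain Rust whenever an or-pattern's alternatives bind the same names in different orders and a guard forces this lowering path. A standalone illustration with made-up types; which of `x`/`y` drops first at the end of the arm is exactly what the surviving drop schedule decides:

    struct Noisy(&'static str);
    impl Drop for Noisy {
        fn drop(&mut self) {
            println!("dropping {}", self.0);
        }
    }

    #[allow(dead_code)]
    enum E {
        A(Noisy, Noisy),
        B(Noisy, Noisy),
    }

    fn main() {
        let e = E::A(Noisy("first"), Noisy("second"));
        match e {
            // Two sub-branches bind `x` and `y` in opposite orders; the
            // guard sends lowering through bind_and_guard_matched_candidate.
            E::A(x, y) | E::B(y, x) if x.0 == "first" => {
                println!("matched {} then {}", x.0, y.0);
            }
            _ => {}
        }
    }

Whether `x` or `y` is dropped first depends on which sub-branch's drops were kept; that observable order is the subtlety #142163 is about.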
@@ -2538,14 +2542,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 let cause = FakeReadCause::ForGuardBinding;
                 self.cfg.push_fake_read(post_guard_block, guard_end, cause, Place::from(local_id));
             }
-            assert_matches!(
-                schedule_drops,
-                ScheduleDrops::Yes,
-                "patterns with guards must schedule drops"
-            );
+            // Only schedule drops for the last sub-branch we lower.
             self.bind_matched_candidate_for_arm_body(
                 post_guard_block,
-                ScheduleDrops::Yes,
+                schedule_drops,
                 by_value_bindings,
                 emit_storage_live,
             );
@@ -2671,7 +2671,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     fn bind_matched_candidate_for_guard<'b>(
         &mut self,
         block: BasicBlock,
-        schedule_drops: ScheduleDrops,
         bindings: impl IntoIterator<Item = &'b Binding<'tcx>>,
     ) where
         'tcx: 'b,
@@ -2690,12 +2689,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             // a reference R: &T pointing to the location matched by
             // the pattern, and every occurrence of P within a guard
             // denotes *R.
+            // Drops must be scheduled to emit `StorageDead` on the guard's failure/break branches.
             let ref_for_guard = self.storage_live_binding(
                 block,
                 binding.var_id,
                 binding.span,
                 RefWithinGuard,
-                schedule_drops,
+                ScheduleDrops::Yes,
             );
             match binding.binding_mode.0 {
                 ByRef::No => {
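[Reviewer note, not part of the patch] The `R`/`*R` scheme in the comment above is why a by-value binding can be read inside its guard but not moved out of. A minimal illustration:

    fn main() {
        let opt = Some(String::from("guard demo"));
        match opt {
            // Inside the guard, `s` effectively denotes `*r` for a fresh
            // `r: &String`, so reading (`s.len()`) is fine, but anything
            // that tried to move `s` out of the matched place would be
            // rejected by the borrow checker.
            Some(s) if s.len() > 5 => println!("long: {s}"),
            _ => println!("short or none"),
        }
    }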
@@ -2705,13 +2705,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     self.cfg.push_assign(block, source_info, ref_for_guard, rvalue);
                 }
                 ByRef::Yes(mutbl) => {
-                    // The arm binding will be by reference, so eagerly create it now.
+                    // The arm binding will be by reference, so eagerly create it now. Drops must
+                    // be scheduled to emit `StorageDead` on the guard's failure/break branches.
                     let value_for_arm = self.storage_live_binding(
                         block,
                         binding.var_id,
                         binding.span,
                         OutsideGuard,
-                        schedule_drops,
+                        ScheduleDrops::Yes,
                     );
 
                     let rvalue =