@@ -256,16 +256,8 @@ struct DropNodeKey {
 
 impl Scope {
     /// Whether there's anything to do for the cleanup path, that is,
-    /// when unwinding through this scope. This includes destructors,
-    /// but not StorageDead statements, which don't get emitted at all
-    /// for unwinding, for several reasons:
-    /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
-    /// * LLVM's memory dependency analysis can't handle it atm
-    /// * polluting the cleanup MIR with StorageDead creates
-    ///   landing pads even though there's no actual destructors
-    /// * freeing up stack space has no effect during unwinding
-    /// Note that for coroutines we do emit StorageDeads, for the
-    ///   use of optimizations in the MIR coroutine transform.
+    /// when unwinding through this scope. This includes destructors
+    /// and StorageDead statements to maintain proper drop ordering.
     fn needs_cleanup(&self) -> bool {
         self.drops.iter().any(|drop| match drop.kind {
             DropKind::Value | DropKind::ForLint => true,
@@ -1109,6 +1101,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         );
         let typing_env = self.typing_env();
         let unwind_drops = &mut self.scopes.unwind_drops;
+        let has_storage_drops = self.scopes.scopes[1..]
+            .iter()
+            .any(|scope| scope.drops.iter().any(|d| d.kind == DropKind::Storage));
 
         // the innermost scope contains only the destructors for the tail call arguments
         // we only want to drop these in case of a panic, so we skip it
@@ -1118,7 +1113,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 let source_info = drop_data.source_info;
                 let local = drop_data.local;
 
-                if !self.local_decls[local].ty.needs_drop(self.tcx, typing_env) {
+                // Skip Value drops for types that don't need drop, but process
+                // StorageDead and ForLint drops for all locals.
+                if drop_data.kind == DropKind::Value
+                    && !self.local_decls[local].ty.needs_drop(self.tcx, typing_env)
+                {
                     continue;
                 }
 
@@ -1127,15 +1126,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                         // `unwind_to` should drop the value that we're about to
                         // schedule. If dropping this value panics, then we continue
                         // with the *next* value on the unwind path.
-                        debug_assert_eq!(
-                            unwind_drops.drop_nodes[unwind_to].data.local,
-                            drop_data.local
-                        );
-                        debug_assert_eq!(
-                            unwind_drops.drop_nodes[unwind_to].data.kind,
-                            drop_data.kind
-                        );
-                        unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                        if unwind_to != DropIdx::MAX {
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.local,
+                                drop_data.local
+                            );
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.kind,
+                                drop_data.kind
+                            );
+                            unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                        }
 
                         let mut unwind_entry_point = unwind_to;
 
@@ -1162,6 +1163,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                         block = next;
                     }
                     DropKind::ForLint => {
+                        // If this ForLint drop is in unwind_drops, we need to adjust
+                        // unwind_to to match, just like in build_scope_drops.
+                        if has_storage_drops && unwind_to != DropIdx::MAX {
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.local,
+                                drop_data.local
+                            );
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.kind,
+                                drop_data.kind
+                            );
+                            unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                        }
                         self.cfg.push(
                             block,
                             Statement::new(
@@ -1174,6 +1188,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                         );
                     }
                     DropKind::Storage => {
+                        // If this StorageDead drop is in unwind_drops, we need to adjust
+                        // unwind_to to match, just like in build_scope_drops.
+                        if has_storage_drops && unwind_to != DropIdx::MAX {
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.local,
+                                drop_data.local
+                            );
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.kind,
+                                drop_data.kind
+                            );
+                            unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                        }
                         // Only temps and vars need their storage dead.
                         assert!(local.index() > self.arg_count);
                         self.cfg.push(
@@ -1217,6 +1244,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None };
         let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
         let typing_env = self.typing_env();
+        let has_storage_drops = scope.drops.iter().any(|d| d.kind == DropKind::Storage);
+        // Only emit StorageDead on unwind paths when there's actually an unwind path.
+        let storage_dead_on_unwind = has_storage_drops && unwind_to != DropIdx::MAX;
         build_scope_drops(
             &mut self.cfg,
             &mut self.scopes.unwind_drops,
@@ -1225,7 +1255,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             block,
             unwind_to,
             dropline_to,
-            is_coroutine && needs_cleanup,
+            storage_dead_on_unwind,
             self.arg_count,
             |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, typing_env, v),
         )
@@ -1609,9 +1639,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
 
         let is_coroutine = self.coroutine.is_some();
+        // Check if there's a cleanup path (i.e., Value or ForLint drops that require cleanup).
+        let has_cleanup_path = self.scopes.scopes[uncached_scope..=target]
+            .iter()
+            .any(|scope| scope.drops.iter().any(|d| d.kind == DropKind::Value || d.kind == DropKind::ForLint));
         for scope in &mut self.scopes.scopes[uncached_scope..=target] {
             for drop in &scope.drops {
-                if is_coroutine || drop.kind == DropKind::Value {
+                // Add drops to unwind_drops for all functions (not just coroutines) to
+                // maintain proper drop ordering for borrow-checking. This matches the
+                // behavior in build_exit_tree.
+                // Coroutines still add every drop; other functions now also add ForLint
+                // drops, and StorageDead drops whenever there is a cleanup path.
+                if is_coroutine || drop.kind == DropKind::Value || drop.kind == DropKind::ForLint || (drop.kind == DropKind::Storage && has_cleanup_path) {
                     cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
                 }
             }
@@ -1793,11 +1832,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 /// * `scope`, describes the drops that will occur on exiting the scope in regular execution
 /// * `block`, the block to branch to once drops are complete (assuming no unwind occurs)
 /// * `unwind_to`, describes the drops that would occur at this point in the code if a
-///   panic occurred (a subset of the drops in `scope`, since we sometimes elide StorageDead and other
-///   instructions on unwinding)
+///   panic occurred (a subset of the drops in `scope`)
 /// * `dropline_to`, describes the drops that would occur at this point in the code if a
 ///   coroutine drop occurred.
-/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
+/// * `storage_dead_on_unwind`, if true, then we emit `StorageDead` on the unwind path
+///   and adjust `unwind_to` accordingly (used for all functions with StorageDead drops)
 /// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
 fn build_scope_drops<'tcx, F>(
     cfg: &mut CFG<'tcx>,
@@ -1830,10 +1869,10 @@ where
     // drops panic (panicking while unwinding will abort, so there's no need for
     // another set of arrows).
     //
-    // For coroutines, we unwind from a drop on a local to its StorageDead
-    // statement. For other functions we don't worry about StorageDead. The
-    // drops for the unwind path should have already been generated by
-    // `diverge_cleanup_gen`.
+    // We unwind from a drop on a local to its StorageDead statement for all
+    // functions (not just coroutines) to maintain proper drop ordering for
+    // borrow-checking. The drops for the unwind path should have already been
+    // generated by `diverge_cleanup_gen`.
 
     // `unwind_to` indicates what needs to be dropped should unwinding occur.
     // This is a subset of what needs to be dropped when exiting the scope.
@@ -1862,9 +1901,11 @@ where
                 //
                 // We adjust this BEFORE we create the drop (e.g., `drops[n]`)
                 // because `drops[n]` should unwind to `drops[n-1]`.
-                debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local);
-                debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
-                unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                if unwind_to != DropIdx::MAX {
+                    debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local);
+                    debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
+                    unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                }
 
                 if let Some(idx) = dropline_to {
                     debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.local, drop_data.local);
@@ -1880,7 +1921,9 @@ where
                     continue;
                 }
 
-                unwind_drops.add_entry_point(block, unwind_to);
+                if unwind_to != DropIdx::MAX {
+                    unwind_drops.add_entry_point(block, unwind_to);
+                }
                 if let Some(to) = dropline_to
                     && is_async_drop(local)
                 {
@@ -1904,11 +1947,9 @@ where
             }
             DropKind::ForLint => {
                 // As in the `DropKind::Storage` case below:
-                // normally lint-related drops are not emitted for unwind,
-                // so we can just leave `unwind_to` unmodified, but in some
-                // cases we emit things ALSO on the unwind path, so we need to adjust
-                // `unwind_to` in that case.
-                if storage_dead_on_unwind {
+                // we emit lint-related drops on the unwind path when `storage_dead_on_unwind`
+                // is true, so we need to adjust `unwind_to` in that case.
+                if storage_dead_on_unwind && unwind_to != DropIdx::MAX {
                     debug_assert_eq!(
                         unwind_drops.drop_nodes[unwind_to].data.local,
                         drop_data.local
@@ -1937,12 +1978,11 @@ where
                 );
             }
             DropKind::Storage => {
-                // Ordinarily, storage-dead nodes are not emitted on unwind, so we don't
-                // need to adjust `unwind_to` on this path. However, in some specific cases
-                // we *do* emit storage-dead nodes on the unwind path, and in that case now that
-                // the storage-dead has completed, we need to adjust the `unwind_to` pointer
-                // so that any future drops we emit will not register storage-dead.
-                if storage_dead_on_unwind {
+                // We emit storage-dead nodes on the unwind path for borrow-checking
+                // purposes. When `storage_dead_on_unwind` is true, we need to adjust
+                // the `unwind_to` pointer now that the storage-dead has completed, so
+                // that any future drops we emit will not register storage-dead.
+                if storage_dead_on_unwind && unwind_to != DropIdx::MAX {
                     debug_assert_eq!(
                         unwind_drops.drop_nodes[unwind_to].data.local,
                         drop_data.local
@@ -1986,15 +2026,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
             for (drop_idx, drop_node) in drops.drop_nodes.iter_enumerated().skip(1) {
                 match drop_node.data.kind {
                     DropKind::Storage | DropKind::ForLint => {
-                        if is_coroutine {
-                            let unwind_drop = self
-                                .scopes
-                                .unwind_drops
-                                .add_drop(drop_node.data, unwind_indices[drop_node.next]);
-                            unwind_indices.push(unwind_drop);
-                        } else {
-                            unwind_indices.push(unwind_indices[drop_node.next]);
-                        }
+                        let unwind_drop = self
+                            .scopes
+                            .unwind_drops
+                            .add_drop(drop_node.data, unwind_indices[drop_node.next]);
+                        unwind_indices.push(unwind_drop);
                     }
                     DropKind::Value => {
                         let unwind_drop = self
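
To illustrate the drop ordering the new comments refer to, here is a minimal, hypothetical example (not part of this commit, and the MIR it describes is an assumption): a local that is borrowed by a value with a destructor. On every exit path, including the unwind path, the guard's drop must be sequenced before the borrowed local's StorageDead, which is the ordering the unwind-path StorageDead statements now make explicit for non-coroutine functions.

struct Guard<'a>(&'a String);

impl<'a> Drop for Guard<'a> {
    fn drop(&mut self) {
        // The borrow of `data` is still live here, so the drop of `guard`
        // must come before `StorageDead(data)` in the generated MIR.
        println!("guard saw: {}", self.0);
    }
}

fn may_panic() {}

fn example() {
    let data = String::from("hello");
    let guard = Guard(&data);
    // If this call unwinds, the cleanup path drops `guard` and only then
    // marks the storage of `guard` and `data` dead, in that order.
    may_panic();
    drop(guard);
}

fn main() {
    example();
}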