Commit 59a7e56

Fix StorageDead and ForLint drops in tail calls and unwind paths
This commit fixes several issues related to StorageDead and ForLint drops:

1. Add StorageDead and ForLint drops to unwind_drops for all functions
   - Updated diverge_cleanup_target to include StorageDead and ForLint drops
     in the unwind_drops tree for all functions (not just coroutines), but
     only when there's a cleanup path (i.e., when there are Value or ForLint
     drops)
   - This ensures proper drop ordering for borrow-checking on panic paths

2. Fix break_for_tail_call to handle StorageDead and ForLint drops
   - Don't skip StorageDead drops for non-drop types
   - Adjust the unwind_to pointer for StorageDead and ForLint drops, matching
     the behavior in build_scope_drops
   - Only adjust unwind_to when it's valid (not DropIdx::MAX)
   - This prevents debug-assert failures when processing drops in tail calls

3. Fix index-out-of-bounds panic when unwind_to is DropIdx::MAX
   - Added checks to ensure unwind_to != DropIdx::MAX before accessing
     unwind_drops.drop_nodes[unwind_to]
   - Only emit StorageDead on unwind paths when there's actually an unwind
     path
   - Only add entry points to unwind_drops when unwind_to is valid
   - This prevents panics when there's no cleanup needed

4. Add test for explicit tail calls with StorageDead drops
   - Tests that tail calls work correctly when StorageDead and ForLint drops
     are present in the unwind path
   - Verifies that unwind_to is correctly adjusted for all drop kinds

These changes make the borrow-checker stricter and more consistent by
ensuring that StorageDead statements are emitted on unwind paths for all
functions when there's a cleanup path, allowing unsafe code to rely on drop
order being enforced consistently.
1 parent 4082d6a commit 59a7e56
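
To make items 2 and 3 of the message concrete before the diffs: the unwind path is a chain of drop nodes linked by `next` indices, and `unwind_to` must advance past every drop kind on that chain, StorageDead and ForLint included, or the debug assertions comparing chain nodes against scheduled drops go out of sync. Below is a minimal, self-contained sketch of that invariant using simplified, hypothetical types (`DropNode`, `MAX`, `walk_drops` are illustrative stand-ins, not rustc's actual `DropTree` API).

// Simplified model of the unwind drop chain (hypothetical types, for
// illustration only). `MAX` plays the role of `DropIdx::MAX`, i.e.
// "there is no unwind path".

#[derive(Clone, Copy, PartialEq, Debug)]
enum DropKind {
    Value,
    Storage,
    ForLint,
}

struct DropNode {
    kind: DropKind,
    local: u32,
    next: usize, // index of the drop that runs after this one on unwind
}

const MAX: usize = usize::MAX;

// Walk the drops scheduled on the normal exit path, keeping `unwind_to` in
// sync with the unwind chain. The fix: advance for *every* drop kind found
// on the chain, and only touch the chain when an unwind path actually exists.
fn walk_drops(scheduled: &[(u32, DropKind)], chain: &[DropNode], mut unwind_to: usize) {
    for &(local, kind) in scheduled {
        if unwind_to != MAX {
            // Without the adjustment for Storage/ForLint drops, these
            // assertions fail as soon as the drop kinds interleave.
            debug_assert_eq!(chain[unwind_to].local, local);
            debug_assert_eq!(chain[unwind_to].kind, kind);
            unwind_to = chain[unwind_to].next;
        }
    }
    assert_eq!(unwind_to, MAX, "walked the whole chain");
}

fn main() {
    // Value, ForLint, and StorageDead nodes all appear on the unwind chain
    // and all must advance `unwind_to`.
    let chain = [
        DropNode { kind: DropKind::Value, local: 3, next: 1 },
        DropNode { kind: DropKind::ForLint, local: 2, next: 2 },
        DropNode { kind: DropKind::Storage, local: 1, next: MAX },
    ];
    let scheduled =
        [(3, DropKind::Value), (2, DropKind::ForLint), (1, DropKind::Storage)];
    walk_drops(&scheduled, &chain, 0);

    // With no unwind path at all, nothing is touched and nothing panics --
    // the `DropIdx::MAX` guard from item 3.
    walk_drops(&[(3, DropKind::Value)], &[], MAX);
}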

File tree

2 files changed: +158 −53 lines changed

compiler/rustc_mir_build/src/builder/scope.rs

Lines changed: 89 additions & 53 deletions
@@ -256,16 +256,8 @@ struct DropNodeKey {
 
 impl Scope {
     /// Whether there's anything to do for the cleanup path, that is,
-    /// when unwinding through this scope. This includes destructors,
-    /// but not StorageDead statements, which don't get emitted at all
-    /// for unwinding, for several reasons:
-    /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
-    /// * LLVM's memory dependency analysis can't handle it atm
-    /// * polluting the cleanup MIR with StorageDead creates
-    ///   landing pads even though there's no actual destructors
-    /// * freeing up stack space has no effect during unwinding
-    /// Note that for coroutines we do emit StorageDeads, for the
-    /// use of optimizations in the MIR coroutine transform.
+    /// when unwinding through this scope. This includes destructors
+    /// and StorageDead statements to maintain proper drop ordering.
     fn needs_cleanup(&self) -> bool {
         self.drops.iter().any(|drop| match drop.kind {
             DropKind::Value | DropKind::ForLint => true,
@@ -1109,6 +1101,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         );
         let typing_env = self.typing_env();
         let unwind_drops = &mut self.scopes.unwind_drops;
+        let has_storage_drops = self.scopes.scopes[1..]
+            .iter()
+            .any(|scope| scope.drops.iter().any(|d| d.kind == DropKind::Storage));
 
         // the innermost scope contains only the destructors for the tail call arguments
         // we only want to drop these in case of a panic, so we skip it
@@ -1118,7 +1113,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 let source_info = drop_data.source_info;
                 let local = drop_data.local;
 
-                if !self.local_decls[local].ty.needs_drop(self.tcx, typing_env) {
+                // Skip Value drops for types that don't need drop, but process
+                // StorageDead and ForLint drops for all locals
+                if drop_data.kind == DropKind::Value
+                    && !self.local_decls[local].ty.needs_drop(self.tcx, typing_env)
+                {
                     continue;
                 }
 
@@ -1127,15 +1126,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                         // `unwind_to` should drop the value that we're about to
                         // schedule. If dropping this value panics, then we continue
                         // with the *next* value on the unwind path.
-                        debug_assert_eq!(
-                            unwind_drops.drop_nodes[unwind_to].data.local,
-                            drop_data.local
-                        );
-                        debug_assert_eq!(
-                            unwind_drops.drop_nodes[unwind_to].data.kind,
-                            drop_data.kind
-                        );
-                        unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                        if unwind_to != DropIdx::MAX {
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.local,
+                                drop_data.local
+                            );
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.kind,
+                                drop_data.kind
+                            );
+                            unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                        }
 
                         let mut unwind_entry_point = unwind_to;
 
@@ -1162,6 +1163,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                         block = next;
                     }
                     DropKind::ForLint => {
+                        // If this ForLint drop is in unwind_drops, we need to adjust
+                        // unwind_to to match, just like in build_scope_drops
+                        if has_storage_drops && unwind_to != DropIdx::MAX {
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.local,
+                                drop_data.local
+                            );
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.kind,
+                                drop_data.kind
+                            );
+                            unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                        }
                         self.cfg.push(
                             block,
                             Statement::new(
@@ -1174,6 +1188,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                         );
                     }
                     DropKind::Storage => {
+                        // If this StorageDead drop is in unwind_drops, we need to adjust
+                        // unwind_to to match, just like in build_scope_drops
+                        if has_storage_drops && unwind_to != DropIdx::MAX {
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.local,
+                                drop_data.local
+                            );
+                            debug_assert_eq!(
+                                unwind_drops.drop_nodes[unwind_to].data.kind,
+                                drop_data.kind
+                            );
+                            unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                        }
                         // Only temps and vars need their storage dead.
                         assert!(local.index() > self.arg_count);
                         self.cfg.push(
@@ -1217,6 +1244,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None };
         let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
         let typing_env = self.typing_env();
+        let has_storage_drops = scope.drops.iter().any(|d| d.kind == DropKind::Storage);
+        // Only emit StorageDead on unwind paths when there's actually an unwind path
+        let storage_dead_on_unwind = has_storage_drops && unwind_to != DropIdx::MAX;
         build_scope_drops(
             &mut self.cfg,
             &mut self.scopes.unwind_drops,
@@ -1225,7 +1255,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             block,
             unwind_to,
             dropline_to,
-            is_coroutine && needs_cleanup,
+            storage_dead_on_unwind,
             self.arg_count,
             |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, typing_env, v),
         )
@@ -1609,9 +1639,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
 
         let is_coroutine = self.coroutine.is_some();
+        // Check if there's a cleanup path (i.e., Value or ForLint drops that require cleanup)
+        let has_cleanup_path = self.scopes.scopes[uncached_scope..=target]
+            .iter()
+            .any(|scope| scope.drops.iter().any(|d| d.kind == DropKind::Value || d.kind == DropKind::ForLint));
         for scope in &mut self.scopes.scopes[uncached_scope..=target] {
             for drop in &scope.drops {
-                if is_coroutine || drop.kind == DropKind::Value {
+                // Add all drops to unwind_drops for all functions (not just coroutines)
+                // to maintain proper drop ordering for borrow-checking. This matches
+                // the behavior in build_exit_tree.
+                // For coroutines, we always add all drops. For other functions, we now
+                // also add StorageDead and ForLint drops, but only when there's a cleanup path.
+                if is_coroutine || drop.kind == DropKind::Value || drop.kind == DropKind::ForLint || (drop.kind == DropKind::Storage && has_cleanup_path) {
                     cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
                 }
             }
@@ -1793,11 +1832,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 /// * `scope`, describes the drops that will occur on exiting the scope in regular execution
 /// * `block`, the block to branch to once drops are complete (assuming no unwind occurs)
 /// * `unwind_to`, describes the drops that would occur at this point in the code if a
-///   panic occurred (a subset of the drops in `scope`, since we sometimes elide StorageDead and other
-///   instructions on unwinding)
+///   panic occurred (a subset of the drops in `scope`)
 /// * `dropline_to`, describes the drops that would occur at this point in the code if a
 ///   coroutine drop occurred.
-/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
+/// * `storage_dead_on_unwind`, if true, then we emit `StorageDead` on the unwind path
+///   and adjust `unwind_to` accordingly (used for all functions with StorageDead drops)
 /// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
 fn build_scope_drops<'tcx, F>(
     cfg: &mut CFG<'tcx>,
@@ -1830,10 +1869,10 @@ where
     // drops panic (panicking while unwinding will abort, so there's no need for
    // another set of arrows).
    //
-    // For coroutines, we unwind from a drop on a local to its StorageDead
-    // statement. For other functions we don't worry about StorageDead. The
-    // drops for the unwind path should have already been generated by
-    // `diverge_cleanup_gen`.
+    // We unwind from a drop on a local to its StorageDead statement for all
+    // functions (not just coroutines) to maintain proper drop ordering for
+    // borrow-checking. The drops for the unwind path should have already been
+    // generated by `diverge_cleanup_gen`.
 
     // `unwind_to` indicates what needs to be dropped should unwinding occur.
     // This is a subset of what needs to be dropped when exiting the scope.
@@ -1862,9 +1901,11 @@ where
                 //
                 // We adjust this BEFORE we create the drop (e.g., `drops[n]`)
                 // because `drops[n]` should unwind to `drops[n-1]`.
-                debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local);
-                debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
-                unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                if unwind_to != DropIdx::MAX {
+                    debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local);
+                    debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
+                    unwind_to = unwind_drops.drop_nodes[unwind_to].next;
+                }
 
                 if let Some(idx) = dropline_to {
                     debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.local, drop_data.local);
@@ -1880,7 +1921,9 @@ where
                     continue;
                 }
 
-                unwind_drops.add_entry_point(block, unwind_to);
+                if unwind_to != DropIdx::MAX {
+                    unwind_drops.add_entry_point(block, unwind_to);
+                }
                 if let Some(to) = dropline_to
                     && is_async_drop(local)
                 {
@@ -1904,11 +1947,9 @@ where
             }
             DropKind::ForLint => {
                 // As in the `DropKind::Storage` case below:
-                // normally lint-related drops are not emitted for unwind,
-                // so we can just leave `unwind_to` unmodified, but in some
-                // cases we emit things ALSO on the unwind path, so we need to adjust
-                // `unwind_to` in that case.
-                if storage_dead_on_unwind {
+                // we emit lint-related drops on the unwind path when `storage_dead_on_unwind`
+                // is true, so we need to adjust `unwind_to` in that case.
+                if storage_dead_on_unwind && unwind_to != DropIdx::MAX {
                     debug_assert_eq!(
                         unwind_drops.drop_nodes[unwind_to].data.local,
                         drop_data.local
@@ -1937,12 +1978,11 @@ where
                 );
             }
             DropKind::Storage => {
-                // Ordinarily, storage-dead nodes are not emitted on unwind, so we don't
-                // need to adjust `unwind_to` on this path. However, in some specific cases
-                // we *do* emit storage-dead nodes on the unwind path, and in that case now that
-                // the storage-dead has completed, we need to adjust the `unwind_to` pointer
-                // so that any future drops we emit will not register storage-dead.
-                if storage_dead_on_unwind {
+                // We emit storage-dead nodes on the unwind path for borrow-checking
+                // purposes. When `storage_dead_on_unwind` is true, we need to adjust
+                // the `unwind_to` pointer now that the storage-dead has completed, so
+                // that any future drops we emit will not register storage-dead.
+                if storage_dead_on_unwind && unwind_to != DropIdx::MAX {
                     debug_assert_eq!(
                         unwind_drops.drop_nodes[unwind_to].data.local,
                         drop_data.local
@@ -1986,15 +2026,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         for (drop_idx, drop_node) in drops.drop_nodes.iter_enumerated().skip(1) {
             match drop_node.data.kind {
                 DropKind::Storage | DropKind::ForLint => {
-                    if is_coroutine {
-                        let unwind_drop = self
-                            .scopes
-                            .unwind_drops
-                            .add_drop(drop_node.data, unwind_indices[drop_node.next]);
-                        unwind_indices.push(unwind_drop);
-                    } else {
-                        unwind_indices.push(unwind_indices[drop_node.next]);
-                    }
+                    let unwind_drop = self
+                        .scopes
+                        .unwind_drops
+                        .add_drop(drop_node.data, unwind_indices[drop_node.next]);
+                    unwind_indices.push(unwind_drop);
                 }
                 DropKind::Value => {
                     let unwind_drop = self
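
For background on the add_drop calls in the hunks above, here is a toy model of the deduplicating drop tree suggested by the struct DropNodeKey visible in the first hunk header. This is an assumed shape for illustration, not rustc's actual implementation: every node points at the drop that runs after it on unwind, and adding the same (local, kind) drop under the same parent returns the existing node, so exit paths that share a suffix of drops share tree nodes.

use std::collections::HashMap;

// Toy drop tree (an assumed shape for illustration). Node 0 is a synthetic
// root; every other node points at the drop that runs after it on unwind.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum DropKind {
    Value,
    Storage,
    ForLint,
}

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct DropNodeKey {
    next: usize,
    local: u32,
    kind: DropKind,
}

struct DropTree {
    nodes: Vec<DropNodeKey>,
    existing: HashMap<DropNodeKey, usize>, // memoized lookup keyed like DropNodeKey
}

impl DropTree {
    fn new() -> Self {
        let root = DropNodeKey { next: 0, local: 0, kind: DropKind::Value };
        DropTree { nodes: vec![root], existing: HashMap::new() }
    }

    // Adding a drop that already hangs off `next` returns the existing node,
    // so unwind paths sharing a suffix of drops share tree nodes.
    fn add_drop(&mut self, local: u32, kind: DropKind, next: usize) -> usize {
        let key = DropNodeKey { next, local, kind };
        if let Some(&idx) = self.existing.get(&key) {
            return idx;
        }
        self.nodes.push(key);
        let idx = self.nodes.len() - 1;
        self.existing.insert(key, idx);
        idx
    }
}

fn main() {
    let mut tree = DropTree::new();

    // One exit path: StorageDead(local 1), with Drop(local 2) running before it.
    let a = tree.add_drop(1, DropKind::Storage, 0);
    let b = tree.add_drop(2, DropKind::Value, a);

    // A second exit path registering the same drops reuses the same nodes.
    let a2 = tree.add_drop(1, DropKind::Storage, 0);
    let b2 = tree.add_drop(2, DropKind::Value, a2);
    assert_eq!((a, b), (a2, b2));

    // ForLint nodes participate like any other drop kind.
    let c = tree.add_drop(3, DropKind::ForLint, b);
    assert_eq!(c, 3);
    assert_eq!(tree.nodes.len(), 4); // root + 3 unique drops
}

The new test file below exercises the tail-call path end to end.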
Lines changed: 69 additions & 0 deletions
@@ -0,0 +1,69 @@
+//@ run-pass
+//@ ignore-backends: gcc
+#![expect(incomplete_features)]
+#![feature(explicit_tail_calls)]
+
+// Test that explicit tail calls work correctly when there are StorageDead
+// and ForLint drops in the unwind path. This ensures that `unwind_to` is
+// correctly adjusted in `break_for_tail_call` when encountering StorageDead
+// and ForLint drops, matching the behavior in `build_scope_drops`.
+
+#[allow(dead_code)]
+struct Droppable(i32);
+
+impl Drop for Droppable {
+    fn drop(&mut self) {}
+}
+
+fn tail_call_with_storage_dead() {
+    // These will have StorageDead drops (non-drop types)
+    let _a = 42i32;
+    let _b = true;
+    let _c = 10u8;
+
+    // This will have a Value drop (drop type)
+    let _d = Droppable(1);
+
+    // Tail call - if unwind_to isn't adjusted for StorageDead drops,
+    // the debug assert will fail when processing the Value drop
+    become next_function();
+}
+
+fn tail_call_with_mixed_drops() {
+    // StorageDead drop
+    let _storage = 100i32;
+
+    // Value drop
+    let _value = Droppable(2);
+
+    // Another StorageDead drop
+    let _storage2 = 200i32;
+
+    // Another Value drop
+    let _value2 = Droppable(3);
+
+    // Tail call - tests that unwind_to is adjusted correctly
+    // for both StorageDead and Value drops in sequence
+    become next_function();
+}
+
+fn tail_call_with_storage_before_value() {
+    // Multiple StorageDead drops before a Value drop
+    let _s1 = 1i32;
+    let _s2 = 2i32;
+    let _s3 = 3i32;
+
+    // Value drop - if StorageDead drops aren't handled,
+    // unwind_to will point to wrong node and assert fails
+    let _v = Droppable(4);
+
+    become next_function();
+}
+
+fn next_function() {}
+
+fn main() {
+    tail_call_with_storage_dead();
+    tail_call_with_mixed_drops();
+    tail_call_with_storage_before_value();
+}
