compiler: Fix some real and theoretical miscompilations with allowzero and volatile #21882

Open

wants to merge 10 commits into master
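To illustrate the volatile half of the fix, here is a minimal hand-written sketch (not taken from the PR): AIR instructions such as `is_null_ptr` and `is_err_ptr` read through their pointer operand, so when that operand is a volatile pointer the read is an observable side effect and has to be lowered even if the boolean result is never used.

```zig
var backing: ?u32 = 5;

pub fn main() void {
    // Hypothetical MMIO-style location; `volatile` makes every read observable.
    const p: *volatile ?u32 = &backing;
    // The comparison result is discarded, but the volatile load feeding the
    // null check must still be performed.
    _ = (p.* == null);
}
```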
src/Air.zig (22 changes: 7 additions & 15 deletions)
@@ -1610,6 +1610,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
const data = air.instructions.items(.data)[@intFromEnum(inst)];
return switch (air.instructions.items(.tag)[@intFromEnum(inst)]) {
.arg,
.assembly,
.block,
.loop,
.repeat,
@@ -1750,12 +1751,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
.cmp_vector_optimized,
.is_null,
.is_non_null,
.is_null_ptr,
.is_non_null_ptr,
.is_err,
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
.bool_and,
.bool_or,
.int_from_ptr,
@@ -1770,7 +1767,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
.unwrap_errunion_payload,
.unwrap_errunion_err,
.unwrap_errunion_payload_ptr,
.unwrap_errunion_err_ptr,
.wrap_errunion_payload,
.wrap_errunion_err,
.struct_field_ptr,
@@ -1815,17 +1811,13 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
.work_group_id,
=> false,

.assembly => {
const extra = air.extraData(Air.Asm, data.ty_pl.payload);
const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0;
return is_volatile or if (extra.data.outputs_len == 1)
@as(Air.Inst.Ref, @enumFromInt(air.extra[extra.end])) != .none
else
extra.data.outputs_len > 1;
},
.load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip),
.is_non_null_ptr, .is_null_ptr, .is_non_err_ptr, .is_err_ptr => air.typeOf(data.un_op, ip).isVolatilePtrIp(ip),
.load, .unwrap_errunion_err_ptr => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip),
.slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip),
.atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
.atomic_load => switch (data.atomic_load.order) {
.unordered, .monotonic => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
else => true, // Stronger memory orderings have inter-thread side effects.
},
};
}

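For context on the new `atomic_load` arm in `mustLower`: an unused unordered or monotonic load has no inter-thread ordering effect, so it only needs to be lowered when its pointer is volatile, whereas acquire and stronger orderings constrain surrounding memory accesses regardless of whether the loaded value is used. A small hand-written Zig sketch of the distinction (not part of the diff):

```zig
fn poll(p: *const u32) void {
    // Unused and only monotonic: with the revised rule the backend may skip
    // lowering this load when `p` is not volatile.
    _ = @atomicLoad(u32, p, .monotonic);
    // An acquire (or seq_cst) load orders the accesses that follow it, so it
    // must still be lowered even though its result is discarded.
    _ = @atomicLoad(u32, p, .acquire);
}
```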
src/arch/riscv64/CodeGen.zig (98 changes: 62 additions & 36 deletions)
@@ -1503,7 +1503,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {

.mul,
.mul_wrap,
.div_trunc,
.div_exact,
.rem,

@@ -1521,13 +1521,13 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.max,
=> try func.airBinOp(inst, tag),


.ptr_add,
.ptr_sub => try func.airPtrArithmetic(inst, tag),

.mod,
.div_float,
.div_floor,
=> return func.fail("TODO: {s}", .{@tagName(tag)}),

.sqrt,
@@ -1681,7 +1681,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.ptr_slice_ptr_ptr => try func.airPtrSlicePtrPtr(inst),

.array_elem_val => try func.airArrayElemVal(inst),

.slice_elem_val => try func.airSliceElemVal(inst),
.slice_elem_ptr => try func.airSliceElemPtr(inst),

@@ -1811,8 +1811,15 @@ fn finishAirBookkeeping(func: *Func) void {
fn finishAirResult(func: *Func, inst: Air.Inst.Index, result: MCValue) void {
if (func.liveness.isUnused(inst)) switch (result) {
.none, .dead, .unreach => {},
else => unreachable, // Why didn't the result die?
// Why didn't the result die?
.register => |r| if (r != .zero) unreachable,
else => unreachable,
} else {
switch (result) {
.register => |r| if (r == .zero) unreachable, // Why did we discard a used result?
else => {},
}

tracking_log.debug("%{d} => {} (birth)", .{ inst, result });
func.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(result));
// In some cases, an operand may be reused as the result.
@@ -7785,48 +7792,56 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
const pt = func.pt;
const zcu = pt.zcu;
const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
const order: std.builtin.AtomicOrder = atomic_load.order;

const ptr_ty = func.typeOf(atomic_load.ptr);
const elem_ty = ptr_ty.childType(zcu);
const ptr_mcv = try func.resolveInst(atomic_load.ptr);
const result: MCValue = result: {
const order: std.builtin.AtomicOrder = atomic_load.order;

const bit_size = elem_ty.bitSize(zcu);
if (bit_size > 64) return func.fail("TODO: airAtomicStore > 64 bits", .{});
const ptr_ty = func.typeOf(atomic_load.ptr);
const elem_ty = ptr_ty.childType(zcu);
const ptr_mcv = try func.resolveInst(atomic_load.ptr);

const result_mcv = try func.allocRegOrMem(elem_ty, inst, true);
assert(result_mcv == .register); // should be less than 8 bytes
const bit_size = elem_ty.bitSize(zcu);
if (bit_size > 64) return func.fail("TODO: airAtomicLoad > 64 bits", .{});

if (order == .seq_cst) {
_ = try func.addInst(.{
.tag = .fence,
.data = .{ .fence = .{
.pred = .rw,
.succ = .rw,
} },
});
}
const unused = func.liveness.isUnused(inst);

try func.load(result_mcv, ptr_mcv, ptr_ty);
const result_mcv: MCValue = if (func.liveness.isUnused(inst))
.{ .register = .zero }
else
try func.allocRegOrMem(elem_ty, inst, true);
assert(result_mcv == .register); // should be less than 8 bytes

switch (order) {
// Don't guarnetee other memory operations to be ordered after the load.
.unordered => {},
.monotonic => {},
// Make sure all previous reads happen before any reading or writing accurs.
.seq_cst, .acquire => {
if (order == .seq_cst) {
_ = try func.addInst(.{
.tag = .fence,
.data = .{ .fence = .{
.pred = .r,
.pred = .rw,
.succ = .rw,
} },
});
},
else => unreachable,
}
}

try func.load(result_mcv, ptr_mcv, ptr_ty);

switch (order) {
// Don't guarantee other memory operations to be ordered after the load.
.unordered, .monotonic => {},
// Make sure all previous reads happen before any reading or writing occurs.
.acquire, .seq_cst => {
_ = try func.addInst(.{
.tag = .fence,
.data = .{ .fence = .{
.pred = .r,
.succ = .rw,
} },
});
},
else => unreachable,
}

break :result if (unused) .unreach else result_mcv;
};

return func.finishAir(inst, result_mcv, .{ atomic_load.ptr, .none, .none });
return func.finishAir(inst, result, .{ atomic_load.ptr, .none, .none });
}

fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
@@ -7856,6 +7871,17 @@ fn airAtomicStore(func: *Func, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
}

try func.store(ptr_mcv, val_mcv, ptr_ty);

if (order == .seq_cst) {
_ = try func.addInst(.{
.tag = .fence,
.data = .{ .fence = .{
.pred = .rw,
.succ = .rw,
} },
});
}

return func.finishAir(inst, .unreach, .{ bin_op.lhs, bin_op.rhs, .none });
}

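The trailing `fence rw, rw` added after a `seq_cst` store is about store-to-load ordering. A hedged, hand-written Zig sketch of the pattern it exists for (the exact fence placement is a backend choice; this only shows the source-level requirement):

```zig
var flag_a: u32 = 0;
var flag_b: u32 = 0;

// One half of a Dekker-style handshake; the other thread runs the mirror
// image with the flags swapped.
fn threadA() u32 {
    @atomicStore(u32, &flag_a, 1, .seq_cst);
    // Without a full fence between the store and the next load, a weakly
    // ordered target could hoist the load above the store, letting both
    // threads read 0 from the other's flag.
    return @atomicLoad(u32, &flag_b, .seq_cst);
}
```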
src/arch/x86_64/CodeGen.zig (34 changes: 20 additions & 14 deletions)
@@ -16596,23 +16596,29 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {

fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
const result: MCValue = result: {
const ptr_ty = self.typeOf(atomic_load.ptr);
const ptr_mcv = try self.resolveInst(atomic_load.ptr);
const ptr_lock = switch (ptr_mcv) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
else => null,
};
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);

const ptr_ty = self.typeOf(atomic_load.ptr);
const ptr_mcv = try self.resolveInst(atomic_load.ptr);
const ptr_lock = switch (ptr_mcv) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
else => null,
};
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
const unused = self.liveness.isUnused(inst);

const dst_mcv =
if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
ptr_mcv
else
try self.allocRegOrMem(inst, true);
const dst_mcv: MCValue = if (unused)
.{ .register = try self.register_manager.allocReg(null, self.regClassForType(ptr_ty.childType(self.pt.zcu))) }
else if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
ptr_mcv
else
try self.allocRegOrMem(inst, true);

try self.load(dst_mcv, ptr_ty, ptr_mcv);

try self.load(dst_mcv, ptr_ty, ptr_mcv);
return self.finishAir(inst, dst_mcv, .{ atomic_load.ptr, .none, .none });
break :result if (unused) .unreach else dst_mcv;
};
return self.finishAir(inst, result, .{ atomic_load.ptr, .none, .none });
}

fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {