
stage2 behavior tests for all targets passing with the LLVM backend #11583


Merged: 9 commits, May 5, 2022
82 changes: 82 additions & 0 deletions lib/std/target.zig
@@ -1773,6 +1773,88 @@ pub const Target = struct {
else => false,
};
}

pub inline fn maxIntAlignment(target: Target) u16 {
return switch (target.cpu.arch) {
.avr => 1,
.msp430 => 2,
.xcore => 4,

.arm,
.armeb,
.thumb,
.thumbeb,
.hexagon,
.mips,
.mipsel,
.powerpc,
.powerpcle,
.r600,
.amdgcn,
.riscv32,
.sparc,
.sparcel,
.s390x,
.lanai,
.wasm32,
.wasm64,
=> 8,

.i386 => return switch (target.os.tag) {
.windows => 8,
else => 4,
},

// For x86_64, LLVMABIAlignmentOfType(i128) reports 8. However I think 16
// is a better number for three reasons:
// 1. Better machine code when loading into SIMD register.
// 2. The C ABI wants 16 for extern structs.
// 3. 16-byte cmpxchg needs 16-byte alignment.
// Same logic for riscv64, powerpc64, mips64, sparcv9.
.x86_64,
.riscv64,
.powerpc64,
.powerpc64le,
.mips64,
.mips64el,
.sparcv9,

// Even LLVMABIAlignmentOfType(i128) agrees on these targets.
.aarch64,
.aarch64_be,
.aarch64_32,
.bpfel,
.bpfeb,
.nvptx,
.nvptx64,
=> 16,

// The targets below are unverified, but because C requires int128_t to be
// 16-byte aligned, 16 is a safe default.
.spu_2,
.csky,
.arc,
.m68k,
.tce,
.tcele,
.le32,
.amdil,
.hsail,
.spir,
.kalimba,
.renderscript32,
.spirv32,
.shave,
.le64,
.amdil64,
.hsail64,
.spir64,
.renderscript64,
.ve,
.spirv64,
=> 16,
};
}
};

test {
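A quick aside, not part of the diff: since maxIntAlignment is a method on std.Target, it can be queried through @import("builtin").target. The concrete per-architecture values are the ones tabulated above; a minimal sketch of how a caller might rely on the invariants:

const std = @import("std");
const builtin = @import("builtin");

test "maxIntAlignment of the native target is a power of two no larger than 16" {
    // The switch above only ever yields 1, 2, 4, 8, or 16.
    const alignment = builtin.target.maxIntAlignment();
    try std.testing.expect(alignment >= 1 and alignment <= 16);
    try std.testing.expect(std.math.isPowerOfTwo(alignment));
}
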
60 changes: 10 additions & 50 deletions src/AstGen.zig
@@ -7423,63 +7423,33 @@ fn builtinCall(
},

.atomic_load => {
const int_type = try typeExpr(gz, scope, params[0]);
// TODO allow this pointer type to be volatile
const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
.ptr_type_simple = .{
.is_allowzero = false,
.is_mutable = false,
.is_volatile = false,
.size = .One,
.elem_type = int_type,
},
} });
const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.Bin{
const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.AtomicLoad{
// zig fmt: off
.lhs = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
.rhs = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[2]),
.elem_type = try typeExpr(gz, scope, params[0]),
.ptr = try expr (gz, scope, .none, params[1]),
.ordering = try expr (gz, scope, .{ .coerced_ty = .atomic_order_type }, params[2]),
// zig fmt: on
});
return rvalue(gz, rl, result, node);
},
.atomic_rmw => {
const int_type = try typeExpr(gz, scope, params[0]);
// TODO allow this pointer type to be volatile
const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
.ptr_type_simple = .{
.is_allowzero = false,
.is_mutable = true,
.is_volatile = false,
.size = .One,
.elem_type = int_type,
},
} });
const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{
// zig fmt: off
.ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
.ptr = try expr(gz, scope, .none, params[1]),
.operation = try expr(gz, scope, .{ .coerced_ty = .atomic_rmw_op_type }, params[2]),
.operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]),
.operand = try expr(gz, scope, .{ .ty = int_type }, params[3]),
.ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]),
// zig fmt: on
});
return rvalue(gz, rl, result, node);
},
.atomic_store => {
const int_type = try typeExpr(gz, scope, params[0]);
// TODO allow this pointer type to be volatile
const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
.ptr_type_simple = .{
.is_allowzero = false,
.is_mutable = true,
.is_volatile = false,
.size = .One,
.elem_type = int_type,
},
} });
const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
// zig fmt: off
.ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
.operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[2]),
.ptr = try expr(gz, scope, .none, params[1]),
.operand = try expr(gz, scope, .{ .ty = int_type }, params[2]),
.ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[3]),
// zig fmt: on
});
@@ -7684,20 +7654,10 @@ fn cmpxchg(
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const int_type = try typeExpr(gz, scope, params[0]);
// TODO: allow this to be volatile
const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
.ptr_type_simple = .{
.is_allowzero = false,
.is_mutable = true,
.is_volatile = false,
.size = .One,
.elem_type = int_type,
},
} });
const result = try gz.addPlNode(tag, node, Zir.Inst.Cmpxchg{
// zig fmt: off
.ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
.expected_value = try expr(gz, scope, .{ .coerced_ty = int_type }, params[2]),
.ptr = try expr(gz, scope, .none, params[1]),
.expected_value = try expr(gz, scope, .{ .ty = int_type }, params[2]),
.new_value = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]),
.success_order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]),
.failure_order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[5]),
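For context, a sketch (not part of the diff) of the source-level builtins whose lowering changes above. The element type now travels in the dedicated ZIR payloads (Zir.Inst.AtomicLoad, AtomicRmw, AtomicStore, Cmpxchg), and the ptr operand is no longer coerced to a synthesized non-volatile pointer type, which is what the removed TODO comments were about:

const std = @import("std");

test "atomic builtins whose AstGen lowering changed" {
    var x: u32 = 0;
    @atomicStore(u32, &x, 1, .SeqCst); // x = 1
    const prev = @atomicRmw(u32, &x, .Add, 2, .SeqCst); // returns 1, x = 3
    const loaded = @atomicLoad(u32, &x, .SeqCst); // 3
    const failed = @cmpxchgStrong(u32, &x, 3, 4, .SeqCst, .SeqCst); // success yields null
    try std.testing.expect(prev == 1);
    try std.testing.expect(loaded == 3);
    try std.testing.expect(failed == null);
}
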
15 changes: 15 additions & 0 deletions src/Module.zig
@@ -151,6 +151,8 @@ allocated_decls: std.SegmentedList(Decl, 0) = .{},
/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
decls_free_list: std.ArrayListUnmanaged(Decl.Index) = .{},

global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{},

const MonomorphedFuncsSet = std.HashMapUnmanaged(
*Fn,
void,
@@ -2831,6 +2833,7 @@ pub fn deinit(mod: *Module) void {

mod.decls_free_list.deinit(gpa);
mod.allocated_decls.deinit(gpa);
mod.global_assembly.deinit(gpa);
}

pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
@@ -2842,6 +2845,9 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
if (decl.deletion_flag) {
assert(mod.deletion_set.swapRemove(decl_index));
}
if (mod.global_assembly.fetchRemove(decl_index)) |kv| {
gpa.free(kv.value);
}
if (decl.has_tv) {
if (decl.getInnerNamespace()) |namespace| {
namespace.destroyDecls(mod);
@@ -5714,3 +5720,12 @@ pub fn markDeclAlive(mod: *Module, decl: *Decl) void {
fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void {
return mod.markDeclAlive(mod.declPtr(decl_index));
}

pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u8) !void {
try mod.global_assembly.ensureUnusedCapacity(mod.gpa, 1);

const duped_source = try mod.gpa.dupe(u8, source);
errdefer mod.gpa.free(duped_source);

mod.global_assembly.putAssumeCapacityNoClobber(decl_index, duped_source);
}
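
For orientation, a hedged sketch rather than anything from the diff: global_assembly keys gpa-duplicated assembly source by the Decl that produced it, and destroyDecl above frees the entry when the Decl goes away. The user-level construct that presumably lands here is container-level assembly, along the lines of:

// Module-level assembly block; the mnemonics are x86-64 and purely illustrative.
comptime {
    asm (
        \\.globl my_asm_func
        \\my_asm_func:
        \\  ret
    );
}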