diff --git a/lib/compiler/translate-c/Translator.zig b/lib/compiler/translate-c/Translator.zig index b630584eb308..87de2d263109 100644 --- a/lib/compiler/translate-c/Translator.zig +++ b/lib/compiler/translate-c/Translator.zig @@ -378,8 +378,12 @@ pub const builtin_typedef_map = std.StaticStringMap([]const u8).initComptime(.{ .{ "int8_t", "i8" }, .{ "uint16_t", "u16" }, .{ "int16_t", "i16" }, + .{ "uint24_t", "u24" }, + .{ "int24_t", "i24" }, .{ "uint32_t", "u32" }, .{ "int32_t", "i32" }, + .{ "uint48_t", "u48" }, + .{ "int48_t", "i48" }, .{ "uint64_t", "u64" }, .{ "int64_t", "i64" }, .{ "intptr_t", "isize" }, diff --git a/lib/std/Target.zig b/lib/std/Target.zig index 26cff86ecd2e..251e4cf49246 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -68,6 +68,8 @@ pub const Os = struct { opengl, vulkan, + tios, + // LLVM tags deliberately omitted: // - bridgeos // - cheriotrtos @@ -205,6 +207,8 @@ pub const Os = struct { .opencl, .opengl, .vulkan, + + .tios, => .semver, .hurd => .hurd, @@ -659,6 +663,12 @@ pub const Os = struct { .max = .{ .major = 4, .minor = 6, .patch = 0 }, }, }, + .tios => .{ + .semver = .{ + .min = .{ .major = 5, .minor = 0, .patch = 0 }, + .max = .{ .major = 5, .minor = 8, .patch = 4 }, + }, + }, .vulkan => .{ .semver = .{ .min = .{ .major = 1, .minor = 2, .patch = 0 }, @@ -746,6 +756,7 @@ pub const wasm = @import("Target/wasm.zig"); pub const x86 = @import("Target/x86.zig"); pub const xcore = @import("Target/xcore.zig"); pub const xtensa = @import("Target/xtensa.zig"); +pub const z80 = @import("Target/generic.zig"); pub const Abi = enum { none, @@ -932,6 +943,7 @@ pub const Abi = enum { .opencl, .opengl, .vulkan, + .tios, => .none, }; } @@ -1064,6 +1076,7 @@ pub fn toElfMachine(target: *const Target) std.elf.EM { .avr => .AVR, .bpfeb, .bpfel => .BPF, .csky => .CSKY, + .ez80 => .Z80, .hexagon => .QDSP6, .hppa, .hppa64 => .PARISC, .kalimba => .CSR_KALIMBA, @@ -1125,6 +1138,7 @@ pub fn toCoffMachine(target: *const Target) std.coff.IMAGE.FILE.MACHINE { .bpfeb, .bpfel, .csky, + .ez80, .hexagon, .hppa, .hppa64, @@ -1338,6 +1352,7 @@ pub const Cpu = struct { bpfeb, bpfel, csky, + ez80, hexagon, hppa, hppa64, @@ -1437,6 +1452,7 @@ pub const Cpu = struct { x86, xcore, xtensa, + z80, }; pub inline fn family(arch: Arch) Family { @@ -1449,6 +1465,7 @@ pub const Cpu = struct { .avr => .avr, .bpfeb, .bpfel => .bpf, .csky => .csky, + .ez80 => .z80, .hexagon => .hexagon, .hppa, .hppa64 => .hppa, .kalimba => .kalimba, @@ -1676,6 +1693,7 @@ pub const Cpu = struct { .x86_64, .xcore, .xtensa, + .ez80, => .little, .aarch64_be, @@ -1942,6 +1960,10 @@ pub const Cpu = struct { .spirv_fragment, .spirv_vertex, => &.{ .spirv32, .spirv64 }, + + .ez80_cet, + .ez80_tiflags, + => &.{.ez80}, }; } }; @@ -2211,6 +2233,7 @@ pub fn requiresLibC(target: *const Target) bool { .plan9, .other, .@"3ds", + .tios, => false, }; } @@ -2373,6 +2396,8 @@ pub const DynamicLinker = struct { .ps4, .ps5, .vita, + + .tios, => .none, }; } @@ -2782,6 +2807,8 @@ pub const DynamicLinker = struct { .opencl, .opengl, .vulkan, + + .tios, => none, // TODO go over each item in this list and either move it to the above list, or @@ -2816,6 +2843,9 @@ pub fn ptrBitWidth_arch_abi(cpu_arch: Cpu.Arch, abi: Abi) u16 { .x86_16, => 16, + .ez80, + => 24, + .arc, .arceb, .arm, @@ -2883,6 +2913,8 @@ pub fn ptrBitWidth(target: *const Target) u16 { pub fn stackAlignment(target: *const Target) u16 { // Overrides for when the stack alignment is not equal to the pointer width. 
switch (target.cpu.arch) { + .ez80, + => return 1, .m68k, => return 2, .amdgcn, @@ -2962,6 +2994,7 @@ pub fn cCharSignedness(target: *const Target) std.builtin.Signedness { .arc, .arceb, .csky, + .ez80, .hexagon, .msp430, .powerpc, @@ -3333,6 +3366,13 @@ pub fn cTypeBitSize(target: *const Target, c_type: CType) u16 { .long, .ulong => return 64, .longlong, .ulonglong, .double, .longdouble => return 64, }, + .tios => switch (c_type) { + .char => return 8, + .short, .ushort => return 16, + .int, .uint => return 24, + .long, .ulong, .float, .double => return 32, + .longlong, .ulonglong, .longdouble => return 64, + }, .ps3, .contiki, @@ -3345,7 +3385,7 @@ pub fn cTypeBitSize(target: *const Target, c_type: CType) u16 { pub fn cTypeAlignment(target: *const Target, c_type: CType) u16 { // Overrides for unusual alignments switch (target.cpu.arch) { - .avr => return 1, + .avr, .ez80 => return 1, .x86 => switch (target.os.tag) { .windows, .uefi => switch (c_type) { .longlong, .ulonglong, .double => return 8, @@ -3382,6 +3422,8 @@ pub fn cTypeAlignment(target: *const Target, c_type: CType) u16 { return @min( std.math.ceilPowerOfTwoAssert(u16, (cTypeBitSize(target, c_type) + 7) / 8), @as(u16, switch (target.cpu.arch) { + .ez80 => 1, + .msp430, .x86_16, => 2, @@ -3459,7 +3501,7 @@ pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 { .longdouble => return 4, else => {}, }, - .avr => return 1, + .avr, .ez80 => return 1, .x86 => switch (target.os.tag) { .windows, .uefi => switch (c_type) { .longdouble => switch (target.abi) { @@ -3491,6 +3533,8 @@ pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 { return @min( std.math.ceilPowerOfTwoAssert(u16, (cTypeBitSize(target, c_type) + 7) / 8), @as(u16, switch (target.cpu.arch) { + .ez80 => 1, + .x86_16, .msp430 => 2, .arc, @@ -3561,7 +3605,9 @@ pub fn cTypePreferredAlignment(target: *const Target, c_type: CType) u16 { pub fn cMaxIntAlignment(target: *const Target) u16 { return switch (target.cpu.arch) { - .avr => 1, + .avr, + .ez80, + => 1, .msp430, .x86_16 => 2, @@ -3698,6 +3744,7 @@ pub fn cCallingConvention(target: *const Target) ?std.builtin.CallingConvention .amdgcn => .{ .amdgcn_device = .{} }, .nvptx, .nvptx64 => .nvptx_device, .spirv32, .spirv64 => .spirv_device, + .ez80 => .ez80_cet, }; } diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index c0be44b939c7..beecba4b8cef 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -377,6 +377,10 @@ pub const CallingConvention = union(enum(u8)) { spirv_fragment, spirv_vertex, + // Calling conventions for the `ez80` architecture. + ez80_cet, + ez80_tiflags, + /// Options shared across most calling conventions. pub const CommonOptions = struct { /// The boundary the stack is aligned to when the function is called. 
diff --git a/lib/std/zig/target.zig b/lib/std/zig/target.zig
index 13eb7f8e2847..70aada0794cc 100644
--- a/lib/std/zig/target.zig
+++ b/lib/std/zig/target.zig
@@ -498,6 +498,10 @@ pub fn intAlignment(target: *const std.Target, bits: u16) u16 {
            33...64 => 8,
            else => 16,
        },
+        .ez80 => switch (bits) {
+            0 => 0,
+            else => 1,
+        },
        else => return @min(
            std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
            target.cMaxIntAlignment(),
diff --git a/lib/zig.h b/lib/zig.h
index eebe4cf81861..25f79ea5f7f0 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -77,6 +77,9 @@
 #elif defined(__I86__)
 #define zig_x86_16
 #define zig_x86
+#elif defined (__ez80)
+#define zig_ez80
+#define zig_z80
 #endif
 
 #if defined(zig_msvc) || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
@@ -407,6 +410,8 @@
 #define zig_trap() __asm__ volatile("int $0x3")
 #elif defined(zig_x86)
 #define zig_trap() __asm__ volatile("ud2")
+#elif defined(zig_z80)
+#define zig_trap() __asm__ volatile("rst 00h")
 #else
 #define zig_trap() zig_trap_unavailable
 #endif
@@ -509,7 +514,7 @@ zig_extern void *memcpy (void *zig_restrict, void const *zig_restrict, size_t);
 zig_extern void *memset (void *, int, size_t);
 zig_extern void *memmove (void *, void const *, size_t);
 
-/* ================ Bool and 8/16/32/64-bit Integer Support ================= */
+/* ================ Bool and 8/16/24/32/48/64-bit Integer Support ================= */
 
 #include
 
@@ -588,6 +593,16 @@ typedef signed long long int16_t;
 #define INT16_MAX ( INT16_C(0x7FFF))
 #define UINT16_MAX ( INT16_C(0xFFFF))
 
+#if defined(zig_ez80)
+typedef unsigned int uint24_t;
+typedef signed int int24_t;
+#define INT24_C(c) c
+#define UINT24_C(c) c##U
+#endif
+#define INT24_MIN (~INT24_C(0x7FFFFF))
+#define INT24_MAX ( INT24_C(0x7FFFFF))
+#define UINT24_MAX ( INT24_C(0xFFFFFF))
+
 #if SCHAR_MIN == ~0x7FFFFFFF && SCHAR_MAX == 0x7FFFFFFF && UCHAR_MAX == 0xFFFFFFFF
 typedef unsigned char uint32_t;
 typedef signed char int32_t;
@@ -618,6 +633,17 @@ typedef signed long long int32_t;
 #define INT32_MAX ( INT32_C(0x7FFFFFFF))
 #define UINT32_MAX ( INT32_C(0xFFFFFFFF))
 
+#if defined(zig_ez80)
+typedef unsigned __int48 uint48_t;
+typedef signed __int48 int48_t;
+#define INT48_C(c) c
+/* no suffix */
+#define UINT48_C(c) ((uint48_t)(c))
+#endif
+#define INT48_MIN (~INT48_C(0x7FFFFFFFFFFF))
+#define INT48_MAX ( INT48_C(0x7FFFFFFFFFFF))
+#define UINT48_MAX ( INT48_C(0xFFFFFFFFFFFF))
+
 #if SCHAR_MIN == ~0x7FFFFFFFFFFFFFFF && SCHAR_MAX == 0x7FFFFFFFFFFFFFFF && UCHAR_MAX == 0xFFFFFFFFFFFFFFFF
 typedef unsigned char uint64_t;
 typedef signed char int64_t;
@@ -661,10 +687,18 @@ typedef ptrdiff_t intptr_t;
 #define zig_maxInt_i16 INT16_MAX
 #define zig_minInt_u16 UINT16_C(0)
 #define zig_maxInt_u16 UINT16_MAX
+#define zig_minInt_i24 INT24_MIN
+#define zig_maxInt_i24 INT24_MAX
+#define zig_minInt_u24 UINT24_C(0)
+#define zig_maxInt_u24 UINT24_MAX
 #define zig_minInt_i32 INT32_MIN
 #define zig_maxInt_i32 INT32_MAX
 #define zig_minInt_u32 UINT32_C(0)
 #define zig_maxInt_u32 UINT32_MAX
+#define zig_minInt_i48 INT48_MIN
+#define zig_maxInt_i48 INT48_MAX
+#define zig_minInt_u48 UINT48_C(0)
+#define zig_maxInt_u48 UINT48_MAX
 #define zig_minInt_i64 INT64_MIN
 #define zig_maxInt_i64 INT64_MAX
 #define zig_minInt_u64 UINT64_C(0)
 #define zig_maxInt_u64 UINT64_MAX
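As an aside, the `uint24_t`/`uint48_t` typedefs above lean on the eZ80 C data model that the new `tios` entry in `cTypeBitSize` spells out: `int` is 24 bits, `long` is 32, `long long` is 64, pointers are 24 bits, and `__int48` is a compiler extension. A minimal sanity-check sketch, assuming an eZ80 C compiler that defines `__ez80` and supports `__int48`; it is illustrative only and not part of the patch:

```c
/* Illustrative only: the data model assumed by the typedefs above. */
#include <limits.h>

_Static_assert(CHAR_BIT == 8, "eZ80 bytes are octets");
_Static_assert(sizeof(int) == 3, "int is 24 bits, so `unsigned int` can back uint24_t");
_Static_assert(sizeof(long) == 4, "long is 32 bits");
_Static_assert(sizeof(long long) == 8, "long long is 64 bits");
_Static_assert(sizeof(unsigned __int48) == 6, "__int48 is the 48-bit extension backing uint48_t");
_Static_assert(sizeof(void *) == 3, "pointers are 24 bits (ADL mode)");
```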
@@ -784,6 +818,17 @@ zig_int_helpers(16, unsigned long long)
 #else
 zig_int_helpers(16, uint16_t)
 #endif
+#if defined(zig_ez80)
+#if UINT24_MAX <= UINT_MAX
+zig_int_helpers(24, unsigned int)
+#elif UINT24_MAX <= ULONG_MAX
+zig_int_helpers(24, unsigned long)
+#elif UINT24_MAX <= ULLONG_MAX
+zig_int_helpers(24, unsigned long long)
+#else
+zig_int_helpers(24, uint24_t)
+#endif
+#endif
 #if UINT32_MAX <= UINT_MAX
 zig_int_helpers(32, unsigned int)
 #elif UINT32_MAX <= ULONG_MAX
@@ -793,6 +838,17 @@ zig_int_helpers(32, unsigned long long)
 #else
 zig_int_helpers(32, uint32_t)
 #endif
+#if defined(zig_ez80)
+#if UINT48_MAX <= UINT_MAX
+zig_int_helpers(48, unsigned int)
+#elif UINT48_MAX <= ULONG_MAX
+zig_int_helpers(48, unsigned long)
+#elif UINT48_MAX <= ULLONG_MAX
+zig_int_helpers(48, unsigned long long)
+#else
+zig_int_helpers(48, uint48_t)
+#endif
+#endif
 #if UINT64_MAX <= UINT_MAX
 zig_int_helpers(64, unsigned int)
 #elif UINT64_MAX <= ULONG_MAX
@@ -911,6 +967,66 @@ static inline bool zig_addo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
 #endif
 }
 
+#if defined(zig_ez80)
+static inline bool zig_addo_u24(uint24_t *res, uint24_t lhs, uint24_t rhs, uint8_t bits) {
+#if zig_has_builtin(add_overflow) || defined(zig_gcc)
+    uint24_t full_res;
+    bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_u24(full_res, bits);
+    return overflow || full_res < zig_minInt_u(24, bits) || full_res > zig_maxInt_u(24, bits);
+#else
+    uint32_t full_res;
+    bool overflow = zig_addo_u32(&full_res, lhs, rhs, bits);
+    *res = (uint24_t)full_res;
+    return overflow;
+#endif
+}
+
+static inline bool zig_addo_i24(int24_t *res, int24_t lhs, int24_t rhs, uint8_t bits) {
+#if zig_has_builtin(add_overflow) || defined(zig_gcc)
+    int24_t full_res;
+    bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_i24(full_res, bits);
+    return overflow || full_res < zig_minInt_i(24, bits) || full_res > zig_maxInt_i(24, bits);
+#else
+    int32_t full_res;
+    bool overflow = zig_addo_i32(&full_res, lhs, rhs, bits);
+    *res = (int24_t)full_res;
+    return overflow;
+#endif
+}
+#endif
+
+#if defined(zig_ez80)
+static inline bool zig_addo_u48(uint48_t *res, uint48_t lhs, uint48_t rhs, uint8_t bits) {
+#if zig_has_builtin(add_overflow) || defined(zig_gcc)
+    uint48_t full_res;
+    bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_u48(full_res, bits);
+    return overflow || full_res < zig_minInt_u(48, bits) || full_res > zig_maxInt_u(48, bits);
+#else
+    uint64_t full_res;
+    bool overflow = zig_addo_u64(&full_res, lhs, rhs, bits);
+    *res = (uint48_t)full_res;
+    return overflow;
+#endif
+}
+
+static inline bool zig_addo_i48(int48_t *res, int48_t lhs, int48_t rhs, uint8_t bits) {
+#if zig_has_builtin(add_overflow) || defined(zig_gcc)
+    int48_t full_res;
+    bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_i48(full_res, bits);
+    return overflow || full_res < zig_minInt_i(48, bits) || full_res > zig_maxInt_i(48, bits);
+#else
+    int64_t full_res;
+    bool overflow = zig_addo_i64(&full_res, lhs, rhs, bits);
+    *res = (int48_t)full_res;
+    return overflow;
+#endif
+}
+#endif
+
 static inline bool zig_subo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
 #if zig_has_builtin(sub_overflow) || defined(zig_gcc)
     uint32_t full_res;
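The 24- and 48-bit overflow helpers follow the same contract as the existing 8/16/32/64-bit ones: the result is wrapped to the requested bit width and stored through `res`, and the return value reports whether the mathematically exact result fit. A hypothetical call site, roughly what the C backend would emit for `@addWithOverflow` on a `u20`; the function and variable names below are illustrative, not part of the patch:

```c
/* Sketch only: assumes an eZ80 build of zig.h, so uint24_t and zig_addo_u24 exist. */
#include <stdbool.h>

static uint24_t add_u20_wrapping(uint24_t a, uint24_t b, bool *overflow) {
    uint24_t sum;
    /* Wraps the result to 20 bits and reports whether the exact sum exceeded a u20. */
    *overflow = zig_addo_u24(&sum, a, b, 20);
    return sum;
}
```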
@@ -937,6 +1053,7 @@ static inline bool zig_subo_i32(int32_t *res, int32_t lhs, int32_t rhs, uint8_t
     return overflow || full_res < zig_minInt_i(32, bits) || full_res > zig_maxInt_i(32, bits);
 }
 
+
 static inline bool zig_subo_u64(uint64_t *res, uint64_t lhs, uint64_t rhs, uint8_t bits) {
 #if zig_has_builtin(sub_overflow) || defined(zig_gcc)
     uint64_t full_res;
@@ -1019,6 +1136,66 @@ static inline bool zig_subo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
 #endif
 }
 
+#if defined(zig_ez80)
+static inline bool zig_subo_u24(uint24_t *res, uint24_t lhs, uint24_t rhs, uint8_t bits) {
+#if zig_has_builtin(sub_overflow) || defined(zig_gcc)
+    uint24_t full_res;
+    bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_u24(full_res, bits);
+    return overflow || full_res < zig_minInt_u(24, bits) || full_res > zig_maxInt_u(24, bits);
+#else
+    uint32_t full_res;
+    bool overflow = zig_subo_u32(&full_res, lhs, rhs, bits);
+    *res = (uint24_t)full_res;
+    return overflow;
+#endif
+}
+
+static inline bool zig_subo_i24(int24_t *res, int24_t lhs, int24_t rhs, uint8_t bits) {
+#if zig_has_builtin(sub_overflow) || defined(zig_gcc)
+    int24_t full_res;
+    bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_i24(full_res, bits);
+    return overflow || full_res < zig_minInt_i(24, bits) || full_res > zig_maxInt_i(24, bits);
+#else
+    int32_t full_res;
+    bool overflow = zig_subo_i32(&full_res, lhs, rhs, bits);
+    *res = (int24_t)full_res;
+    return overflow;
+#endif
+}
+#endif
+
+#if defined(zig_ez80)
+static inline bool zig_subo_u48(uint48_t *res, uint48_t lhs, uint48_t rhs, uint8_t bits) {
+#if zig_has_builtin(sub_overflow) || defined(zig_gcc)
+    uint48_t full_res;
+    bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_u48(full_res, bits);
+    return overflow || full_res < zig_minInt_u(48, bits) || full_res > zig_maxInt_u(48, bits);
+#else
+    uint64_t full_res;
+    bool overflow = zig_subo_u64(&full_res, lhs, rhs, bits);
+    *res = (uint48_t)full_res;
+    return overflow;
+#endif
+}
+
+static inline bool zig_subo_i48(int48_t *res, int48_t lhs, int48_t rhs, uint8_t bits) {
+#if zig_has_builtin(sub_overflow) || defined(zig_gcc)
+    int48_t full_res;
+    bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_i48(full_res, bits);
+    return overflow || full_res < zig_minInt_i(48, bits) || full_res > zig_maxInt_i(48, bits);
+#else
+    int64_t full_res;
+    bool overflow = zig_subo_i64(&full_res, lhs, rhs, bits);
+    *res = (int48_t)full_res;
+    return overflow;
+#endif
+}
+#endif
+
 static inline bool zig_mulo_u32(uint32_t *res, uint32_t lhs, uint32_t rhs, uint8_t bits) {
 #if zig_has_builtin(mul_overflow) || defined(zig_gcc)
     uint32_t full_res;
@@ -1127,6 +1304,66 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
 #endif
 }
 
+#if defined(zig_ez80)
+static inline bool zig_mulo_u24(uint24_t *res, uint24_t lhs, uint24_t rhs, uint8_t bits) {
+#if zig_has_builtin(mul_overflow) || defined(zig_gcc)
+    uint24_t full_res;
+    bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_u24(full_res, bits);
+    return overflow || full_res < zig_minInt_u(24, bits) || full_res > zig_maxInt_u(24, bits);
+#else
+    uint32_t full_res;
+    bool overflow = zig_mulo_u32(&full_res, lhs, rhs, bits);
+    *res = (uint24_t)full_res;
+    return overflow;
+#endif
+}
+
+static inline bool zig_mulo_i24(int24_t *res, int24_t lhs, int24_t rhs, uint8_t bits) {
+#if zig_has_builtin(mul_overflow) || defined(zig_gcc)
+    int24_t full_res;
+    bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_i24(full_res, bits);
+    return overflow || full_res < zig_minInt_i(24, bits) || full_res > zig_maxInt_i(24, bits);
+#else
+    int32_t full_res;
+    bool overflow = zig_mulo_i32(&full_res, lhs, rhs, bits);
+    *res = (int24_t)full_res;
+    return overflow;
+#endif
+}
+#endif
+
+#if defined(zig_ez80)
+static inline bool zig_mulo_u48(uint48_t *res, uint48_t lhs, uint48_t rhs, uint8_t bits) {
+#if zig_has_builtin(mul_overflow) || defined(zig_gcc)
+    uint48_t full_res;
+    bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_u48(full_res, bits);
+    return overflow || full_res < zig_minInt_u(48, bits) || full_res > zig_maxInt_u(48, bits);
+#else
+    uint64_t full_res;
+    bool overflow = zig_mulo_u64(&full_res, lhs, rhs, bits);
+    *res = (uint48_t)full_res;
+    return overflow;
+#endif
+}
+
+static inline bool zig_mulo_i48(int48_t *res, int48_t lhs, int48_t rhs, uint8_t bits) {
+#if zig_has_builtin(mul_overflow) || defined(zig_gcc)
+    int48_t full_res;
+    bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+    *res = zig_wrap_i48(full_res, bits);
+    return overflow || full_res < zig_minInt_i(48, bits) || full_res > zig_maxInt_i(48, bits);
+#else
+    int64_t full_res;
+    bool overflow = zig_mulo_i64(&full_res, lhs, rhs, bits);
+    *res = (int48_t)full_res;
+    return overflow;
+#endif
+}
+#endif
+
 #define zig_int_builtins(w) \
 static inline bool zig_shlo_u##w(uint##w##_t *res, uint##w##_t lhs, uint8_t rhs, uint8_t bits) { \
     *res = zig_shlw_u##w(lhs, rhs, bits); \
@@ -1186,7 +1423,13 @@ static inline bool zig_mulo_i16(int16_t *res, int16_t lhs, int16_t rhs, uint8_t
 }
 zig_int_builtins(8)
 zig_int_builtins(16)
+#if defined(zig_ez80)
+zig_int_builtins(24)
+#endif
 zig_int_builtins(32)
+#if defined(zig_ez80)
+zig_int_builtins(48)
+#endif
 zig_int_builtins(64)
 
 #define zig_builtin8(name, val) __builtin_##name(val)
@@ -1195,6 +1438,11 @@ typedef unsigned int zig_Builtin8;
 #define zig_builtin16(name, val) __builtin_##name(val)
 typedef unsigned int zig_Builtin16;
 
+#if defined(zig_ez80)
+#define zig_builtin24(name, val) __builtin_##name(val)
+typedef unsigned int zig_Builtin24;
+#endif
+
 #if INT_MIN <= INT32_MIN
 #define zig_builtin32(name, val) __builtin_##name(val)
 typedef unsigned int zig_Builtin32;
@@ -1203,6 +1451,11 @@ typedef unsigned int zig_Builtin32;
 typedef unsigned long zig_Builtin32;
 #endif
 
+#if defined(zig_ez80)
+#define zig_builtin48(name, val) __builtin_##name(val)
+typedef unsigned long long zig_Builtin48;
+#endif
+
 #if INT_MIN <= INT64_MIN
 #define zig_builtin64(name, val) __builtin_##name(val)
 typedef unsigned int zig_Builtin64;
@@ -1237,6 +1490,23 @@ static inline int16_t zig_byte_swap_i16(int16_t val, uint8_t bits) {
     return zig_wrap_i16((int16_t)zig_byte_swap_u16((uint16_t)val, bits), bits);
 }
 
+#if defined(zig_ez80)
+static inline uint24_t zig_byte_swap_u24(uint24_t val, uint8_t bits) {
+    uint24_t full_res;
+#if zig_has_builtin(bswap24) || defined(zig_gcc)
+    full_res = __builtin_bswap24(val);
+#else
+    full_res = (uint24_t)zig_byte_swap_u8((uint8_t)(val >> 0), 8) << 16 |
+               (uint24_t)zig_byte_swap_u16((uint16_t)(val >> 8), 16) >> 0;
+#endif
+    return zig_wrap_u24(full_res >> (24 - bits), bits);
+}
+
+static inline int24_t zig_byte_swap_i24(int24_t val, uint8_t bits) {
+    return zig_wrap_i24((int24_t)zig_byte_swap_u24((uint24_t)val, bits), bits);
+}
+#endif
+
 static inline uint32_t zig_byte_swap_u32(uint32_t val, uint8_t bits) {
     uint32_t full_res;
 #if zig_has_builtin(bswap32) || defined(zig_gcc)
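Note the shift by `(24 - bits)` in `zig_byte_swap_u24`: the value is swapped as a full three-byte quantity and then shifted down so that narrower widths routed through this helper end up right-aligned in the low bits, mirroring the existing 16/32/64-bit helpers. Two illustrative checks, assuming an eZ80 build of zig.h; they are not part of the patch:

```c
#include <assert.h>

static void byte_swap_u24_examples(void) {
    /* Full 24-bit swap: bytes 12 34 56 become 56 34 12. */
    assert(zig_byte_swap_u24(UINT24_C(0x123456), 24) == UINT24_C(0x563412));
    /* A 16-bit value passed through the 24-bit helper: swapped, then shifted
       down by 24 - 16 = 8 so the result stays in the low 16 bits. */
    assert(zig_byte_swap_u24(UINT24_C(0x3412), 16) == UINT24_C(0x1234));
}
```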
@@ -1252,6 +1522,23 @@ static inline int32_t zig_byte_swap_i32(int32_t val, uint8_t bits) {
     return zig_wrap_i32((int32_t)zig_byte_swap_u32((uint32_t)val, bits), bits);
 }
 
+#if defined(zig_ez80)
+static inline uint48_t zig_byte_swap_u48(uint48_t val, uint8_t bits) {
+    uint48_t full_res;
+#if zig_has_builtin(bswap48) || defined(zig_gcc)
+    full_res = __builtin_bswap48(val);
+#else
+    full_res = (uint48_t)zig_byte_swap_u24((uint24_t)(val >> 0), 24) << 24 |
+               (uint48_t)zig_byte_swap_u24((uint24_t)(val >> 24), 24) >> 0;
+#endif
+    return zig_wrap_u48(full_res >> (48 - bits), bits);
+}
+
+static inline int48_t zig_byte_swap_i48(int48_t val, uint8_t bits) {
+    return zig_wrap_i48((int48_t)zig_byte_swap_u48((uint48_t)val, bits), bits);
+}
+#endif
+
 static inline uint64_t zig_byte_swap_u64(uint64_t val, uint8_t bits) {
     uint64_t full_res;
 #if zig_has_builtin(bswap64) || defined(zig_gcc)
@@ -1300,6 +1587,23 @@ static inline int16_t zig_bit_reverse_i16(int16_t val, uint8_t bits) {
     return zig_wrap_i16((int16_t)zig_bit_reverse_u16((uint16_t)val, bits), bits);
 }
 
+#if defined(zig_ez80)
+static inline uint24_t zig_bit_reverse_u24(uint24_t val, uint8_t bits) {
+    uint24_t full_res;
+#if zig_has_builtin(bitreverse24)
+    full_res = __builtin_bitreverse24(val);
+#else
+    full_res = (uint24_t)zig_bit_reverse_u8((uint8_t)(val >> 0), 8) << 16 |
+               (uint24_t)zig_bit_reverse_u16((uint16_t)(val >> 8), 16) >> 0;
+#endif
+    return zig_wrap_u24(full_res >> (24 - bits), bits);
+}
+
+static inline int24_t zig_bit_reverse_i24(int24_t val, uint8_t bits) {
+    return zig_wrap_i24((int24_t)zig_bit_reverse_u24((uint24_t)val, bits), bits);
+}
+#endif
+
 static inline uint32_t zig_bit_reverse_u32(uint32_t val, uint8_t bits) {
     uint32_t full_res;
 #if zig_has_builtin(bitreverse32)
@@ -1315,6 +1619,23 @@ static inline int32_t zig_bit_reverse_i32(int32_t val, uint8_t bits) {
     return zig_wrap_i32((int32_t)zig_bit_reverse_u32((uint32_t)val, bits), bits);
 }
 
+#if defined(zig_ez80)
+static inline uint48_t zig_bit_reverse_u48(uint48_t val, uint8_t bits) {
+    uint48_t full_res;
+#if zig_has_builtin(bitreverse48)
+    full_res = __builtin_bitreverse48(val);
+#else
+    full_res = (uint48_t)zig_bit_reverse_u24((uint24_t)(val >> 0), 24) << 24 |
+               (uint48_t)zig_bit_reverse_u24((uint24_t)(val >> 24), 24) >> 0;
+#endif
+    return zig_wrap_u48(full_res >> (48 - bits), bits);
+}
+
+static inline int48_t zig_bit_reverse_i48(int48_t val, uint8_t bits) {
+    return zig_wrap_i48((int48_t)zig_bit_reverse_u48((uint48_t)val, bits), bits);
+}
+#endif
+
 static inline uint64_t zig_bit_reverse_u64(uint64_t val, uint8_t bits) {
     uint64_t full_res;
 #if zig_has_builtin(bitreverse64)
@@ -1356,7 +1677,13 @@ static inline int64_t zig_bit_reverse_i64(int64_t val, uint8_t bits) {
 #endif
 zig_builtin_popcount(8)
 zig_builtin_popcount(16)
+#if defined(zig_ez80)
+zig_builtin_popcount(24)
+#endif
 zig_builtin_popcount(32)
+#if defined(zig_ez80)
+zig_builtin_popcount(48)
+#endif
 zig_builtin_popcount(64)
 
 #define zig_builtin_ctz_common(w) \
@@ -1381,7 +1708,13 @@ zig_builtin_popcount(64)
 #endif
 zig_builtin_ctz(8)
 zig_builtin_ctz(16)
+#if defined(zig_ez80)
+zig_builtin_ctz(24)
+#endif
 zig_builtin_ctz(32)
+#if defined(zig_ez80)
+zig_builtin_ctz(48)
+#endif
 zig_builtin_ctz(64)
 
 #define zig_builtin_clz_common(w) \
@@ -1406,7 +1739,13 @@ zig_builtin_ctz(64)
 #endif
 zig_builtin_clz(8)
 zig_builtin_clz(16)
+#if defined(zig_ez80)
+zig_builtin_clz(24)
+#endif
 zig_builtin_clz(32)
+#if defined(zig_ez80)
+zig_builtin_clz(48)
+#endif
 zig_builtin_clz(64)
 
 /* ======================== 128-bit Integer Support ========================= */
diff --git a/src/Sema.zig b/src/Sema.zig
index 341cb1c85523..ad469edf96f4 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -26281,6 +26281,7 @@ fn validateExternType(
        },
        .int => switch (ty.intInfo(zcu).bits) {
            0, 8, 16, 32, 64, 128 => return true,
+            24, 48 => return sema.pt.zcu.getTarget().cpu.arch == .ez80,
            else => return false,
        },
        .@"fn" => {
@@ -34388,7 +34389,7 @@ pub fn resolveStructAlignment(
    assert(struct_type.layout != .@"packed");
    assert(struct_type.flagsUnordered(ip).alignment == .none);
 
-    const ptr_align =
Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + const ptr_align = Type.ptrAbiAlignment(target); // We'll guess "pointer-aligned", if the struct has an // underaligned pointer field then some allocations @@ -34497,7 +34498,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { } if (struct_type.flagsUnordered(ip).assumed_pointer_aligned and - big_align.compareStrict(.neq, Alignment.fromByteUnits(@divExact(zcu.getTarget().ptrBitWidth(), 8)))) + big_align.compareStrict(.neq, Type.ptrAbiAlignment(zcu.getTarget()))) { const msg = try sema.errMsg( ty.srcLoc(zcu), @@ -34733,7 +34734,7 @@ pub fn resolveUnionAlignment( assert(!union_type.haveLayout(ip)); - const ptr_align = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + const ptr_align = Type.ptrAbiAlignment(target); // We'll guess "pointer-aligned", if the union has an // underaligned pointer field then some allocations @@ -34883,7 +34884,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { } if (union_type.flagsUnordered(ip).assumed_pointer_aligned and - alignment.compareStrict(.neq, Alignment.fromByteUnits(@divExact(pt.zcu.getTarget().ptrBitWidth(), 8)))) + alignment.compareStrict(.neq, Type.ptrAbiAlignment(pt.zcu.getTarget()))) { const msg = try sema.errMsg( ty.srcLoc(pt.zcu), diff --git a/src/Type.zig b/src/Type.zig index f3f5c9949179..6fdf0cace88d 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -1603,6 +1603,9 @@ fn abiSizeInnerOptional( } pub fn ptrAbiAlignment(target: *const Target) Alignment { + // The eZ80 has 24-bit pointers, which aren't exact powers of two, tripping + // the assert. The alignment of eZ80 pointers is 1, so we bypass the check. + if (target.cpu.arch == .ez80) return .@"1"; return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8)); } diff --git a/src/Zcu.zig b/src/Zcu.zig index 443194878175..163bf67da1cb 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -3836,6 +3836,9 @@ pub fn atomicPtrAlignment( ) AtomicPtrAlignmentError!Alignment { const target = zcu.getTarget(); const max_atomic_bits: u16 = switch (target.cpu.arch) { + .ez80, + => 8, + .aarch64, .aarch64_be, => 128, @@ -4465,6 +4468,8 @@ pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enu .avr_signal, => true, + .ez80_tiflags => true, + .naked => true, else => false, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index b86e0b583f57..7f6e5331411f 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -281,7 +281,9 @@ const reserved_idents = std.StaticStringMap(void).initComptime(.{ .{ "inline", {} }, .{ "int", {} }, .{ "int16_t", {} }, + .{ "int24_t", {} }, .{ "int32_t", {} }, + .{ "int48_t", {} }, .{ "int64_t", {} }, .{ "int8_t", {} }, .{ "intptr_t", {} }, @@ -303,7 +305,9 @@ const reserved_idents = std.StaticStringMap(void).initComptime(.{ .{ "typedef", {} }, .{ "typeof", {} }, .{ "uint16_t", {} }, + .{ "uint24_t", {} }, .{ "uint32_t", {} }, + .{ "uint48_t", {} }, .{ "uint64_t", {} }, .{ "uint8_t", {} }, .{ "uintptr_t", {} }, @@ -8134,6 +8138,9 @@ fn toCallingConvention(cc: std.builtin.CallingConvention, zcu: *Zcu) ?[]const u8 .x86_64_interrupt, => "interrupt", + .ez80_tiflags, + => "__tiflags__", + else => unreachable, // `Zcu.callconvSupported` }; } diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index fa4db36a0c69..9426a60240f1 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -6,8 +6,12 @@ pub const @"i8": CType = .{ .index = .int8_t }; pub const @"u8": CType = .{ .index = .uint8_t }; pub const @"i16": CType = .{ .index 
= .int16_t }; pub const @"u16": CType = .{ .index = .uint16_t }; +pub const @"u24": CType = .{ .index = .uint24_t }; +pub const @"i24": CType = .{ .index = .int24_t }; pub const @"i32": CType = .{ .index = .int32_t }; pub const @"u32": CType = .{ .index = .uint32_t }; +pub const @"i48": CType = .{ .index = .int48_t }; +pub const @"u48": CType = .{ .index = .uint48_t }; pub const @"i64": CType = .{ .index = .int64_t }; pub const @"u64": CType = .{ .index = .uint64_t }; pub const @"i128": CType = .{ .index = .zig_i128 }; @@ -63,8 +67,12 @@ pub fn isInteger(ctype: CType) bool { .int8_t, .uint16_t, .int16_t, + .uint24_t, + .int24_t, .uint32_t, .int32_t, + .uint48_t, + .int48_t, .uint64_t, .int64_t, .uintptr_t, @@ -87,7 +95,9 @@ pub fn signedness(ctype: CType, mod: *Module) std.builtin.Signedness { .ptrdiff_t, .int8_t, .int16_t, + .int24_t, .int32_t, + .int48_t, .int64_t, .intptr_t, .zig_i128, @@ -100,7 +110,9 @@ pub fn signedness(ctype: CType, mod: *Module) std.builtin.Signedness { .size_t, .uint8_t, .uint16_t, + .uint24_t, .uint32_t, + .uint48_t, .uint64_t, .uintptr_t, .zig_u128, @@ -135,7 +147,9 @@ pub fn toSigned(ctype: CType) CType { .size_t, .ptrdiff_t => .{ .index = .ptrdiff_t }, .uint8_t, .int8_t => .{ .index = .int8_t }, .uint16_t, .int16_t => .{ .index = .int16_t }, + .uint24_t, .int24_t => .{ .index = .int24_t }, .uint32_t, .int32_t => .{ .index = .int32_t }, + .uint48_t, .int48_t => .{ .index = .int48_t }, .uint64_t, .int64_t => .{ .index = .int64_t }, .uintptr_t, .intptr_t => .{ .index = .intptr_t }, .zig_u128, .zig_i128 => .{ .index = .zig_i128 }, @@ -162,7 +176,9 @@ pub fn toUnsigned(ctype: CType) CType { .size_t, .ptrdiff_t => .{ .index = .size_t }, .uint8_t, .int8_t => .{ .index = .uint8_t }, .uint16_t, .int16_t => .{ .index = .uint16_t }, + .uint24_t, .int24_t => .{ .index = .uint24_t }, .uint32_t, .int32_t => .{ .index = .uint32_t }, + .uint48_t, .int48_t => .{ .index = .uint48_t }, .uint64_t, .int64_t => .{ .index = .uint64_t }, .uintptr_t, .intptr_t => .{ .index = .uintptr_t }, .zig_u128, .zig_i128 => .{ .index = .zig_u128 }, @@ -228,8 +244,12 @@ pub fn getStandardDefineAbbrev(ctype: CType) ?[]const u8 { .int8_t => "INT8", .uint16_t => "UINT16", .int16_t => "INT16", + .uint24_t => "UINT24", + .int24_t => "INT24", .uint32_t => "UINT32", .int32_t => "INT32", + .uint48_t => "UINT48", + .int48_t => "INT48", .uint64_t => "UINT64", .int64_t => "INT64", .uintptr_t => "UINTPTR", @@ -271,8 +291,12 @@ pub fn renderLiteralPrefix(ctype: CType, w: *Writer, kind: Kind, pool: *const Po .int8_t, .uint16_t, .int16_t, + .uint24_t, + .int24_t, .uint32_t, .int32_t, + .uint48_t, + .int48_t, .uint64_t, .int64_t, => try w.print("{s}_C(", .{ctype.getStandardDefineAbbrev().?}), @@ -331,8 +355,12 @@ pub fn renderLiteralSuffix(ctype: CType, w: *Writer, pool: *const Pool) Writer.E .int8_t, .uint16_t, .int16_t, + .uint24_t, + .int24_t, .uint32_t, .int32_t, + .uint48_t, + .int48_t, .uint64_t, .int64_t, .zig_u128, @@ -390,7 +418,9 @@ pub fn byteSize(ctype: CType, pool: *const Pool, mod: *Module) u64 { .intptr_t, => @divExact(target.ptrBitWidth(), 8), .uint16_t, .int16_t, .zig_f16 => 2, + .uint24_t, .int24_t => 3, .uint32_t, .int32_t, .zig_f32 => 4, + .uint48_t, .int48_t => 6, .uint64_t, .int64_t, .zig_f64 => 8, .zig_u128, .zig_i128, .zig_f128 => 16, .zig_f80 => if (target.cTypeBitSize(.longdouble) == 80) @@ -707,6 +737,12 @@ const Index = enum(u32) { zig_f128, zig_c_longdouble, + // ez80 stdint.h + uint24_t, + int24_t, + uint48_t, + int48_t, + _, const first_pool_index: u32 = 
@typeInfo(CType.Index).@"enum".fields.len; @@ -1346,6 +1382,7 @@ pub const Pool = struct { mod: *Module, kind: Kind, ) !CType { + const is_ez80 = mod.resolved_target.result.cpu.arch == .ez80; switch (int_info.bits) { 0 => return .void, 1...8 => switch (int_info.signedness) { @@ -1356,11 +1393,19 @@ pub const Pool = struct { .signed => return .i16, .unsigned => return .u16, }, - 17...32 => switch (int_info.signedness) { + 17...24 => switch (int_info.signedness) { + .signed => return if (is_ez80) .i24 else .i32, + .unsigned => return if (is_ez80) .u24 else .u32, + }, + 25...32 => switch (int_info.signedness) { .signed => return .i32, .unsigned => return .u32, }, - 33...64 => switch (int_info.signedness) { + 33...48 => switch (int_info.signedness) { + .signed => return if (is_ez80) .i48 else .i64, + .unsigned => return if (is_ez80) .u48 else .u64, + }, + 49...64 => switch (int_info.signedness) { .signed => return .i64, .unsigned => return .u64, }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 824295807317..2a312d0d210a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -108,6 +108,7 @@ pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8 .alpha, .arceb, + .ez80, .hppa, .hppa64, .kalimba, @@ -246,6 +247,7 @@ pub fn targetTriple(allocator: Allocator, target: *const std.Target) ![]const u8 .opengl, .plan9, .contiki, + .tios, .other, => "unknown", }; @@ -485,6 +487,7 @@ pub fn dataLayout(target: *const std.Target) []const u8 { .alpha, .arceb, + .ez80, .hppa, .hppa64, .kalimba, @@ -1429,8 +1432,7 @@ pub const Object = struct { const param = wip.arg(llvm_arg_i); llvm_arg_i += 1; const field_ptr = try wip.gepStruct(llvm_ty, arg_ptr, field_i, ""); - const alignment = - Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + const alignment = Type.ptrAbiAlignment(target).toLlvm(); _ = try wip.store(.normal, param, field_ptr, alignment); } @@ -5398,8 +5400,7 @@ pub const FuncGen = struct { const llvm_ty = try o.builder.structType(.normal, llvm_types); try llvm_args.ensureUnusedCapacity(it.types_len); for (llvm_types, 0..) |field_ty, i| { - const alignment = - Builder.Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)); + const alignment = Type.ptrAbiAlignment(target).toLlvm(); const field_ptr = try self.wip.gepStruct(llvm_ty, arg_ptr, i, ""); const loaded = try self.wip.load(.normal, field_ty, field_ptr, alignment, ""); llvm_args.appendAssumeCapacity(loaded); @@ -11938,6 +11939,8 @@ fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: *const s .avr_gnu, .bpf_std, .csky_sysv, + .ez80_cet, + .ez80_tiflags, .hexagon_sysv, .hexagon_sysv_hvx, .hppa_elf, @@ -13128,6 +13131,7 @@ pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void { // LLVM does does not have a backend for these. .alpha, .arceb, + .ez80, .hppa, .hppa64, .kalimba, diff --git a/src/target.zig b/src/target.zig index fd7536f7ecb8..620f6aa078bf 100644 --- a/src/target.zig +++ b/src/target.zig @@ -215,6 +215,10 @@ pub fn hasLlvmSupport(target: *const std.Target, ofmt: std.Target.ObjectFormat) .xtensa, => false, + // Third-party LLVM backend exists. + .ez80, + => false, + // No LLVM backend exists. 
.alpha, .arceb, diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig index 29877fff15c8..82f8645245f6 100644 --- a/tools/update_cpu_features.zig +++ b/tools/update_cpu_features.zig @@ -966,6 +966,10 @@ const targets = [_]ArchTarget{ .td_name = "CSKY", }, }, + .{ + .zig_name = "z80", + .llvm = null, + }, .{ .zig_name = "hexagon", .llvm = .{