@@ -1858,6 +1858,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
1858
1858
.bit_and = > try cg .airLogicBinOp (inst , .@"and" ),
1859
1859
.bit_or = > try cg .airLogicBinOp (inst , .@"or" ),
1860
1860
.xor = > try cg .airLogicBinOp (inst , .xor ),
1861
+ .not = > try cg .airNot (inst ),
1861
1862
1862
1863
.bitcast = > try cg .airBitCast (inst ),
1863
1864
.intcast = > try cg .airIntCast (inst ),
@@ -2666,6 +2667,7 @@ const Select = struct {
2666
2667
pub const Src = union (enum ) {
2667
2668
none ,
2668
2669
any ,
2670
+ zero ,
2669
2671
imm ,
2670
2672
imm_val : i32 ,
2671
2673
imm_fit : u5 ,
@@ -2682,6 +2684,7 @@ const Select = struct {
2682
2684
2683
2685
pub const imm12 : Src = .{ .imm_fit = 12 };
2684
2686
pub const imm20 : Src = .{ .imm_fit = 20 };
2687
+ /// Immediate 0, see also zero
2685
2688
pub const imm_zero : Src = .{ .imm_val = 0 };
2686
2689
pub const imm_one : Src = .{ .imm_val = 1 };
2687
2690
pub const int_reg : Src = .{ .reg = .int };
@@ -2693,6 +2696,11 @@ const Select = struct {
2693
2696
return switch (pat ) {
2694
2697
.none = > temp .tracking (cg ).short == .none ,
2695
2698
.any = > true ,
2699
+ .zero = > switch (temp .tracking (cg ).short ) {
2700
+ .immediate = > | imm | imm == 0 ,
2701
+ .register = > | reg | reg == Register .zero ,
2702
+ else = > false ,
2703
+ },
2696
2704
.imm = > temp .tracking (cg ).short == .immediate ,
2697
2705
.imm_val = > | val | switch (temp .tracking (cg ).short ) {
2698
2706
.immediate = > | imm | @as (i32 , @intCast (imm )) == val ,
@@ -2716,7 +2724,7 @@ const Select = struct {
2716
2724
2717
2725
fn convert (pat : Src , temp : * Temp , cg : * CodeGen ) InnerError ! bool {
2718
2726
return switch (pat ) {
2719
- .none , .any , .imm , .imm_val , .imm_fit , .regs , .reg_frame = > false ,
2727
+ .none , .any , .zero , . imm , .imm_val , .imm_fit , .regs , .reg_frame = > false ,
2720
2728
.mem , .to_mem = > try temp .moveToMemory (cg , false ),
2721
2729
.mut_mem , .to_mut_mem = > try temp .moveToMemory (cg , true ),
2722
2730
.reg , .to_reg = > | rc | try temp .moveToRegister (cg , rc , false ),
@@ -2926,6 +2934,7 @@ fn airLogicBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: LogicBinOpKind) !void {
2926
2934
const ty = sel .ops [0 ].typeOf (cg );
2927
2935
assert (ty .isAbiInt (zcu ));
2928
2936
2937
+ // case 1: RI
2929
2938
if (try sel .match (.{
2930
2939
.patterns = &.{
2931
2940
.{ .srcs = &.{ .to_int_reg , .imm12 } },
@@ -2938,7 +2947,9 @@ fn airLogicBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: LogicBinOpKind) !void {
2938
2947
const dst_limb = dst .toLimbValue (0 , cg );
2939
2948
try asmIntLogicBinOpRRI (cg , op , dst_limb .getReg ().? , lhs_limb .getReg ().? , @intCast (rhs .getUnsignedImm (cg )));
2940
2949
try sel .finish (dst );
2941
- } else if (try sel .match (.{
2950
+ }
2951
+ // case 2: RR
2952
+ else if (try sel .match (.{
2942
2953
.patterns = &.{.{ .srcs = &.{ .to_int_reg , .to_int_reg } }},
2943
2954
})) {
2944
2955
const lhs , const rhs = sel .ops [0.. 2].* ;
@@ -2950,7 +2961,9 @@ fn airLogicBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: LogicBinOpKind) !void {
2950
2961
try asmIntLogicBinOpRRR (cg , op , dst_limb .getReg ().? , lhs_limb .getReg ().? , rhs_limb .getReg ().? );
2951
2962
}
2952
2963
try sel .finish (dst );
2953
- } else if (try sel .match (.{
2964
+ }
2965
+ // case 3: limbs
2966
+ else if (try sel .match (.{
2954
2967
.patterns = &.{.{ .srcs = &.{ .any , .any } }},
2955
2968
.temps = &.{ .any_usize_reg , .any_usize_reg },
2956
2969
})) {
@@ -2967,6 +2980,54 @@ fn airLogicBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: LogicBinOpKind) !void {
2967
2980
} else return sel .fail ();
2968
2981
}
2969
2982
2983
/// Lowers an AIR `not` instruction (boolean negation / integer bitwise complement).
///
/// Three lowering strategies, tried in order via the instruction selector:
///   1. bool operand in a register      -> `xori dst, src, 1` (flips the low bit)
///   2. integer operand in one register -> `nor dst, src, $zero` (bitwise complement)
///   3. wide integer (multiple limbs)   -> per-limb `nor` through a scratch register
/// Falls through to `sel.fail()` when no pattern applies.
/// NOTE(review): `nor` with the zero register assumes the target ISA provides it
/// (MIPS-style) — confirm against the backend's instruction set.
fn airNot(cg: *CodeGen, inst: Air.Inst.Index) !void {
    const zcu = cg.pt.zcu;

    const ty_op = cg.getAirData(inst).ty_op;
    const result_ty = ty_op.ty.toType();
    var sel = Select.init(cg, inst, &try cg.tempsFromOperands(inst, .{ty_op.operand}));

    // case 1: booleans — a single xori with immediate 1 inverts 0 <-> 1.
    if (try sel.match(.{
        .requirement = result_ty.zigTypeTag(zcu) == .bool,
        .patterns = &.{
            .{ .srcs = &.{.to_int_reg} },
        },
    })) {
        const src = sel.ops[0];
        const result, _ = try cg.tempReuseOrAlloc(inst, src, 0, result_ty, .{ .use_frame = false });
        try cg.asmInst(.xori(result.getReg(cg), src.getReg(cg), 1));
        try sel.finish(result);
    }
    // case 2: integers that fit in one register — complement via nor with $zero.
    else if (try sel.match(.{
        .requirement = result_ty.isInt(zcu),
        .patterns = &.{.{ .srcs = &.{.to_int_reg} }},
    })) {
        const src = sel.ops[0];
        const result, _ = try cg.tempReuseOrAlloc(inst, src, 0, result_ty, .{ .use_frame = false });
        try cg.asmInst(.nor(result.getReg(cg), src.getReg(cg), .zero));
        try sel.finish(result);
    }
    // case 3: integers spanning multiple limbs — complement each limb in turn,
    // staging source limbs through one scratch usize register.
    else if (try sel.match(.{
        .requirement = result_ty.isInt(zcu),
        .patterns = &.{.{ .srcs = &.{.any} }},
        .temps = &.{.any_usize_reg},
    })) {
        const src = sel.ops[0];
        const scratch = sel.temps[0];
        const result, _ = try cg.tempReuseOrAlloc(inst, src, 0, result_ty, .{ .use_frame = false });
        for (0..src.getLimbCount(cg)) |i| {
            const src_limb = try scratch.ensureReg(cg, src.toLimbValue(i, cg));
            const result_limb = result.toLimbValue(i, cg);
            try cg.asmInst(.nor(result_limb.getReg().?, src_limb.getReg().?, .zero));
        }
        try sel.finish(result);
    } else return sel.fail();
}
3030
+
2970
3031
fn airRetAddr (cg : * CodeGen , inst : Air.Inst.Index ) ! void {
2971
3032
// do not mark $ra as allocated
2972
3033
const index = RegisterManager .indexOfKnownRegIntoTracked (.ra ).? ;
0 commit comments