// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add(32|64)F ...) => (FADD(S|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub(32|64)F ...) => (FSUB(S|D) ...)

(Mul64 ...) => (MUL ...)
(Mul(32|16|8) ...) => (MULW ...)
(Mul(32|64)F  ...) => (FMUL(S|D) ...)

(Hmul64  ...) => (MULH ...)
(Hmul64u ...) => (UMULH ...)
(Hmul32  x y) => (SRAconst (MULL <typ.Int64> x y) [32])
(Hmul32u x y) => (SRAconst (UMULL <typ.UInt64> x y) [32])
(Select0 (Mul64uhilo x y)) => (UMULH x y)
(Select1 (Mul64uhilo x y)) => (MUL x y)

(Div64 [false] x y) => (DIV  x y)
(Div32 [false] x y) => (DIVW x y)
(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8   x y) => (DIVW  (SignExt8to32  x) (SignExt8to32  y))
(Div8u  x y) => (UDIVW (ZeroExt8to32  x) (ZeroExt8to32  y))
(Div64u ...) => (UDIV  ...)
(Div32u ...) => (UDIVW ...)
(Div32F ...) => (FDIVS ...)
(Div64F ...) => (FDIVD ...)

(Mod64 x y) => (MOD x y)
(Mod32 x y) => (MODW x y)
(Mod64u ...) => (UMOD ...)
(Mod32u ...) => (UMODW ...)
(Mod(16|8)  x y) => (MODW  (SignExt(16|8)to32 x) (SignExt(16|8)to32 y))
(Mod(16|8)u x y) => (UMODW (ZeroExt(16|8)to32 x) (ZeroExt(16|8)to32 y))
// (x + y) / 2 with x >= y    =>    (x - y) / 2 + y
(Avg64u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
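// The rewritten form cannot overflow: with x >= y, x-y fits in 64 bits.
// For example, x = 1<<64 - 1, y = 1: (x-y)>>1 + y = 1<<63, the true
// average, whereas x+y would have wrapped.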

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8)  ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// unary ops
(Neg(64|32|16|8) ...) => (NEG ...)
(Neg(32|64)F     ...) => (FNEG(S|D) ...)
(Com(64|32|16|8) ...) => (MVN ...)

// math package intrinsics
(Abs         ...) => (FABSD   ...)
(Sqrt        ...) => (FSQRTD  ...)
(Ceil        ...) => (FRINTPD ...)
(Floor       ...) => (FRINTMD ...)
(Round       ...) => (FRINTAD ...)
(RoundToEven ...) => (FRINTND ...)
(Trunc       ...) => (FRINTZD ...)
(FMA       x y z) => (FMADDD z x y)

(Sqrt32 ...) => (FSQRTS ...)

(Min(64|32)F ...) => (FMIN(D|S) ...)
(Max(64|32)F ...) => (FMAX(D|S) ...)

// lowering rotates
// We do rotate detection in the generic rules; if the rules below need
// to change, check the generic rules first.
(RotateLeft8  <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft8  <t> x y) => (OR <t> (SLL <t> x (ANDconst <typ.Int64> [7] y)) (SRL <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEG <typ.Int64> y))))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft16 <t> x y) => (RORW <t> (ORshiftLL <typ.UInt32> (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG <typ.Int64> y))
(RotateLeft32 x y) => (RORW x (NEG <y.Type> y))
(RotateLeft64 x y) => (ROR x (NEG <y.Type> y))
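// In the RotateLeft16 rule above, ORshiftLL [16] concatenates two copies
// of x into 32 bits; because that pattern repeats every 16 bits, a 32-bit
// rotate right by -y leaves x rotated left by y in the low 16 bits.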

(Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...)

(Ctz64 <t> x) => (CLZ  (RBIT  <t> x))
(Ctz32 <t> x) => (CLZW (RBITW <t> x))
(Ctz16 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
(Ctz8  <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
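// Ctz is CLZ applied to the bit-reversed value. For 8- and 16-bit inputs,
// a guard bit is OR'ed in just above the operand (bit 8 or bit 16) so that
// a zero input counts to exactly 8 or 16 rather than 32.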

(PopCount64 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
(PopCount32 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
(PopCount16 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
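// There is no popcount instruction on the general-purpose registers: the
// value is moved to a SIMD register, VCNT counts the set bits in each byte,
// and VUADDLV sums the per-byte counts before the result is moved back.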

// Load args directly into the register class where they will be used.
(FMOVDgpfp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
(FMOVDfpgp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})

// Similarly for stores: if we see a store after an FPR <=> GPR move,
// redirect the store to use the other register set.
(MOVDstore  [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem)
(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVWstore  [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem)
(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVDload  [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val)
(FMOVDload [off] {sym} ptr (MOVDstore  [off] {sym} ptr val _)) => (FMOVDgpfp val)
(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val)
(FMOVSload [off] {sym} ptr (MOVWstore  [off] {sym} ptr val _)) => (FMOVSgpfp val)

(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))

(Bswap64 ...) => (REV ...)
(Bswap32 ...) => (REVW ...)
(Bswap16 ...) => (REV16W ...)

(BitRev64 ...) => (RBIT ...)
(BitRev32 ...) => (RBITW ...)
(BitRev16   x) => (SRLconst [48] (RBIT <typ.UInt64> x))
(BitRev8    x) => (SRLconst [56] (RBIT <typ.UInt64> x))

// UMOD is assembled as a UREM instruction, which in turn expands to a
// UDIV plus an MSUB. If an identical UDIV already exists just before or
// after the UREM (as in quo, rem := z/y, z%y), that second UDIV is
// redundant; rewriting UMOD in terms of an explicit UDIV lets the CSE
// pass remove the duplicate.
(UMOD  <typ.UInt64> x y) => (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
(UMODW <typ.UInt32> x y) => (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))

// 64-bit addition with carry.
(Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
(Select1 (Add64carry x y c)) => (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
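// ADDSconstflags [-1] c adds all-ones to c, so the carry flag ends up set
// exactly when the carry-in c is nonzero; ADCSflags then computes x+y+carry,
// and ADCzerocarry materializes the outgoing carry flag as 0 or 1.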

// 64-bit subtraction with borrowing.
(Select0 (Sub64borrow x y bo)) => (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
(Select1 (Sub64borrow x y bo)) => (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
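// NEGSflags bo computes 0-bo, setting the carry (no-borrow) flag iff bo is
// zero; SBCSflags then computes x-y-borrow. NGCzerocarry yields -1 when a
// borrow occurred and 0 otherwise, which the outer NEG turns into 1 or 0.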

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB  ...) => (OR ...)
(EqB  x y) => (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not    x) => (XOR (MOVDconst [1]) x)

// shifts
// The hardware instruction uses only the low 6 bits of the shift amount,
// so we compare against 64 to preserve Go semantics for large shifts.
// The rules for rotates with a non-constant shift are derived from the
// rules below; if the rules below change, update the rotate rules too.

// Check shiftIsBounded first: if the shift amount is proved to be in
// range, we can emit the shift directly.
// left shift
(Lsh(64|32|16|8)x64 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x32 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x16 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x8  <t> x y) && shiftIsBounded(v) => (SLL <t> x y)

// signed right shift
(Rsh64x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> x y)
(Rsh32x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) y)
(Rsh16x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) y)
(Rsh8x(64|32|16|8)  <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) y)

// unsigned right shift
(Rsh64Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> x y)
(Rsh32Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt32to64 x) y)
(Rsh16Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt16to64 x) y)
(Rsh8Ux(64|32|16|8)  <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt8to64 x) y)

// The shift amount may be out of range; use CMP + CSEL instead.
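// The CSEL keeps the SLL/SRL result while y < 64 (unsigned) and selects 0
// otherwise, matching Go's rule that shifting by the width or more yields 0.
// The signed right shifts further below instead clamp the amount to 63, so
// an over-wide shift produces the sign fill.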
(Lsh64x64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh64x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh32x64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh16x64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh8x64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh64Ux64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh64Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh32Ux64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh32Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh16Ux64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh16Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh8Ux64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh8Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh64x64 x y) && !shiftIsBounded(v)        => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh64x(32|16|8) x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh32x64 x y) && !shiftIsBounded(v)        => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh32x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh16x64 x y) && !shiftIsBounded(v)        => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh16x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh8x64 x y) && !shiftIsBounded(v)        => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh8x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

// constants
(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const(32|64)F    [val]) => (FMOV(S|D)const [float64(val)])
(ConstNil) => (MOVDconst [0])
(ConstBool [t]) => (MOVDconst [b2i(t)])

(Slicemask <t> x) => (SRAconst (NEG <t> x) [63])
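// Slicemask yields ^0 for x > 0 and 0 for x == 0 (x, a length, is never
// negative here): NEG makes the sign bit track x != 0, and SRAconst [63]
// broadcasts that bit across the register.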

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8  ...) => (Copy ...)
(Trunc32to8  ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8  ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16  ...) => (MOVBUreg ...)
(ZeroExt8to32  ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64  ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16  ...) => (MOVBreg ...)
(SignExt8to32  ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64  ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F  ...) => (SCVTFWS ...)
(Cvt32to64F  ...) => (SCVTFWD ...)
(Cvt64to32F  ...) => (SCVTFS ...)
(Cvt64to64F  ...) => (SCVTFD ...)
(Cvt32Uto32F ...) => (UCVTFWS ...)
(Cvt32Uto64F ...) => (UCVTFWD ...)
(Cvt64Uto32F ...) => (UCVTFS ...)
(Cvt64Uto64F ...) => (UCVTFD ...)
(Cvt32Fto32  ...) => (FCVTZSSW ...)
(Cvt64Fto32  ...) => (FCVTZSDW ...)
(Cvt32Fto64  ...) => (FCVTZSS ...)
(Cvt64Fto64  ...) => (FCVTZSD ...)
(Cvt32Fto32U ...) => (FCVTZUSW ...)
(Cvt64Fto32U ...) => (FCVTZUDW ...)
(Cvt32Fto64U ...) => (FCVTZUS ...)
(Cvt64Fto64U ...) => (FCVTZUD ...)
(Cvt32Fto64F ...) => (FCVTSD ...)
(Cvt64Fto32F ...) => (FCVTDS ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round32F ...) => (LoweredRound32F ...)
(Round64F ...) => (LoweredRound64F ...)

// comparisons
(Eq8  x y)  => (Equal (CMPW (ZeroExt8to32  x) (ZeroExt8to32  y)))
(Eq16  x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32  x y) => (Equal (CMPW  x y))
(Eq64  x y) => (Equal (CMP   x y))
(EqPtr x y) => (Equal (CMP   x y))
(Eq32F x y) => (Equal (FCMPS x y))
(Eq64F x y) => (Equal (FCMPD x y))

(Neq8   x y) => (NotEqual (CMPW (ZeroExt8to32  x) (ZeroExt8to32  y)))
(Neq16  x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32  x y) => (NotEqual (CMPW  x y))
(Neq64  x y) => (NotEqual (CMP   x y))
(NeqPtr x y) => (NotEqual (CMP   x y))
(Neq(32|64)F x y) => (NotEqual (FCMP(S|D) x y))

(Less(8|16) x y) => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
(Less32 x y) => (LessThan (CMPW x y))
(Less64 x y) => (LessThan (CMP  x y))

// Set condition flags for floating-point comparisons "x < y"
// and "x <= y". If either operand is NaN, all three of (x < y),
// (x == y) and (x > y) are false, and the ARM manual says that in
// this case the FCMP instruction sets PSTATE.<N,Z,C,V> to (0, 0, 1, 1).
(Less32F x y) => (LessThanF (FCMPS x y))
(Less64F x y) => (LessThanF (FCMPD x y))
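// With that flag encoding, the conditions tested by LessThanF and
// LessEqualF are false for unordered operands, so comparisons involving
// NaN correctly evaluate to false.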

// For an unsigned integer x, the following rules are useful when combining branches:
// 0 <  x  =>  x != 0
// x <= 0  =>  x == 0
// x <  1  =>  x == 0
// 1 <= x  =>  x != 0
(Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x)
(Leq(8U|16U|32U|64U)  x zero:(MOVDconst [0])) => (Eq(8|16|32|64)  x zero)
(Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64)  x (MOVDconst [0]))
(Leq(8U|16U|32U|64U)  (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x)

(Less8U  x y) => (LessThanU (CMPW (ZeroExt8to32  x) (ZeroExt8to32  y)))
(Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) => (LessThanU (CMPW x y))
(Less64U x y) => (LessThanU (CMP x y))

(Leq8  x y) => (LessEqual (CMPW (SignExt8to32  x) (SignExt8to32  y)))
(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) => (LessEqual (CMPW x y))
(Leq64 x y) => (LessEqual (CMP x y))

// Refer to the comments for op Less64F above.
(Leq32F x y) => (LessEqualF (FCMPS x y))
(Leq64F x y) => (LessEqualF (FCMPD x y))

(Leq8U  x y) => (LessEqualU (CMPW (ZeroExt8to32  x) (ZeroExt8to32  y)))
(Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) => (LessEqualU (CMPW x y))
(Leq64U x y) => (LessEqualU (CMP x y))

// Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn"
(FCMPS x (FMOVSconst [0])) => (FCMPS0 x)
(FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x))
(FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))

// CSEL needs a flag-generating argument. Synthesize a TSTW if necessary.
(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
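// TSTWconst [1] boolval tests the boolean's low bit; the CSEL then picks
// x when that bit is set (NotEqual) and y otherwise.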

(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDconst [off] ptr)

(Addr {sym} base) => (MOVDaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVDaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t)  &&  t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t)  && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) &&  t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) &&  t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (FMOVDstore ptr val mem)

// zeroing
(Zero [0] _   mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem)
(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem)
(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [5] ptr mem) =>
	(MOVBstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [6] ptr mem) =>
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [7] ptr mem) =>
	(MOVWstore [3] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
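// Sizes that don't decompose into disjoint stores are handled with two
// overlapping stores: 7 bytes, for example, is a word store at offset 3
// on top of a word store at offset 0. Sizes 11 through 15 below use the
// same trick with doubleword stores.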
(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem)
(Zero [9] ptr mem) =>
	(MOVBstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [10] ptr mem) =>
	(MOVHstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [11] ptr mem) =>
	(MOVDstore [3] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [12] ptr mem) =>
	(MOVWstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [13] ptr mem) =>
	(MOVDstore [5] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [14] ptr mem) =>
	(MOVDstore [6] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [15] ptr mem) =>
	(MOVDstore [7] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [16] ptr mem) =>
	(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)

(Zero [s] ptr mem) && s > 16 && s < 192 => (LoweredZero [s] ptr mem)
(Zero [s] ptr mem) && s >= 192 => (LoweredZeroLoop [s] ptr mem)

// moves
(Move [0] _   _   mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) =>
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) =>
	(MOVWstore [3] dst (MOVWUload [3] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
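// As with Zero above, in-between sizes are copied with two load/store
// pairs that may overlap, e.g. 7 bytes is a word copy at offset 3 over a
// word copy at offset 0.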
(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [10] dst src mem) =>
	(MOVHstore [8] dst (MOVHUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [11] dst src mem) =>
	(MOVDstore [3] dst (MOVDload [3] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [12] dst src mem) =>
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [13] dst src mem) =>
	(MOVDstore [5] dst (MOVDload [5] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [14] dst src mem) =>
	(MOVDstore [6] dst (MOVDload [6] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [15] dst src mem) =>
	(MOVDstore [7] dst (MOVDload [7] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [16] dst src mem) =>
	(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)

(Move [s] dst src mem) && s > 16 && s <= 24 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
(Move [s] dst src mem) && s > 24 && s <= 32 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
(Move [s] dst src mem) && s > 32 && s <= 40 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
			(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
(Move [s] dst src mem) && s > 40 && s <= 48 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
			(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
(Move [s] dst src mem) && s > 48 && s <= 56 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
			(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
				(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
(Move [s] dst src mem) && s > 56 && s <= 64 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
			(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
				(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))

(Move [s] dst src mem) && s > 64 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)
(Move [s] dst src mem) && s >= 192 && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)

// calls
(StaticCall  ...) => (CALLstatic  ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall   ...) => (CALLinter   ...)
(TailCall    ...) => (CALLtail    ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
(IsInBounds      idx len) => (LessThanU  (CMP idx len))
(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP   ...) => (LoweredGetCallerSP   ...)
(GetCallerPC   ...) => (LoweredGetCallerPC   ...)

// Absorb pseudo-ops into blocks.
(If (Equal         cc) yes no) => (EQ cc yes no)
(If (NotEqual      cc) yes no) => (NE cc yes no)
(If (LessThan      cc) yes no) => (LT cc yes no)
(If (LessThanU     cc) yes no) => (ULT cc yes no)
(If (LessEqual     cc) yes no) => (LE cc yes no)
(If (LessEqualU    cc) yes no) => (ULE cc yes no)
(If (GreaterThan   cc) yes no) => (GT cc yes no)
(If (GreaterThanU  cc) yes no) => (UGT cc yes no)
(If (GreaterEqual  cc) yes no) => (GE cc yes no)
(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
(If (LessThanF     cc) yes no) => (FLT cc yes no)
(If (LessEqualF    cc) yes no) => (FLE cc yes no)
(If (GreaterThanF  cc) yes no) => (FGT cc yes no)
(If (GreaterEqualF cc) yes no) => (FGE cc yes no)

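// Any other boolean condition branches on its low bit.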
(If cond yes no) => (TBNZ [0] cond yes no)

(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr <typ.Uintptr> {makeJumpTableSym(b)} (SB)))

// atomic intrinsics
// Note: these ops do not accept an offset.
(AtomicLoad8   ...) => (LDARB ...)
(AtomicLoad32  ...) => (LDARW ...)
(AtomicLoad64  ...) => (LDAR  ...)
(AtomicLoadPtr ...) => (LDAR  ...)

(AtomicStore8       ...) => (STLRB ...)
(AtomicStore32      ...) => (STLRW ...)
(AtomicStore64      ...) => (STLR  ...)
(AtomicStorePtrNoWB ...) => (STLR  ...)

(AtomicExchange(8|32|64)     ...) => (LoweredAtomicExchange(8|32|64) ...)
(AtomicAdd(32|64)            ...) => (LoweredAtomicAdd(32|64)       ...)
(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64)       ...)

(AtomicAdd(32|64)Variant            ...) => (LoweredAtomicAdd(32|64)Variant       ...)
(AtomicExchange(8|32|64)Variant     ...) => (LoweredAtomicExchange(8|32|64)Variant ...)
(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant       ...)
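// The Variant forms use the ARMv8.1 LSE atomic instructions instead of an
// LL/SC loop; they are chosen when the target CPU is known (or detected at
// run time) to support LSE.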

// Return old contents.
(AtomicAnd(64|32|8)value        ...) => (LoweredAtomicAnd(64|32|8)        ...)
(AtomicOr(64|32|8)value         ...) => (LoweredAtomicOr(64|32|8)         ...)
(AtomicAnd(64|32|8)valueVariant ...) => (LoweredAtomicAnd(64|32|8)Variant ...)
(AtomicOr(64|32|8)valueVariant  ...) => (LoweredAtomicOr(64|32|8)Variant  ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier (0xe is ST option)
(PubBarrier mem) => (DMB [0xe] mem)

(PanicBounds ...) => (LoweredPanicBoundsRR ...)
(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
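// Bounds-check panics start in register/register (RR) form; constant
// operands are folded one at a time, degrading RR to RC/CR and finally
// to CC when both operands are constant.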

// Optimizations

// Absorb boolean tests into block
(NZ (Equal         cc) yes no) => (EQ  cc yes no)
(NZ (NotEqual      cc) yes no) => (NE  cc yes no)
(NZ (LessThan      cc) yes no) => (LT  cc yes no)
(NZ (LessThanU     cc) yes no) => (ULT cc yes no)
(NZ (LessEqual     cc) yes no) => (LE  cc yes no)
(NZ (LessEqualU    cc) yes no) => (ULE cc yes no)
(NZ (GreaterThan   cc) yes no) => (GT  cc yes no)
(NZ (GreaterThanU  cc) yes no) => (UGT cc yes no)
(NZ (GreaterEqual  cc) yes no) => (GE  cc yes no)
(NZ (GreaterEqualU cc) yes no) => (UGE cc yes no)
(NZ (LessThanF     cc) yes no) => (FLT cc yes no)
(NZ (LessEqualF    cc) yes no) => (FLE cc yes no)
(NZ (GreaterThanF  cc) yes no) => (FGT cc yes no)
(NZ (GreaterEqualF cc) yes no) => (FGE cc yes no)

(TBNZ [0] (Equal         cc) yes no) => (EQ  cc yes no)
(TBNZ [0] (NotEqual      cc) yes no) => (NE  cc yes no)
(TBNZ [0] (LessThan      cc) yes no) => (LT  cc yes no)
(TBNZ [0] (LessThanU     cc) yes no) => (ULT cc yes no)
(TBNZ [0] (LessEqual     cc) yes no) => (LE  cc yes no)
(TBNZ [0] (LessEqualU    cc) yes no) => (ULE cc yes no)
(TBNZ [0] (GreaterThan   cc) yes no) => (GT  cc yes no)
(TBNZ [0] (GreaterThanU  cc) yes no) => (UGT cc yes no)
(TBNZ [0] (GreaterEqual  cc) yes no) => (GE  cc yes no)
(TBNZ [0] (GreaterEqualU cc) yes no) => (UGE cc yes no)
(TBNZ [0] (LessThanF     cc) yes no) => (FLT cc yes no)
(TBNZ [0] (LessEqualF    cc) yes no) => (FLE cc yes no)
(TBNZ [0] (GreaterThanF  cc) yes no) => (FGT cc yes no)
(TBNZ [0] (GreaterEqualF cc) yes no) => (FGE cc yes no)

((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] z:(AND        x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TST                x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTconst         [c] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(AND        x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTW               x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTWconst [int32(c)] y) yes no)

// For conditional instructions such as CSET, CSEL.
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0]  z:(AND        x y))) && z.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TST x y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] x:(ANDconst [c] y))) && x.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTWconst [int32(c)] y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] z:(AND        x y))) && z.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTW x y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0]  x:(ANDconst [c] y))) && x.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTconst [c] y))

((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNconst         [c] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNWconst [int32(c)] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] z:(ADD        x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN                x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(ADD        x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW               x y) yes no)

// CMP(x,-y) -> CMN(x,y) is only valid for unordered (equality) tests,
// because y could be -1<<63, where NEG overflows.
((EQ|NE) (CMP x z:(NEG y)) yes no)   && z.Uses == 1 => ((EQ|NE) (CMN x y) yes no)
((Equal|NotEqual) (CMP x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMN x y))

// CMPW(x,-y) -> CMNW(x,y) is only valid for unordered (equality) tests,
// because y could be -1<<31, where NEG overflows.
((EQ|NE) (CMPW x z:(NEG y)) yes no)   && z.Uses == 1 => ((EQ|NE) (CMNW x y) yes no)
((Equal|NotEqual) (CMPW x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMNW x y))

// For conditional instructions such as CSET, CSEL.
// TODO: add support for LE, GT; overflow needs to be considered.
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst  [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNconst [c] y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNWconst [int32(c)] y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst  [0] z:(ADD        x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN  x y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(ADD        x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW x y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst  [0] z:(MADD     a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN  a (MUL  <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst  [0] z:(MSUB     a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMP  a (MUL  <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MADDW    a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW a (MULW <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MSUBW    a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMPW a (MULW <x.Type> x y)))

((CMPconst|CMNconst)   [c] y) && c < 0 && c != -1<<63 => ((CMNconst|CMPconst)   [-c] y)
((CMPWconst|CMNWconst) [c] y) && c < 0 && c != -1<<31 => ((CMNWconst|CMPWconst) [-c] y)
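// A comparison against a negative constant becomes the opposite comparison
// against its negation; the most negative value is excluded because -c
// would overflow.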

((EQ|NE) (CMPconst  [0] x) yes no) => ((Z|NZ)   x yes no)
((EQ|NE) (CMPWconst [0] x) yes no) => ((ZW|NZW) x yes no)

((ULE|UGT) (CMPconst  [0] x)) => ((EQ|NE) (CMPconst  [0] x))
((ULE|UGT) (CMPWconst [0] x)) => ((EQ|NE) (CMPWconst [0] x))

((Z|NZ)   sub:(SUB        x y)) && sub.Uses == 1 => ((EQ|NE) (CMP                x y))
((ZW|NZW) sub:(SUB        x y)) && sub.Uses == 1 => ((EQ|NE) (CMPW               x y))
((Z|NZ)   sub:(SUBconst [c] y)) && sub.Uses == 1 => ((EQ|NE) (CMPconst         [c] y))
((ZW|NZW) sub:(SUBconst [c] y)) && sub.Uses == 1 => ((EQ|NE) (CMPWconst [int32(c)] y))

((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] z:(MADD a x y))  yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN  a (MUL  <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] z:(MSUB a x y))  yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMP  a (MUL  <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW a (MULW <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMPW a (MULW <x.Type> x y)) yes no)

// Absorb bit-tests into block
(Z   (ANDconst  [c] x) yes no) && oneBit(c) => (TBZ  [int64(ntz64(c))] x yes no)
(NZ  (ANDconst  [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
(ZW  (ANDconst  [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ  [int64(ntz64(int64(uint32(c))))] x yes no)
(NZW (ANDconst  [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
(EQ  (TSTconst  [c] x) yes no) && oneBit(c) => (TBZ  [int64(ntz64(c))] x yes no)
(NE  (TSTconst  [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
(EQ  (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ  [int64(ntz64(int64(uint32(c))))] x yes no)
(NE  (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
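// oneBit reports whether the mask has exactly one set bit; the flag test
// then collapses to a test of that single bit, whose index is its number
// of trailing zeros (ntz64).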

// Test sign-bit for signed comparisons against zero
(GE (CMPWconst [0] x) yes no) => (TBZ  [31] x yes no)
(GE (CMPconst [0] x)  yes no) => (TBZ  [63] x yes no)
(LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no)
(LT (CMPconst [0] x)  yes no) => (TBNZ [63] x yes no)

// fold offset into address
(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) =>
	(MOVDaddr [int32(off1)+off2] {sym} ptr)

// fold address into load/store.
// Do not fold global variable accesses in -dynlink mode, where they are
// rewritten to use the GOT via REGTMP, which currently cannot handle
// large offsets.
(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBload [off1+int32(off2)] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBUload [off1+int32(off2)] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHload [off1+int32(off2)] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHUload [off1+int32(off2)] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWload [off1+int32(off2)] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWUload [off1+int32(off2)] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDload [off1+int32(off2)] {sym} ptr mem)
(LDP [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(LDP [off1+int32(off2)] {sym} ptr mem)
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSload [off1+int32(off2)] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDload [off1+int32(off2)] {sym} ptr mem)

// register indexed load
(MOVDload  [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
(MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
(MOVWload  [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
(MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
(MOVHload  [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
(MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
(MOVBload  [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
(FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem)
(FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem)

(MOVDloadidx  ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload  [int32(c)] ptr mem)
(MOVDloadidx  (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload  [int32(c)] ptr mem)
(MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWloadidx  ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload  [int32(c)] ptr mem)
(MOVWloadidx  (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload  [int32(c)] ptr mem)
(MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHloadidx  ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload  [int32(c)] ptr mem)
(MOVHloadidx  (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload  [int32(c)] ptr mem)
(MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBloadidx  ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload  [int32(c)] ptr mem)
(MOVBloadidx  (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload  [int32(c)] ptr mem)
(FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
(FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
(FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
(FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)

// shifted register indexed load
(MOVDload  [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem)
(MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem)
(MOVWload  [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem)
(MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem)
(MOVHload  [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem)
(MOVDloadidx  ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem)
(MOVWloadidx  ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem)
(MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem)
(MOVHloadidx  ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem)
(MOVHloadidx  ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem)
(MOVDloadidx  (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem)
(MOVWloadidx  (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem)
(MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem)
(MOVHloadidx  (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem)
(MOVDloadidx8  ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload  [int32(c)<<3] ptr mem)
(MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem)
(MOVWloadidx4  ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload  [int32(c)<<2] ptr mem)
(MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem)
(MOVHloadidx2  ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload  [int32(c)<<1] ptr mem)

(FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx8 ptr idx mem)
(FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx ptr (SLLconst [3] idx) mem) => (FMOVDloadidx8 ptr idx mem)
(FMOVSloadidx ptr (SLLconst [2] idx) mem) => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx (SLLconst [3] idx) ptr mem) => (FMOVDloadidx8 ptr idx mem)
(FMOVSloadidx (SLLconst [2] idx) ptr mem) => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (FMOVDload ptr [int32(c)<<3] mem)
(FMOVSloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (FMOVSload ptr [int32(c)<<2] mem)

(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBstore [off1+int32(off2)] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHstore [off1+int32(off2)] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDstore [off1+int32(off2)] {sym} ptr val mem)
(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDstore [off1+int32(off2)] {sym} ptr val mem)

// register indexed store
(MOVDstore  [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
(MOVWstore  [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
(MOVHstore  [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
(MOVBstore  [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem)
(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem)
(MOVDstoreidx  ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore  [int32(c)] ptr val mem)
(MOVDstoreidx  (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVDstore  [int32(c)] idx val mem)
(MOVWstoreidx  ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore  [int32(c)] ptr val mem)
(MOVWstoreidx  (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore  [int32(c)] idx val mem)
(MOVHstoreidx  ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore  [int32(c)] ptr val mem)
(MOVHstoreidx  (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore  [int32(c)] idx val mem)
(MOVBstoreidx  ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore  [int32(c)] ptr val mem)
(MOVBstoreidx  (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore  [int32(c)] idx val mem)
(FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem)
(FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem)
(FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem)
(FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem)

// shifted register indexed store
(MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem)
(MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem)
(MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx  ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem)
(MOVWstoreidx  ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem)
(MOVHstoreidx  ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVHstoreidx  ptr (ADD      idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx  (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem)
(MOVWstoreidx  (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem)
(MOVHstoreidx  (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVHstoreidx  (ADD      idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem)
(MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem)
(MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem)

(FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx ptr (SLLconst [3] idx) val mem) => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstoreidx ptr (SLLconst [2] idx) val mem) => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx (SLLconst [3] idx) ptr val mem) => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstoreidx (SLLconst [2] idx) ptr val mem) => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (FMOVDstore [int32(c)<<3] ptr val mem)
(FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (FMOVSstore [int32(c)<<2] ptr val mem)

(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   888  
   889  // replace a load from the same location as a preceding store with a zero/sign extension (or a plain copy in the full-width case)
   890  // these seem to interact badly with other rules, resulting in slower code, so they are disabled for now
   891  //(MOVBload  [off] {sym} ptr (MOVBstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
   892  //(MOVBUload [off] {sym} ptr (MOVBstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
   893  //(MOVHload  [off] {sym} ptr (MOVHstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
   894  //(MOVHUload [off] {sym} ptr (MOVHstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
   895  //(MOVWload  [off] {sym} ptr (MOVWstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
   896  //(MOVWUload [off] {sym} ptr (MOVWstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
   897  //(MOVDload  [off] {sym} ptr (MOVDstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   898  //(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   899  //(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   900  //(LDP       [off] {sym} ptr (STP      [off2] {sym2} ptr2 x y _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x y
   901  
   902  // don't extend the value before a narrow store; the store writes only the low bits, so the extension is redundant
   903  (MOVBstore [off] {sym} ptr (MOVBreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   904  (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   905  (MOVBstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   906  (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   907  (MOVBstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   908  (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   909  (MOVHstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
   910  (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   911  (MOVHstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
   912  (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   913  (MOVWstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVWstore [off] {sym} ptr x mem)
   914  (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   915  (MOVBstoreidx  ptr idx (MOVBreg  x) mem) => (MOVBstoreidx  ptr idx x mem)
   916  (MOVBstoreidx  ptr idx (MOVBUreg x) mem) => (MOVBstoreidx  ptr idx x mem)
   917  (MOVBstoreidx  ptr idx (MOVHreg  x) mem) => (MOVBstoreidx  ptr idx x mem)
   918  (MOVBstoreidx  ptr idx (MOVHUreg x) mem) => (MOVBstoreidx  ptr idx x mem)
   919  (MOVBstoreidx  ptr idx (MOVWreg  x) mem) => (MOVBstoreidx  ptr idx x mem)
   920  (MOVBstoreidx  ptr idx (MOVWUreg x) mem) => (MOVBstoreidx  ptr idx x mem)
   921  (MOVHstoreidx  ptr idx (MOVHreg  x) mem) => (MOVHstoreidx  ptr idx x mem)
   922  (MOVHstoreidx  ptr idx (MOVHUreg x) mem) => (MOVHstoreidx  ptr idx x mem)
   923  (MOVHstoreidx  ptr idx (MOVWreg  x) mem) => (MOVHstoreidx  ptr idx x mem)
   924  (MOVHstoreidx  ptr idx (MOVWUreg x) mem) => (MOVHstoreidx  ptr idx x mem)
   925  (MOVWstoreidx  ptr idx (MOVWreg  x) mem) => (MOVWstoreidx  ptr idx x mem)
   926  (MOVWstoreidx  ptr idx (MOVWUreg x) mem) => (MOVWstoreidx  ptr idx x mem)
   927  (MOVHstoreidx2 ptr idx (MOVHreg  x) mem) => (MOVHstoreidx2 ptr idx x mem)
   928  (MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
   929  (MOVHstoreidx2 ptr idx (MOVWreg  x) mem) => (MOVHstoreidx2 ptr idx x mem)
   930  (MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
   931  (MOVWstoreidx4 ptr idx (MOVWreg  x) mem) => (MOVWstoreidx4 ptr idx x mem)
   932  (MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
   933  
   934  // if a register move has only 1 use, just use the same register without emitting an instruction
   935  // MOVDnop doesn't emit an instruction; it exists only to ensure the type.
   936  (MOVDreg x) && x.Uses == 1 => (MOVDnop x)
   937  
   938  // TODO: we should be able to get rid of MOVDnop altogether.
   939  // But for now, this is enough to get rid of lots of them.
   940  (MOVDnop (MOVDconst [c])) => (MOVDconst [c])
   941  
   942  // fold constant into arithmetic ops
   943  (ADD  x (MOVDconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
   944  (SUB  x (MOVDconst [c])) => (SUBconst [c] x)
   945  (AND  x (MOVDconst [c])) => (ANDconst [c] x)
   946  (OR   x (MOVDconst [c])) => (ORconst  [c] x)
   947  (XOR  x (MOVDconst [c])) => (XORconst [c] x)
   948  (TST  x (MOVDconst [c])) => (TSTconst [c] x)
   949  (TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x)
   950  (CMN  x (MOVDconst [c])) => (CMNconst [c] x)
   951  (CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x)
   952  (BIC  x (MOVDconst [c])) => (ANDconst [^c] x)
   953  (EON  x (MOVDconst [c])) => (XORconst [^c] x)
   954  (ORN  x (MOVDconst [c])) => (ORconst  [^c] x)
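        // Identity check for the three rules above: BIC, EON and ORN compute
        // x &^ y, x ^ ^y and x | ^y, so with y == c they fold to the existing
        // AND/XOR/OR constant ops with the complemented constant [^c].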
   955  
   956  (SLL x (MOVDconst [c])) => (SLLconst x [c&63])
   957  (SRL x (MOVDconst [c])) => (SRLconst x [c&63])
   958  (SRA x (MOVDconst [c])) => (SRAconst x [c&63])
   959  (SLL x (ANDconst [63] y)) => (SLL x y)
   960  (SRL x (ANDconst [63] y)) => (SRL x y)
   961  (SRA x (ANDconst [63] y)) => (SRA x y)
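        // The AND with 63 above is redundant: the machine SLL/SRL/SRA use only
        // the low 6 bits of the shift amount (64-bit shifts are mod 64).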
   962  
   963  (CMP  x (MOVDconst [c])) => (CMPconst [c] x)
   964  (CMP  (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x))
   965  (CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
   966  (CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
   967  
   968  (ROR  x (MOVDconst [c])) => (RORconst x [c&63])
   969  (RORW x (MOVDconst [c])) => (RORWconst x [c&31])
   970  
   971  (ADDSflags x (MOVDconst [c]))  => (ADDSconstflags [c] x)
   972  
   973  (ADDconst [c] y) && c < 0 => (SUBconst [-c] y)
   974  
   975  // Canonicalize the order of arguments to comparisons - helps with CSE.
   976  ((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
   977  
   978  // mul-neg => mneg
   979  (NEG  (MUL  x y)) => (MNEG  x y)
   980  (NEG  (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
   981  (MUL  (NEG  x) y) => (MNEG  x y)
   982  (MULW (NEG  x) y) => (MNEGW x y)
   983  
   984  // madd/msub
   985  (ADD a l:(MUL  x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
   986  (SUB a l:(MUL  x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
   987  (ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
   988  (SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
   989  
   990  (ADD a l:(MULW  x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
   991  (SUB a l:(MULW  x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
   992  (ADD a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
   993  (SUB a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
   994  
   995  // madd/msub can't take constant arguments, so do a bit of reordering if a non-constant is available.
   996  // Note: don't reorder arithmetic concerning pointers, as we must ensure that
   997  // no intermediate computations are invalid pointers.
   998  (ADD <t> a p:(ADDconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (ADDconst [c] (ADD <v.Type> a m))
   999  (ADD <t> a p:(SUBconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (SUBconst [c] (ADD <v.Type> a m))
  1000  (SUB <t> a p:(ADDconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (SUBconst [c] (SUB <v.Type> a m))
  1001  (SUB <t> a p:(SUBconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (ADDconst [c] (SUB <v.Type> a m))
  1002  
  1003  // optimize ADCSflags, SBCSflags and friends
  1004  (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
  1005  (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y)
  1006  (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo))))) => (SBCSflags x y bo)
  1007  (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0])))) => (SUBSflags x y)
  1008  
  1009  // mul by constant
  1010  (MUL _ (MOVDconst [0])) => (MOVDconst [0])
  1011  (MUL x (MOVDconst [1])) => x
  1012  
  1013  (MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
  1014  (MULW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg x)
  1015  
  1016  (MUL  x (MOVDconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}
  1017  (MULW x (MOVDconst [c])) && v.Type.Size() <= 4 && canMulStrengthReduce32(config, int32(c)) => {mulStrengthReduce32(v, x, int32(c))}
  1018  
  1019  // mneg by constant
  1020  (MNEG x (MOVDconst [-1])) => x
  1021  (MNEG _ (MOVDconst [0])) => (MOVDconst [0])
  1022  (MNEG x (MOVDconst [1])) => (NEG x)
  1023  (MNEG x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
  1024  (MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1025  (MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
  1026  (MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
  1027  (MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
  1028  (MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
  1029  (MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
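        // Worked example (c%3 rule above, c == 6): SUBshiftLL x x [2] computes
        // x - (x<<2) == -3*x, and SLLconst [log64(6/3)] == [1] doubles that to
        // -6*x, which is exactly MNEG x 6.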
  1030  
  1031  
  1032  (MNEGW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg x)
  1033  (MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
  1034  (MNEGW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (NEG <x.Type> x))
  1035  (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
  1036  (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1037  (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
  1038  (MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
  1039  (MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
  1040  (MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
  1041  (MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))
  1042  
  1043  
  1044  (MADD a x (MOVDconst [-1])) => (SUB a x)
  1045  (MADD a _ (MOVDconst [0])) => a
  1046  (MADD a x (MOVDconst [1])) => (ADD a x)
  1047  (MADD a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log64(c)])
  1048  (MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1049  (MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
  1050  (MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
  1051  (MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
  1052  (MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
  1053  (MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
  1054  
  1055  (MADD a (MOVDconst [-1]) x) => (SUB a x)
  1056  (MADD a (MOVDconst [0]) _) => a
  1057  (MADD a (MOVDconst [1]) x) => (ADD a x)
  1058  (MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log64(c)])
  1059  (MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1060  (MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
  1061  (MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
  1062  (MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
  1063  (MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
  1064  (MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
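        // Worked example (c%5 rule above, c == 5): ADDshiftLL x x [2] computes
        // x + (x<<2) == 5*x, and the outer ADDshiftLL with shift log64(5/5) == 0
        // adds it to a, giving a + 5*x == MADD a x 5.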
  1065  
  1066  (MADDW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
  1067  (MADDW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
  1068  (MADDW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
  1069  (MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
  1070  (MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1071  (MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
  1072  (MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
  1073  (MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
  1074  (MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
  1075  (MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
  1076  
  1077  (MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
  1078  (MADDW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
  1079  (MADDW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
  1080  (MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
  1081  (MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1082  (MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
  1083  (MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
  1084  (MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
  1085  (MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
  1086  (MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
  1087  
  1088  (MSUB a x (MOVDconst [-1])) => (ADD a x)
  1089  (MSUB a _ (MOVDconst [0])) => a
  1090  (MSUB a x (MOVDconst [1])) => (SUB a x)
  1091  (MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log64(c)])
  1092  (MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1093  (MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
  1094  (MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
  1095  (MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
  1096  (MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
  1097  (MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
  1098  
  1099  (MSUB a (MOVDconst [-1]) x) => (ADD a x)
  1100  (MSUB a (MOVDconst [0]) _) => a
  1101  (MSUB a (MOVDconst [1]) x) => (SUB a x)
  1102  (MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log64(c)])
  1103  (MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1104  (MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
  1105  (MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
  1106  (MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
  1107  (MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
  1108  (MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
  1109  
  1110  (MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
  1111  (MSUBW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
  1112  (MSUBW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
  1113  (MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
  1114  (MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1115  (MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
  1116  (MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
  1117  (MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
  1118  (MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
  1119  (MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
  1120  
  1121  (MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
  1122  (MSUBW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
  1123  (MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
  1124  (MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
  1125  (MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1126  (MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
  1127  (MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
  1128  (MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
  1129  (MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
  1130  (MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
  1131  
  1132  // div by constant
  1133  (UDIV  x (MOVDconst [1])) => x
  1134  (UDIV  x (MOVDconst [c])) && isPowerOfTwo(c) => (SRLconst [log64(c)] x)
  1135  (UDIVW x (MOVDconst [c])) && uint32(c)==1 => (MOVWUreg x)
  1136  (UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
  1137  (UMOD  _ (MOVDconst [1])) => (MOVDconst [0])
  1138  (UMOD  x (MOVDconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x)
  1139  (UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
  1140  (UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (ANDconst [c-1] x)
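        // For power-of-two divisors these reduce to shifts and masks,
        // e.g. unsigned x/8 == x>>3 and unsigned x%8 == x&7.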
  1141  
  1142  // generic simplifications
  1143  (ADD x (NEG y)) => (SUB x y)
  1144  (SUB x (NEG y)) => (ADD x y)
  1145  (SUB x x) => (MOVDconst [0])
  1146  (AND x x) => x
  1147  (OR  x x) => x
  1148  (XOR x x) => (MOVDconst [0])
  1149  (BIC x x) => (MOVDconst [0])
  1150  (EON x x) => (MOVDconst [-1])
  1151  (ORN x x) => (MOVDconst [-1])
  1152  (AND x (MVN y)) => (BIC x y)
  1153  (XOR x (MVN y)) => (EON x y)
  1154  (OR  x (MVN y)) => (ORN x y)
  1155  (MVN (XOR x y)) => (EON x y)
  1156  (NEG (SUB x y)) => (SUB y x)
  1157  (NEG (NEG x)) => x
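        // The MVN absorptions above rely on BIC/EON/ORN computing x &^ y,
        // x ^ ^y and x | ^y; folding the MVN into the op saves an instruction.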
  1158  
  1159  (CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag)
  1160  (CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag)
  1161  (CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
  1162  (CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
  1163  (CSEL [cc] x (ADDconst [1] a) flag) => (CSINC [cc] x a flag)
  1164  (CSEL [cc] (ADDconst [1] a) x flag) => (CSINC [arm64Negate(cc)] x a flag)
  1165  (CSEL [cc] x (MVN a) flag) => (CSINV [cc] x a flag)
  1166  (CSEL [cc] (MVN a) x flag) => (CSINV [arm64Negate(cc)] x a flag)
  1167  (CSEL [cc] x (NEG a) flag) => (CSNEG [cc] x a flag)
  1168  (CSEL [cc] (NEG a) x flag) => (CSNEG [arm64Negate(cc)] x a flag)
  1169  
  1170  (SUB x (SUB y z)) => (SUB (ADD <v.Type> x z) y)
  1171  (SUB (SUB x y) z) => (SUB x (ADD <y.Type> y z))
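        // These use the identities x - (y - z) == (x + z) - y and
        // (x - y) - z == x - (y + z).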
  1172  
  1173  // remove redundant *const ops
  1174  (ADDconst [0]  x) => x
  1175  (SUBconst [0]  x) => x
  1176  (ANDconst [0]  _) => (MOVDconst [0])
  1177  (ANDconst [-1] x) => x
  1178  (ORconst  [0]  x) => x
  1179  (ORconst  [-1] _) => (MOVDconst [-1])
  1180  (XORconst [0]  x) => x
  1181  (XORconst [-1] x) => (MVN x)
  1182  
  1183  // generic constant folding
  1184  (ADDconst [c] (MOVDconst [d]))  => (MOVDconst [c+d])
  1185  (ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
  1186  (ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
  1187  (SUBconst [c] (MOVDconst [d]))  => (MOVDconst [d-c])
  1188  (SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
  1189  (SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
  1190  (SLLconst [c] (MOVDconst [d]))  => (MOVDconst [d<<uint64(c)])
  1191  (SRLconst [c] (MOVDconst [d]))  => (MOVDconst [int64(uint64(d)>>uint64(c))])
  1192  (SRAconst [c] (MOVDconst [d]))  => (MOVDconst [d>>uint64(c)])
  1193  (MUL   (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
  1194  (MNEG  (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
  1195  (MULW  (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c*d))])
  1196  (MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(-c*d))])
  1197  (MADD  (MOVDconst [c]) x y) => (ADDconst [c] (MUL  <x.Type> x y))
  1198  (MSUB  (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
  1199  (MADD  a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
  1200  (MSUB  a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
  1201  (MADDW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MULW  <x.Type> x y)))
  1202  (MSUBW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
  1203  (MADDW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (ADDconst <a.Type> [c*d] a))
  1204  (MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (SUBconst <a.Type> [c*d] a))
  1205  (DIV   (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
  1206  (UDIV  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
  1207  (DIVW  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)/int32(d)))])
  1208  (UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
  1209  (MOD   (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
  1210  (UMOD  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
  1211  (MODW  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)%int32(d)))])
  1212  (UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
  1213  (ANDconst [c] (MOVDconst [d]))  => (MOVDconst [c&d])
  1214  (ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
  1215  (ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
  1216  (ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
  1217  (ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
  1218  (MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
  1219  (MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
  1220  (MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
  1221  (ORconst  [c] (MOVDconst [d]))  => (MOVDconst [c|d])
  1222  (ORconst  [c] (ORconst [d] x))  => (ORconst [c|d] x)
  1223  (XORconst [c] (MOVDconst [d]))  => (MOVDconst [c^d])
  1224  (XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
  1225  (MVN (MOVDconst [c])) => (MOVDconst [^c])
  1226  (NEG (MOVDconst [c])) => (MOVDconst [-c])
  1227  (MOVBreg  (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
  1228  (MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
  1229  (MOVHreg  (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
  1230  (MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
  1231  (MOVWreg  (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
  1232  (MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
  1233  (MOVDreg  (MOVDconst [c])) => (MOVDconst [c])
  1234  
  1235  // constant comparisons
  1236  (CMPconst  (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)])
  1237  (CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)])
  1238  (TSTconst  (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
  1239  (TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)])
  1240  (CMNconst  (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)])
  1241  (CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)])
  1242  
  1243  // other known comparisons
  1244  (CMPconst  (MOVBUreg _) [c]) && 0xff < c       => (FlagConstant [subFlags64(0,1)])
  1245  (CMPconst  (MOVHUreg _) [c]) && 0xffff < c     => (FlagConstant [subFlags64(0,1)])
  1246  (CMPconst  (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)])
  1247  (CMPconst  (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)])
  1248  (CMPconst  (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) => (FlagConstant [subFlags64(0,1)])
  1249  (CMPWconst (MOVBUreg _) [c]) && 0xff   < c => (FlagConstant [subFlags64(0,1)])
  1250  (CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
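        // e.g. for the SRLconst rule above with c == 40: the shifted value is
        // below 1<<24, so comparing it against any n with 1<<24 <= n always
        // produces "less than" flags.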
  1251  
  1252  // absorb flag constants into branches
  1253  (EQ (FlagConstant [fc]) yes no) &&  fc.eq() => (First yes no)
  1254  (EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
  1255  
  1256  (NE (FlagConstant [fc]) yes no) &&  fc.ne() => (First yes no)
  1257  (NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
  1258  
  1259  (LT (FlagConstant [fc]) yes no) &&  fc.lt() => (First yes no)
  1260  (LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
  1261  
  1262  (LE (FlagConstant [fc]) yes no) &&  fc.le() => (First yes no)
  1263  (LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
  1264  
  1265  (GT (FlagConstant [fc]) yes no) &&  fc.gt() => (First yes no)
  1266  (GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
  1267  
  1268  (GE (FlagConstant [fc]) yes no) &&  fc.ge() => (First yes no)
  1269  (GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
  1270  
  1271  (ULT (FlagConstant [fc]) yes no) &&  fc.ult() => (First yes no)
  1272  (ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
  1273  
  1274  (ULE (FlagConstant [fc]) yes no) &&  fc.ule() => (First yes no)
  1275  (ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
  1276  
  1277  (UGT (FlagConstant [fc]) yes no) &&  fc.ugt() => (First yes no)
  1278  (UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
  1279  
  1280  (UGE (FlagConstant [fc]) yes no) &&  fc.uge() => (First yes no)
  1281  (UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
  1282  
  1283  (LTnoov (FlagConstant [fc]) yes no) &&  fc.ltNoov() => (First yes no)
  1284  (LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
  1285  
  1286  (LEnoov (FlagConstant [fc]) yes no) &&  fc.leNoov() => (First yes no)
  1287  (LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
  1288  
  1289  (GTnoov (FlagConstant [fc]) yes no) &&  fc.gtNoov() => (First yes no)
  1290  (GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
  1291  
  1292  (GEnoov (FlagConstant [fc]) yes no) &&  fc.geNoov() => (First yes no)
  1293  (GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
  1294  
  1295  (Z   (MOVDconst [0]) yes no)                  => (First yes no)
  1296  (Z   (MOVDconst [c]) yes no) && c != 0        => (First no yes)
  1297  (NZ  (MOVDconst [0]) yes no)                  => (First no yes)
  1298  (NZ  (MOVDconst [c]) yes no) && c != 0        => (First yes no)
  1299  (ZW  (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
  1300  (ZW  (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
  1301  (NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
  1302  (NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)
  1303  
  1304  // absorb InvertFlags into branches
  1305  (LT  (InvertFlags cmp) yes no) => (GT cmp yes no)
  1306  (GT  (InvertFlags cmp) yes no) => (LT cmp yes no)
  1307  (LE  (InvertFlags cmp) yes no) => (GE cmp yes no)
  1308  (GE  (InvertFlags cmp) yes no) => (LE cmp yes no)
  1309  (ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
  1310  (UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
  1311  (ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
  1312  (UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
  1313  (EQ  (InvertFlags cmp) yes no) => (EQ cmp yes no)
  1314  (NE  (InvertFlags cmp) yes no) => (NE cmp yes no)
  1315  (FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
  1316  (FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
  1317  (FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
  1318  (FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
  1319  (LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
  1320  (GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
  1321  (LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
  1322  (GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
  1323  
  1324  // absorb InvertFlags into conditional instructions
  1325  (CSEL  [cc] x y (InvertFlags cmp)) => (CSEL  [arm64Invert(cc)] x y cmp)
  1326  (CSEL0 [cc] x   (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x   cmp)
  1327  (CSETM [cc]     (InvertFlags cmp)) => (CSETM [arm64Invert(cc)]     cmp)
  1328  (CSINC [cc] x y (InvertFlags cmp)) => (CSINC [arm64Invert(cc)] x y cmp)
  1329  (CSINV [cc] x y (InvertFlags cmp)) => (CSINV [arm64Invert(cc)] x y cmp)
  1330  (CSNEG [cc] x y (InvertFlags cmp)) => (CSNEG [arm64Invert(cc)] x y cmp)
  1331  
  1332  // absorb flag constants into boolean values
  1333  (Equal             (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
  1334  (NotEqual          (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())])
  1335  (LessThan          (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())])
  1336  (LessThanU         (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())])
  1337  (LessEqual         (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())])
  1338  (LessEqualU        (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())])
  1339  (GreaterThan       (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())])
  1340  (GreaterThanU      (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())])
  1341  (GreaterEqual      (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())])
  1342  (GreaterEqualU     (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
  1343  (LessThanNoov      (FlagConstant [fc])) => (MOVDconst [b2i(fc.ltNoov())])
  1344  (GreaterEqualNoov  (FlagConstant [fc])) => (MOVDconst [b2i(fc.geNoov())])
  1345  
  1346  // absorb InvertFlags into boolean values
  1347  (Equal            (InvertFlags x)) => (Equal x)
  1348  (NotEqual         (InvertFlags x)) => (NotEqual x)
  1349  (LessThan         (InvertFlags x)) => (GreaterThan x)
  1350  (LessThanU        (InvertFlags x)) => (GreaterThanU x)
  1351  (GreaterThan      (InvertFlags x)) => (LessThan x)
  1352  (GreaterThanU     (InvertFlags x)) => (LessThanU x)
  1353  (LessEqual        (InvertFlags x)) => (GreaterEqual x)
  1354  (LessEqualU       (InvertFlags x)) => (GreaterEqualU x)
  1355  (GreaterEqual     (InvertFlags x)) => (LessEqual x)
  1356  (GreaterEqualU    (InvertFlags x)) => (LessEqualU x)
  1357  (LessThanF        (InvertFlags x)) => (GreaterThanF x)
  1358  (LessEqualF       (InvertFlags x)) => (GreaterEqualF x)
  1359  (GreaterThanF     (InvertFlags x)) => (LessThanF x)
  1360  (GreaterEqualF    (InvertFlags x)) => (LessEqualF x)
  1361  (LessThanNoov     (InvertFlags x)) => (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
  1362  (GreaterEqualNoov (InvertFlags x)) => (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)
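        // Inverting the flags swaps the comparison's operands, turning
        // LessThanNoov into greater-than and GreaterEqualNoov into
        // less-or-equal; neither has a single Noov op, so they are built as
        // GE && NE (via CSEL0) and LT || EQ (via CSINC) respectively.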
  1363  
  1364  // Don't bother extending if we're not using the higher bits.
  1365  (MOV(B|BU)reg x) && v.Type.Size() <= 1 => x
  1366  (MOV(H|HU)reg x) && v.Type.Size() <= 2 => x
  1367  (MOV(W|WU)reg x) && v.Type.Size() <= 4 => x
  1368  
  1369  // omit sign extension
  1370  (MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
  1371  (MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
  1372  (MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
  1373  
  1374  // absorb flag constants into conditional instructions
  1375  (CSEL  [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
  1376  (CSEL  [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
  1377  (CSEL0 [cc] x   flag) && ccARM64Eval(cc, flag) > 0 => x
  1378  (CSEL0 [cc] _   flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
  1379  (CSNEG [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
  1380  (CSNEG [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (NEG y)
  1381  (CSINV [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
  1382  (CSINV [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (Not y)
  1383  (CSINC [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
  1384  (CSINC [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (ADDconst [1] y)
  1385  (CSETM [cc]     flag) && ccARM64Eval(cc, flag) > 0 => (MOVDconst [-1])
  1386  (CSETM [cc]     flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
  1387  
  1388  // absorb flags back into boolean CSEL
  1389  (CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
  1390        (CSEL [boolval.Op] x y flagArg(boolval))
  1391  (CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
  1392        (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
  1393  (CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
  1394        (CSEL0 [boolval.Op] x flagArg(boolval))
  1395  (CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
  1396        (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
  1397  
  1398  // absorb shifts into ops
  1399  (NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
  1400  (NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
  1401  (NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
  1402  (MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
  1403  (MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
  1404  (MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
  1405  (MVN x:(RORconst [c] y)) && clobberIfDead(x) => (MVNshiftRO [c] y)
  1406  (ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
  1407  (ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
  1408  (ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
  1409  (SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
  1410  (SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
  1411  (SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
  1412  (AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
  1413  (AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
  1414  (AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
  1415  (AND x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ANDshiftRO x0 y [c])
  1416  (OR  x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL  x0 y [c]) // useful for combined load
  1417  (OR  x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL  x0 y [c])
  1418  (OR  x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA  x0 y [c])
  1419  (OR  x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORshiftRO  x0 y [c])
  1420  (XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
  1421  (XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
  1422  (XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
  1423  (XOR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (XORshiftRO x0 y [c])
  1424  (BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
  1425  (BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
  1426  (BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
  1427  (BIC x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (BICshiftRO x0 y [c])
  1428  (ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
  1429  (ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
  1430  (ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
  1431  (ORN x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORNshiftRO x0 y [c])
  1432  (EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
  1433  (EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
  1434  (EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
  1435  (EON x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (EONshiftRO x0 y [c])
  1436  (CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
  1437  (CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
  1438  (CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
  1439  (CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
  1440  (CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
  1441  (CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
  1442  (CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
  1443  (CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
  1444  (CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
  1445  (TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
  1446  (TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
  1447  (TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
  1448  (TST x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (TSTshiftRO x0 y [c])
  1449  
  1450  // prefer *const ops to *shift ops
  1451  (ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
  1452  (ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
  1453  (ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
  1454  (ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
  1455  (ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
  1456  (ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
  1457  (ANDshiftRO (MOVDconst [c]) x [d]) => (ANDconst [c] (RORconst <x.Type> x [d]))
  1458  (ORshiftLL  (MOVDconst [c]) x [d]) => (ORconst  [c] (SLLconst <x.Type> x [d]))
  1459  (ORshiftRL  (MOVDconst [c]) x [d]) => (ORconst  [c] (SRLconst <x.Type> x [d]))
  1460  (ORshiftRA  (MOVDconst [c]) x [d]) => (ORconst  [c] (SRAconst <x.Type> x [d]))
  1461  (ORshiftRO  (MOVDconst [c]) x [d]) => (ORconst  [c] (RORconst <x.Type> x [d]))
  1462  (XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
  1463  (XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
  1464  (XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
  1465  (XORshiftRO (MOVDconst [c]) x [d]) => (XORconst [c] (RORconst <x.Type> x [d]))
  1466  (CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
  1467  (CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
  1468  (CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
  1469  (CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
  1470  (CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
  1471  (CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
  1472  (TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
  1473  (TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
  1474  (TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
  1475  (TSTshiftRO (MOVDconst [c]) x [d]) => (TSTconst [c] (RORconst <x.Type> x [d]))
  1476  
  1477  // constant folding in *shift ops
  1478  (MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
  1479  (MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
  1480  (MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
  1481  (MVNshiftRO (MOVDconst [c]) [d]) => (MOVDconst [^rotateRight64(c, d)])
  1482  (NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
  1483  (NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
  1484  (NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
  1485  (ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
  1486  (ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
  1487  (ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
  1488  (SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
  1489  (SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
  1490  (SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
  1491  (ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
  1492  (ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
  1493  (ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
  1494  (ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
  1495  (ORshiftLL  x (MOVDconst [c]) [d]) => (ORconst  x [int64(uint64(c)<<uint64(d))])
  1496  (ORshiftRL  x (MOVDconst [c]) [d]) => (ORconst  x [int64(uint64(c)>>uint64(d))])
  1497  (ORshiftRA  x (MOVDconst [c]) [d]) => (ORconst  x [c>>uint64(d)])
  1498  (ORshiftRO  x (MOVDconst [c]) [d]) => (ORconst  x [rotateRight64(c, d)])
  1499  (XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
  1500  (XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
  1501  (XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
  1502  (XORshiftRO x (MOVDconst [c]) [d]) => (XORconst x [rotateRight64(c, d)])
  1503  (BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
  1504  (BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
  1505  (BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
  1506  (BICshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [^rotateRight64(c, d)])
  1507  (ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst  x [^int64(uint64(c)<<uint64(d))])
  1508  (ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst  x [^int64(uint64(c)>>uint64(d))])
  1509  (ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst  x [^(c>>uint64(d))])
  1510  (ORNshiftRO x (MOVDconst [c]) [d]) => (ORconst  x [^rotateRight64(c, d)])
  1511  (EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
  1512  (EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
  1513  (EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
  1514  (EONshiftRO x (MOVDconst [c]) [d]) => (XORconst x [^rotateRight64(c, d)])
  1515  (CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
  1516  (CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
  1517  (CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
  1518  (CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
  1519  (CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
  1520  (CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
  1521  (TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
  1522  (TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
  1523  (TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
  1524  (TSTshiftRO x (MOVDconst [c]) [d]) => (TSTconst x [rotateRight64(c, d)])
  1525  
  1526  // simplification with *shift ops
  1527  (SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
  1528  (SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
  1529  (SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
  1530  (ANDshiftLL y:(SLLconst x [c]) x [c]) => y
  1531  (ANDshiftRL y:(SRLconst x [c]) x [c]) => y
  1532  (ANDshiftRA y:(SRAconst x [c]) x [c]) => y
  1533  (ANDshiftRO y:(RORconst x [c]) x [c]) => y
  1534  (ORshiftLL  y:(SLLconst x [c]) x [c]) => y
  1535  (ORshiftRL  y:(SRLconst x [c]) x [c]) => y
  1536  (ORshiftRA  y:(SRAconst x [c]) x [c]) => y
  1537  (ORshiftRO  y:(RORconst x [c]) x [c]) => y
  1538  (XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
  1539  (XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
  1540  (XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
  1541  (XORshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
  1542  (BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
  1543  (BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
  1544  (BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
  1545  (BICshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
  1546  (EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
  1547  (EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
  1548  (EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
  1549  (EONshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
  1550  (ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
  1551  (ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
  1552  (ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
  1553  (ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
  1554  
  1555  // rev16w | rev16
  1556  // ((x>>8) | (x<<8)) => (REV16W x), where the type of x is uint16; "|" can also be "^" or "+".
  1557  ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
  1558  
  1559  // ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
  1560  ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
  1561  	&& uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
  1562  	=> (REV16W x)
  1563  
  1564  // ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), "|" can also be "^" or "+".
  1565  ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
  1566  	&& (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
  1567  	=> (REV16 x)
  1568  
  1569  // ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
  1570  ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
  1571  	&& (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
  1572  	=> (REV16 (ANDconst <x.Type> [0xffffffff] x))
  1573  
  1574  // Extract from reg pair
  1575  (ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
  1576  ( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
  1577  (XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
  1578  
  1579  (ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  1580  	=> (EXTRWconst [32-c] x2 x)
  1581  ( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  1582  	=> (EXTRWconst [32-c] x2 x)
  1583  (XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  1584  	=> (EXTRWconst [32-c] x2 x)
  1585  
  1586  // Rewrite special pairs of shifts to AND.
  1587  // On ARM64 the bitmask can fit into an instruction.
  1588  (SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
  1589  (SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
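        // e.g. (SRLconst [56] (SLLconst [56] x)) keeps only the low byte: x & 0xff.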
  1590  
  1591  // Special case for setting bits to 1, e.g. math.Copysign(c, -1): if the OR sets every bit the AND clears (c1|c2 == ^0), the AND is redundant.
  1592  (ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0  => (ORconst [c1] x)
  1593  
  1594  // If the shift amount is at least the data size (32, 16, or 8), the zero-extended result is the constant 0.
  1595  (MOVWUreg (SLLconst [lc] x)) && lc >= 32 => (MOVDconst [0])
  1596  (MOVHUreg (SLLconst [lc] x)) && lc >= 16 => (MOVDconst [0])
  1597  (MOVBUreg (SLLconst [lc] x)) && lc >= 8 => (MOVDconst [0])
  1598  
  1599  // After zero extension, the upper (64-datasize(32|16|8)) bits are zero, so shifting right by at least the data size yields the constant 0.
  1600  (SRLconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVDconst [0])
  1601  (SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0])
  1602  (SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0])
  1603  
  1604  // Special cases for slice operations
  1605  (ADD x0 x1:(ANDshiftRA x2:(SLLconst [sl] y) z [63])) && x1.Uses == 1 && x2.Uses == 1 => (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
  1606  (ADD x0 x1:(ANDshiftLL x2:(SRAconst [63] z) y [sl])) && x1.Uses == 1 && x2.Uses == 1 => (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
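        // The mask z>>63 is all ones or all zeros, so applying it before or
        // after the shift by sl is equivalent; masking first lets the final
        // shift fuse into the ADD.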
  1607  
  1608  // bitfield ops
  1609  
  1610  // sbfiz
  1611  // (x << lc) >> rc
  1612  (SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
  1613  // int64(x << lc)
  1614  (MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
  1615  (MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
  1616  (MOVBreg (SLLconst [lc] x)) && lc < 8  => (SBFIZ [armBFAuxInt(lc,  8-lc)] x)
  1617  // int64(x) << lc
  1618  (SLLconst [lc] (MOVWreg x))  => (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
  1619  (SLLconst [lc] (MOVHreg x))  => (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
  1620  (SLLconst [lc] (MOVBreg x))  => (SBFIZ [armBFAuxInt(lc, min(8,  64-lc))] x)
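        // Worked example for the (x << lc) >> rc rule above: (SRAconst [8]
        // (SLLconst [16] x)) places bits 0..47 of x at bit positions 8..55 and
        // sign-extends from bit 55, i.e. SBFIZ with lsb == 16-8 == 8 and
        // width == 64-16 == 48.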
  1621  
  1622  // sbfx
  1623  // (x << lc) >> rc
  1624  (SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
  1625  // int64(x) >> rc
  1626  (SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
  1627  (SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
  1628  (SRAconst [rc] (MOVBreg x)) && rc < 8  => (SBFX [armBFAuxInt(rc,  8-rc)] x)
  1629  // merge sbfx and sign-extension into sbfx
  1630  (MOVWreg (SBFX [bfc] x)) && bfc.width() <= 32 => (SBFX [bfc] x)
  1631  (MOVHreg (SBFX [bfc] x)) && bfc.width() <= 16 => (SBFX [bfc] x)
  1632  (MOVBreg (SBFX [bfc] x)) && bfc.width() <=  8 => (SBFX [bfc] x)
  1633  
  1634  // sbfiz/sbfx combinations: merge shifts into bitfield ops
  1635  (SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.lsb()
  1636  	=> (SBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
  1637  (SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.lsb()
  1638  	&& sc < bfc.lsb()+bfc.width()
  1639  	=> (SBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
  1640  (SBFX [bfc] s:(SLLconst [sc] x))
  1641  	&& s.Uses == 1
  1642  	&& sc <= bfc.lsb()
  1643  	=> (SBFX [armBFAuxInt(bfc.lsb() - sc, bfc.width())] x)
  1644  (SBFX [bfc] s:(SLLconst [sc] x))
  1645  	&& s.Uses == 1
  1646  	&& sc > bfc.lsb()
  1647  	=> (SBFIZ [armBFAuxInt(sc - bfc.lsb(), bfc.width() - (sc-bfc.lsb()))] x)
  1648  
  1649  // ubfiz
  1650  // (x << lc) >> rc
  1651  (SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
  1652  // uint64(x) << lc
  1653  (SLLconst [lc] (MOVWUreg x))  => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
  1654  (SLLconst [lc] (MOVHUreg x))  => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
  1655  (SLLconst [lc] (MOVBUreg x))  => (UBFIZ [armBFAuxInt(lc, min(8,  64-lc))] x)
  1656  // uint64(x << lc)
  1657  (MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
  1658  (MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
  1659  (MOVBUreg (SLLconst [lc] x)) && lc < 8  => (UBFIZ [armBFAuxInt(lc,  8-lc)] x)
  1660  
  1661  // merge ANDconst into ubfiz
  1662  // (x & ac) << sc
  1663  (SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
  1664  	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
  1665  // (x << sc) & ac
  1666  (ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
  1667  	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)

// ubfx
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// uint64(x) >> rc
(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
(SRLconst [rc] (MOVBUreg x)) && rc < 8  => (UBFX [armBFAuxInt(rc,  8-rc)] x)
// uint64(x >> rc)
(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
(MOVBUreg (SRLconst [rc] x)) && rc < 8  => (UBFX [armBFAuxInt(rc,  8)] x)
// merge ANDconst into ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
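// For example, (x >> 4) & 0xff becomes the single field extract
// UBFX [armBFAuxInt(4, 8)] x.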
// merge ANDconst and ubfx into ubfx
(ANDconst [c] (UBFX [bfc] x)) && isARM64BFMask(0, c, 0) =>
	(UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0)))] x)
(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.lsb() + bfc.width() <= arm64BFWidth(c, 0) =>
	(UBFX [bfc] x)
// merge ubfx and zero-extension into ubfx
(MOVWUreg (UBFX [bfc] x)) && bfc.width() <= 32 => (UBFX [bfc] x)
(MOVHUreg (UBFX [bfc] x)) && bfc.width() <= 16 => (UBFX [bfc] x)
(MOVBUreg (UBFX [bfc] x)) && bfc.width() <=  8 => (UBFX [bfc] x)

// Extracting bits from across a zero-extension boundary.
(UBFX [bfc] e:(MOVWUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 32
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVHUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 16
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVBUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 8
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb()))] x)
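// The extract may look through the zero-extension as long as it starts below
// the extension boundary; clipping the width at that boundary is safe because
// UBFX zero-fills above the field, matching the zeros the extension supplied.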

// ubfiz/ubfx combinations: merge shifts into bitfield ops
(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.width()
	=> (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.width()+bfc.lsb() < 64
	=> (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.width()+bfc.lsb() < 64
	=> (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.width()
	=> (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
// ((x << c1) >> c2) >> c3
(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.lsb()
	=> (ANDconst [1<<uint(bfc.width())-1] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.lsb()
	=> (UBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.lsb()
	&& sc < bfc.lsb()+bfc.width()
	=> (UBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
// ((x << c1) << c2) >> c3
(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.lsb()
	=> (ANDconst [1<<uint(bfc.width())-1] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.lsb()
	=> (UBFX [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.lsb()
	&& sc < bfc.lsb()+bfc.width()
	=> (UBFIZ [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
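// When the shift count equals the field's lsb, the shift and the field
// placement cancel and only the mask remains: for example,
// ((x << 4) & (0xff << 4)) >> 4 is simply x & 0xff.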

// bfi
(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
	&& ac == ^((1<<uint(bfc.width())-1) << uint(bfc.lsb()))
	=> (BFI [bfc] y x)
(ORshiftLL [s] (ANDconst [xc] x) (ANDconst [yc] y))
	&& xc == ^(yc << s)    // opposite masks
	&& yc & (yc+1) == 0    // power of 2 minus 1
	&& yc > 0              // not 0, not all 64 bits (there are better rewrites in that case)
	&& s+log64(yc+1) <= 64 // shifted mask doesn't overflow
	=> (BFI [armBFAuxInt(s, log64(yc+1))] x y)
(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
	&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
	=> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
// bfxil
(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.width())-1)
	=> (BFXIL [bfc] y x)
(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.width()
	=> (BFXIL [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
	=> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
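// BFI [armBFAuxInt(l, w)] a b keeps a except for the w-bit field at bit l,
// which is replaced by the low w bits of b; BFXIL [armBFAuxInt(l, w)] a b
// instead replaces the low w bits of a with the field at bits l..l+w-1 of b.
// The mask conditions above check that the AND clears exactly the bits the
// insert fills.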

// FP simplification
(FNEGS  (FMULS  x y)) => (FNMULS x y)
(FNEGD  (FMULD  x y)) => (FNMULD x y)
(FMULS  (FNEGS  x) y) => (FNMULS x y)
(FMULD  (FNEGD  x) y) => (FNMULD x y)
(FNEGS  (FNMULS x y)) => (FMULS  x y)
(FNEGD  (FNMULD x y)) => (FMULD  x y)
(FNMULS (FNEGS  x) y) => (FMULS  x y)
(FNMULD (FNEGD  x) y) => (FMULD  x y)
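// FNMUL(S|D) x y computes -(x*y), so negating either the product or one
// factor toggles between the MUL and NMUL forms.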

(FADDS a (FMULS  x y)) && a.Block.Func.useFMA(v) => (FMADDS  a x y)
(FADDD a (FMULD  x y)) && a.Block.Func.useFMA(v) => (FMADDD  a x y)
(FSUBS a (FMULS  x y)) && a.Block.Func.useFMA(v) => (FMSUBS  a x y)
(FSUBD a (FMULD  x y)) && a.Block.Func.useFMA(v) => (FMSUBD  a x y)
(FSUBS (FMULS  x y) a) && a.Block.Func.useFMA(v) => (FNMSUBS a x y)
(FSUBD (FMULD  x y) a) && a.Block.Func.useFMA(v) => (FNMSUBD a x y)
(FADDS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS  a x y)
(FADDD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD  a x y)
(FSUBS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS  a x y)
(FSUBD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD  a x y)
(FSUBS (FNMULS x y) a) && a.Block.Func.useFMA(v) => (FNMADDS a x y)
(FSUBD (FNMULD x y) a) && a.Block.Func.useFMA(v) => (FNMADDD a x y)
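// Fused operand order: FMADD a x y = a + x*y, FMSUB a x y = a - x*y,
// FNMSUB a x y = x*y - a, FNMADD a x y = -(a + x*y). useFMA reports whether
// the compiler may fuse the multiply and add for this value (fusing skips the
// intermediate rounding of the product).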
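// Fold loads from read-only symbols at constant offsets into constants read
// directly from the object file; the signed variants sign-extend the value.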
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVDload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVBload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int8(read8(sym, int64(off))))])
(MOVHload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVWload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])

// Prefetch instructions (aux selects the prefetch operation: 0 - PLDL1KEEP; 1 - PLDL1STRM)
(PrefetchCache addr mem)         => (PRFM [0] addr mem)
(PrefetchCacheStreamed addr mem) => (PRFM [1] addr mem)

// Arch-specific inlining for small or disjoint runtime.memmove
(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore  _ src s3:(MOVDstore {t} _ dst mem)))))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(s1, s2, s3, call)
	=> (Move [sz] dst src mem)

// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& call.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(call)
	=> (Move [sz] dst src mem)
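// The first form matches the stack-based calling convention, where dst, src,
// and sz reach the call through a chain of argument stores; the second
// matches the register ABI, where they are direct call operands. Both fire
// only for a known non-negative constant size that isInlinableMemmove accepts.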
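// Byte/word reversal is an involution, so two in a row cancel.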
((REV|REVW) ((REV|REVW) p)) => p

// internal/runtime/math.MulUintptr intrinsics

(Select0 (Mul64uover x y)) => (MUL x y)
(Select1 (Mul64uover x y)) => (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0]))
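// Select0 is the low 64 bits of the product; Select1 is the overflow flag,
// true exactly when the high 64 bits (UMULH) are nonzero.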