diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
index 01fe3a7..6034d67 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
@@ -1658,6 +1658,10 @@
(SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0])
(SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0])
+// Special cases for slice operations: a 0/-1 mask (slicemask) commutes with the left shift, so the shift can be folded into the ADD as a shifted-register operand.
+(ADD x0 x1:(ANDshiftRA x2:(SLLconst [sl] y) z [63])) && x1.Uses == 1 && x2.Uses == 1 => (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
+(ADD x0 x1:(ANDshiftLL x2:(SRAconst [63] z) y [sl])) && x1.Uses == 1 && x2.Uses == 1 => (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
+
// bitfield ops
// sbfiz
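
Editor's note: the two rules above are the same fold written for the two operand orders in which the mask and the shifted value can reach the AND; the first matches (y<<sl) & (z>>63), the second (z>>63) & (y<<sl). The fold is sound because the arithmetic shift z>>63 is either all zeros or all ones, so masking before or after the left shift yields the same value, and the left shift can then be absorbed by the ADD as a shifted-register operand (ADDshiftLL). A minimal sketch of that identity follows; it is an illustration only, not compiler code, and assumes the opcode meanings ANDshiftRA a b [c] = a & (b>>c, arithmetic) and ADDshiftLL a b [c] = a + (b<<c).

package main

import "fmt"

// matched spells out the expression the first rule recognizes:
// x0 + ((y << sl) & (z >> 63)), with >> arithmetic on int64.
func matched(x0, y, z, sl int64) int64 { return x0 + ((y << sl) & (z >> 63)) }

// rewritten spells out the rule's result:
// x0 + ((y & (z >> 63)) << sl), i.e. an ADDshiftLL of an ANDshiftRA.
func rewritten(x0, y, z, sl int64) int64 { return x0 + ((y & (z >> 63)) << sl) }

func main() {
	// z>>63 is either 0 or ^0, so the two forms agree for any y and sl.
	for _, z := range []int64{-1, 0, 42} {
		fmt.Println(matched(100, 7, z, 3) == rewritten(100, 7, z, 3)) // true
	}
}
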
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 792967c..1bd759d 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -1592,6 +1592,66 @@
}
break
}
+ // match: (ADD x0 x1:(ANDshiftRA x2:(SLLconst [sl] y) z [63]))
+ // cond: x1.Uses == 1 && x2.Uses == 1
+ // result: (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64ANDshiftRA || auxIntToInt64(x1.AuxInt) != 63 {
+ continue
+ }
+ z := x1.Args[1]
+ x2 := x1.Args[0]
+ if x2.Op != OpARM64SLLconst {
+ continue
+ }
+ sl := auxIntToInt64(x2.AuxInt)
+ y := x2.Args[0]
+ if !(x1.Uses == 1 && x2.Uses == 1) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(sl)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDshiftRA, y.Type)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg2(y, z)
+ v.AddArg2(x0, v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(ANDshiftLL x2:(SRAconst [63] z) y [sl]))
+ // cond: x1.Uses == 1 && x2.Uses == 1
+ // result: (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64ANDshiftLL {
+ continue
+ }
+ sl := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[1]
+ x2 := x1.Args[0]
+ if x2.Op != OpARM64SRAconst || auxIntToInt64(x2.AuxInt) != 63 {
+ continue
+ }
+ z := x2.Args[0]
+ if !(x1.Uses == 1 && x2.Uses == 1) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(sl)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDshiftRA, y.Type)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg2(y, z)
+ v.AddArg2(x0, v0)
+ return true
+ }
+ break
+ }
return false
}
func rewriteValueARM64_OpARM64ADDSflags(v *Value) bool {
diff --git a/test/codegen/slices.go b/test/codegen/slices.go
index 9e8990c..a00c878 100644
--- a/test/codegen/slices.go
+++ b/test/codegen/slices.go
@@ -418,6 +418,15 @@
}
// --------------------------------------- //
+// ARM64 folding for slice masks //
+// --------------------------------------- //
+
+func SliceAndIndex(a []int, b int) int {
+ // arm64:"AND\tR[0-9]+->63","ADD\tR[0-9]+<<3"
+ return a[b:][b]
+}
+
+// --------------------------------------- //
// Code generation for unsafe.Slice //
// --------------------------------------- //
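
Editor's note on why this pattern appears: when the compiler slices, it adjusts the base pointer branch-freely by ANDing the byte offset with a Slicemask value, which is all ones when the resulting slice is non-empty (roughly, its new capacity is non-zero) and zero otherwise; on arm64, (Slicemask x) is lowered to (SRAconst [63] (NEG x)), which is where the SRAconst/ANDshiftRA [63] in the new rules comes from. With the fold applied, the test expects a single AND whose operand is a register shifted arithmetically right by 63 (the R[0-9]+->63 syntax in Go's arm64 assembly) and a single ADD with the element-size shift folded in (R[0-9]+<<3, i.e. x8 for int). Below is a rough model of the address arithmetic behind a[b:][b]; it is an illustration only, with hypothetical names, and bounds checks and the exact SSA shape omitted.

package main

import "fmt"

// slicemask(n) is ^0 when n != 0 and 0 otherwise; on arm64 it lowers to an
// arithmetic right shift of the negation, the ->63 operand the test expects.
func slicemask(n int64) int64 { return -n >> 63 }

// elemAddr is a rough model of the address of a[b:][b] for 8-byte ints:
// slicing masks the byte offset b<<3 with slicemask of the new capacity,
// and indexing the result adds another b<<3 (the <<3 folded into the ADD).
func elemAddr(base uintptr, b, capA int64) uintptr {
	off := (b << 3) & slicemask(capA-b)
	return base + uintptr(off) + uintptr(b<<3)
}

func main() {
	fmt.Println(slicemask(0), slicemask(5)) // 0 -1
	fmt.Println(elemAddr(0, 2, 8))          // 16 (slice adjust) + 16 (index) = 32
}
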