diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
index 49bdbc8..31829a5 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -823,16 +823,16 @@
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
 
// Test for -∞ (bit 0) using 64 bit classify instruction.
-(FLTD x (FMOVDconst [c])) && float64ExactBits(c, -math.MaxFloat64) => (ANDI [1] (FCLASSD x))
-(FLED (FMOVDconst [c]) x) && float64ExactBits(c, -math.MaxFloat64) => (SNEZ (ANDI <typ.Int64> [0xff &^ 1] (FCLASSD x)))
-(FEQD x (FMOVDconst [c])) && float64ExactBits(c, math.Inf(-1)) => (ANDI [1] (FCLASSD x))
-(FNED x (FMOVDconst [c])) && float64ExactBits(c, math.Inf(-1)) => (SEQZ (ANDI <typ.Int64> [1] (FCLASSD x)))
+(FLTD x (FMOVDconst [-math.MaxFloat64])) => (ANDI [0b00_0000_0001] (FCLASSD x))
+(FLED (FMOVDconst [-math.MaxFloat64]) x) => (SNEZ (ANDI <typ.Int64> [0b00_1111_1110] (FCLASSD x)))
+(FEQD x (FMOVDconst [math.Inf(-1)])) => (ANDI [0b00_0000_0001] (FCLASSD x))
+(FNED x (FMOVDconst [math.Inf(-1)])) => (SEQZ (ANDI <typ.Int64> [0b00_0000_0001] (FCLASSD x)))
 
// Test for +∞ (bit 7) using 64 bit classify instruction.
-(FLTD (FMOVDconst [c]) x) && float64ExactBits(c, math.MaxFloat64) => (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
-(FLED x (FMOVDconst [c])) && float64ExactBits(c, math.MaxFloat64) => (SNEZ (ANDI <typ.Int64> [0xff &^ (1<<7)] (FCLASSD x)))
-(FEQD x (FMOVDconst [c])) && float64ExactBits(c, math.Inf(1)) => (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
-(FNED x (FMOVDconst [c])) && float64ExactBits(c, math.Inf(1)) => (SEQZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+(FLTD (FMOVDconst [math.MaxFloat64]) x) => (SNEZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))
+(FLED x (FMOVDconst [math.MaxFloat64])) => (SNEZ (ANDI <typ.Int64> [0b00_0111_1111] (FCLASSD x)))
+(FEQD x (FMOVDconst [math.Inf(1)])) => (SNEZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))
+(FNED x (FMOVDconst [math.Inf(1)])) => (SEQZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))
 
//
// Optimisations for rva22u64 and above.
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index a822ebc..0730897 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -765,10 +765,6 @@
return i
}
 
-func float64ExactBits(f float64, c float64) bool {
- return math.Float64bits(f) == math.Float64bits(c)
-}
-
func flagConstantToAuxInt(x flagConstant) int64 {
return int64(x)
}
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 8a390eb..52870fe 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -3582,21 +3582,16 @@
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (FEQD x (FMOVDconst [c]))
- // cond: float64ExactBits(c, math.Inf(-1))
- // result: (ANDI [1] (FCLASSD x))
+ // match: (FEQD x (FMOVDconst [math.Inf(-1)]))
+ // result: (ANDI [0b00_0000_0001] (FCLASSD x))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != OpRISCV64FMOVDconst {
- continue
- }
- c := auxIntToFloat64(v_1.AuxInt)
- if !(float64ExactBits(c, math.Inf(-1))) {
+ if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(-1) {
continue
}
v.reset(OpRISCV64ANDI)
- v.AuxInt = int64ToAuxInt(1)
+ v.AuxInt = int64ToAuxInt(0b00_0000_0001)
v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
@@ -3604,22 +3599,17 @@
}
break
}
- // match: (FEQD x (FMOVDconst [c]))
- // cond: float64ExactBits(c, math.Inf(1))
- // result: (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+ // match: (FEQD x (FMOVDconst [math.Inf(1)]))
+ // result: (SNEZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != OpRISCV64FMOVDconst {
- continue
- }
- c := auxIntToFloat64(v_1.AuxInt)
- if !(float64ExactBits(c, math.Inf(1))) {
+ if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(1) {
continue
}
v.reset(OpRISCV64SNEZ)
v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
- v0.AuxInt = int64ToAuxInt(1 << 7)
+ v0.AuxInt = int64ToAuxInt(0b00_1000_0000)
v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
@@ -3635,42 +3625,32 @@
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (FLED (FMOVDconst [c]) x)
- // cond: float64ExactBits(c, -math.MaxFloat64)
- // result: (SNEZ (ANDI <typ.Int64> [0xff &^ 1] (FCLASSD x)))
+ // match: (FLED (FMOVDconst [-math.MaxFloat64]) x)
+ // result: (SNEZ (ANDI <typ.Int64> [0b00_1111_1110] (FCLASSD x)))
for {
- if v_0.Op != OpRISCV64FMOVDconst {
+ if v_0.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != -math.MaxFloat64 {
break
}
- c := auxIntToFloat64(v_0.AuxInt)
x := v_1
- if !(float64ExactBits(c, -math.MaxFloat64)) {
- break
- }
v.reset(OpRISCV64SNEZ)
v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
- v0.AuxInt = int64ToAuxInt(0xff &^ 1)
+ v0.AuxInt = int64ToAuxInt(0b00_1111_1110)
v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)
return true
}
- // match: (FLED x (FMOVDconst [c]))
- // cond: float64ExactBits(c, math.MaxFloat64)
- // result: (SNEZ (ANDI <typ.Int64> [0xff &^ (1<<7)] (FCLASSD x)))
+ // match: (FLED x (FMOVDconst [math.MaxFloat64]))
+ // result: (SNEZ (ANDI <typ.Int64> [0b00_0111_1111] (FCLASSD x)))
for {
x := v_0
- if v_1.Op != OpRISCV64FMOVDconst {
- break
- }
- c := auxIntToFloat64(v_1.AuxInt)
- if !(float64ExactBits(c, math.MaxFloat64)) {
+ if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.MaxFloat64 {
break
}
v.reset(OpRISCV64SNEZ)
v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
- v0.AuxInt = int64ToAuxInt(0xff &^ (1 << 7))
+ v0.AuxInt = int64ToAuxInt(0b00_0111_1111)
v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
@@ -3684,40 +3664,30 @@
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (FLTD x (FMOVDconst [c]))
- // cond: float64ExactBits(c, -math.MaxFloat64)
- // result: (ANDI [1] (FCLASSD x))
+ // match: (FLTD x (FMOVDconst [-math.MaxFloat64]))
+ // result: (ANDI [0b00_0000_0001] (FCLASSD x))
for {
x := v_0
- if v_1.Op != OpRISCV64FMOVDconst {
- break
- }
- c := auxIntToFloat64(v_1.AuxInt)
- if !(float64ExactBits(c, -math.MaxFloat64)) {
+ if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != -math.MaxFloat64 {
break
}
v.reset(OpRISCV64ANDI)
- v.AuxInt = int64ToAuxInt(1)
+ v.AuxInt = int64ToAuxInt(0b00_0000_0001)
v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
v0.AddArg(x)
v.AddArg(v0)
return true
}
- // match: (FLTD (FMOVDconst [c]) x)
- // cond: float64ExactBits(c, math.MaxFloat64)
- // result: (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+ // match: (FLTD (FMOVDconst [math.MaxFloat64]) x)
+ // result: (SNEZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))
for {
- if v_0.Op != OpRISCV64FMOVDconst {
+ if v_0.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != math.MaxFloat64 {
break
}
- c := auxIntToFloat64(v_0.AuxInt)
x := v_1
- if !(float64ExactBits(c, math.MaxFloat64)) {
- break
- }
v.reset(OpRISCV64SNEZ)
v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
- v0.AuxInt = int64ToAuxInt(1 << 7)
+ v0.AuxInt = int64ToAuxInt(0b00_1000_0000)
v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
@@ -4155,22 +4125,17 @@
v_0 := v.Args[0]
b := v.Block
typ := &b.Func.Config.Types
- // match: (FNED x (FMOVDconst [c]))
- // cond: float64ExactBits(c, math.Inf(-1))
- // result: (SEQZ (ANDI <typ.Int64> [1] (FCLASSD x)))
+ // match: (FNED x (FMOVDconst [math.Inf(-1)]))
+ // result: (SEQZ (ANDI <typ.Int64> [0b00_0000_0001] (FCLASSD x)))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != OpRISCV64FMOVDconst {
- continue
- }
- c := auxIntToFloat64(v_1.AuxInt)
- if !(float64ExactBits(c, math.Inf(-1))) {
+ if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(-1) {
continue
}
v.reset(OpRISCV64SEQZ)
v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
- v0.AuxInt = int64ToAuxInt(1)
+ v0.AuxInt = int64ToAuxInt(0b00_0000_0001)
v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
@@ -4179,22 +4144,17 @@
}
break
}
- // match: (FNED x (FMOVDconst [c]))
- // cond: float64ExactBits(c, math.Inf(1))
- // result: (SEQZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
+ // match: (FNED x (FMOVDconst [math.Inf(1)]))
+ // result: (SEQZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
- if v_1.Op != OpRISCV64FMOVDconst {
- continue
- }
- c := auxIntToFloat64(v_1.AuxInt)
- if !(float64ExactBits(c, math.Inf(1))) {
+ if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(1) {
continue
}
v.reset(OpRISCV64SEQZ)
v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
- v0.AuxInt = int64ToAuxInt(1 << 7)
+ v0.AuxInt = int64ToAuxInt(0b00_1000_0000)
v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
v1.AddArg(x)
v0.AddArg(v1)
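
For reference (not part of the patch): the ANDI masks above come from the FCLASS.D result layout, where bit 0 flags -Inf, bits 1 through 6 cover the finite and zero classes, bit 7 flags +Inf, and bits 8 and 9 flag NaNs. The sketch below mirrors that classification in plain Go so the masks used in the rules can be sanity-checked on a non-RISC-V host; the constant and function names are illustrative only and do not exist anywhere in the compiler.

// Host-side sketch of the RISC-V FCLASS.D bit layout used by the rules above.
package main

import (
	"fmt"
	"math"
)

const (
	classNegInf       = 1 << 0 // 0b00_0000_0001: value is -Inf
	classNegNormal    = 1 << 1
	classNegSubnormal = 1 << 2
	classNegZero      = 1 << 3
	classPosZero      = 1 << 4
	classPosSubnormal = 1 << 5
	classPosNormal    = 1 << 6
	classPosInf       = 1 << 7 // 0b00_1000_0000: value is +Inf
	classSignalingNaN = 1 << 8
	classQuietNaN     = 1 << 9

	smallestNormal = 0x1p-1022 // smallest positive normal float64
)

// fclassD mirrors the FCLASS.D classification so the ANDI masks in the
// rewrite rules can be checked without RISC-V hardware.
func fclassD(f float64) int64 {
	switch {
	case math.IsInf(f, -1):
		return classNegInf
	case math.IsInf(f, 1):
		return classPosInf
	case math.IsNaN(f):
		return classQuietNaN // Go cannot construct a signaling NaN directly
	case f == 0:
		if math.Signbit(f) {
			return classNegZero
		}
		return classPosZero
	case math.Abs(f) < smallestNormal:
		if f < 0 {
			return classNegSubnormal
		}
		return classPosSubnormal
	case f < 0:
		return classNegNormal
	default:
		return classPosNormal
	}
}

func main() {
	// x < -MaxFloat64   <=>  bit 0 (-Inf) is set.
	fmt.Println(fclassD(math.Inf(-1))&0b00_0000_0001 != 0) // true
	// -MaxFloat64 <= x  <=>  any of bits 1..7 is set (not -Inf, not NaN).
	fmt.Println(fclassD(-math.MaxFloat64)&0b00_1111_1110 != 0) // true
	// x <= MaxFloat64   <=>  any of bits 0..6 is set (not +Inf, not NaN).
	fmt.Println(fclassD(math.Inf(1))&0b00_0111_1111 != 0) // false
}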