diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
index 6028152..4b533df 100644
--- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
@@ -349,6 +349,9 @@
(Abs ...) => (F64Abs ...)
(Copysign ...) => (F64Copysign ...)

+(F32DemoteF64 (F64(Sqrt|Trunc|Ceil|Floor|Nearest|Abs) (F64PromoteF32 x))) => (F32(Sqrt|Trunc|Ceil|Floor|Nearest|Abs) x)
+(F32DemoteF64 (F64Copysign (F64PromoteF32 x) (F64PromoteF32 y))) => (F32Copysign x y)
+
(Sqrt ...) => (F64Sqrt ...)
(Sqrt32 ...) => (F32Sqrt ...)
(Ctz64 ...) => (I64Ctz ...)
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
index faba41b..f7fe8fa 100644
--- a/src/cmd/compile/internal/ssa/rewriteWasm.go
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -599,6 +599,8 @@
case OpWB:
v.Op = OpWasmLoweredWB
return true
+ case OpWasmF32DemoteF64:
+ return rewriteValueWasm_OpWasmF32DemoteF64(v)
case OpWasmF64Add:
return rewriteValueWasm_OpWasmF64Add(v)
case OpWasmF64Mul:
@@ -3617,6 +3619,121 @@
}
return false
}
+func rewriteValueWasm_OpWasmF32DemoteF64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (F32DemoteF64 (F64Sqrt (F64PromoteF32 x)))
+ // result: (F32Sqrt x)
+ for {
+ if v_0.Op != OpWasmF64Sqrt {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmF64PromoteF32 {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmF32Sqrt)
+ v.AddArg(x)
+ return true
+ }
+ // match: (F32DemoteF64 (F64Trunc (F64PromoteF32 x)))
+ // result: (F32Trunc x)
+ for {
+ if v_0.Op != OpWasmF64Trunc {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmF64PromoteF32 {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmF32Trunc)
+ v.AddArg(x)
+ return true
+ }
+ // match: (F32DemoteF64 (F64Ceil (F64PromoteF32 x)))
+ // result: (F32Ceil x)
+ for {
+ if v_0.Op != OpWasmF64Ceil {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmF64PromoteF32 {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmF32Ceil)
+ v.AddArg(x)
+ return true
+ }
+ // match: (F32DemoteF64 (F64Floor (F64PromoteF32 x)))
+ // result: (F32Floor x)
+ for {
+ if v_0.Op != OpWasmF64Floor {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmF64PromoteF32 {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmF32Floor)
+ v.AddArg(x)
+ return true
+ }
+ // match: (F32DemoteF64 (F64Nearest (F64PromoteF32 x)))
+ // result: (F32Nearest x)
+ for {
+ if v_0.Op != OpWasmF64Nearest {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmF64PromoteF32 {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmF32Nearest)
+ v.AddArg(x)
+ return true
+ }
+ // match: (F32DemoteF64 (F64Abs (F64PromoteF32 x)))
+ // result: (F32Abs x)
+ for {
+ if v_0.Op != OpWasmF64Abs {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmF64PromoteF32 {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmF32Abs)
+ v.AddArg(x)
+ return true
+ }
+ // match: (F32DemoteF64 (F64Copysign (F64PromoteF32 x) (F64PromoteF32 y)))
+ // result: (F32Copysign x y)
+ for {
+ if v_0.Op != OpWasmF64Copysign {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmF64PromoteF32 {
+ break
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpWasmF64PromoteF32 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpWasmF32Copysign)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
diff --git a/test/codegen/floats.go b/test/codegen/floats.go
index bf9e70d..3f27e54 100644
--- a/test/codegen/floats.go
+++ b/test/codegen/floats.go
@@ -283,15 +283,22 @@
func WideCeilNarrow(x float32) float32 {
// amd64/v3:"ROUNDSS"
// arm64:"FRINTPS"
+ // wasm:"F32Ceil"
return float32(math.Ceil(float64(x)))
}

func WideTruncNarrow(x float32) float32 {
// amd64/v3:"ROUNDSS"
// arm64:"FRINTZS"
+ // wasm:"F32Trunc"
return float32(math.Trunc(float64(x)))
}

+func WideCopysignNarrow(x, y float32) float32 {
+ // wasm:"F32Copysign"
+ return float32(math.Copysign(float64(x), float64(y)))
+}
+
// ------------------------ //
// Subnormal tests //
// ------------------------ //