Joel Sing submitted this change.
internal/bytealg: correct alignment checks for compare/memequal on riscv64
On riscv64 we need 8 byte alignment for 8 byte loads - the existing check
only ensured 4 byte alignment, which potentially results in unaligned loads
being performed. Unaligned loads incur a significant performance penalty
due to the resulting kernel traps and fixups.
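For illustration only (not part of this change), the intent of the alignment
check can be sketched in Go; the helper name below is made up:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // sameAlignment8 mirrors the intent of the AND/BNE/BEQZ sequence in the
    // assembly below: both pointers must share the same offset within an
    // 8-byte word, and only a zero offset allows the 8-byte-at-a-time loop.
    // The old mask of 3 only guaranteed 4-byte alignment, so 8-byte loads
    // could still be unaligned.
    func sameAlignment8(a, b unsafe.Pointer) (sameOffset, aligned bool) {
        offA := uintptr(a) & 7 // previously & 3
        offB := uintptr(b) & 7
        return offA == offB, offA == 0
    }

    func main() {
        buf := make([]byte, 16)
        // The two offsets differ by exactly 8 bytes, so sameOffset is true;
        // aligned depends on the base alignment of buf.
        fmt.Println(sameAlignment8(unsafe.Pointer(&buf[1]), unsafe.Pointer(&buf[9])))
    }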
Adjust BenchmarkCompareBytesBigUnaligned so that this issue would have been
more readily visible.
Updates #50615
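The numbers below are in benchstat comparison form (n=5+5, i.e. five runs of
each side). Assuming benchstat (golang.org/x/perf/cmd/benchstat) is available,
comparable figures can be gathered with something like:

    go test -run='^$' -bench=CompareBytesBigUnaligned -count=5 bytes > old.txt
    (apply this change and rebuild, then)
    go test -run='^$' -bench=CompareBytesBigUnaligned -count=5 bytes > new.txt
    benchstat old.txt new.txt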
name                                 old time/op    new time/op    delta
CompareBytesBigUnaligned/offset=1-4    6.98ms ± 5%    6.84ms ± 3%      ~     (p=0.319 n=5+5)
CompareBytesBigUnaligned/offset=2-4    6.75ms ± 1%    6.99ms ± 4%      ~     (p=0.063 n=5+5)
CompareBytesBigUnaligned/offset=3-4    6.84ms ± 1%    6.74ms ± 1%    -1.48%  (p=0.003 n=5+5)
CompareBytesBigUnaligned/offset=4-4     146ms ± 1%       7ms ± 6%   -95.08%  (p=0.000 n=5+5)
CompareBytesBigUnaligned/offset=5-4    7.05ms ± 5%    6.75ms ± 1%      ~     (p=0.079 n=5+5)
CompareBytesBigUnaligned/offset=6-4    7.11ms ± 5%    6.89ms ± 5%      ~     (p=0.177 n=5+5)
CompareBytesBigUnaligned/offset=7-4    7.14ms ± 5%    6.91ms ± 6%      ~     (p=0.165 n=5+5)

name                                 old speed      new speed        delta
CompareBytesBigUnaligned/offset=1-4    150MB/s ± 5%    153MB/s ± 3%       ~       (p=0.336 n=5+5)
CompareBytesBigUnaligned/offset=2-4    155MB/s ± 1%    150MB/s ± 4%       ~       (p=0.058 n=5+5)
CompareBytesBigUnaligned/offset=3-4    153MB/s ± 1%    156MB/s ± 1%     +1.51%    (p=0.004 n=5+5)
CompareBytesBigUnaligned/offset=4-4   7.16MB/s ± 1%  145.79MB/s ± 6%  +1936.23%   (p=0.000 n=5+5)
CompareBytesBigUnaligned/offset=5-4    149MB/s ± 5%    155MB/s ± 1%       ~       (p=0.078 n=5+5)
CompareBytesBigUnaligned/offset=6-4    148MB/s ± 5%    152MB/s ± 5%       ~       (p=0.175 n=5+5)
CompareBytesBigUnaligned/offset=7-4    147MB/s ± 5%    152MB/s ± 6%       ~       (p=0.160 n=5+5)
Change-Id: I2c859e061919db482318ce63b85b808aa973a9ba
Reviewed-on: https://go-review.googlesource.com/c/go/+/431099
Reviewed-by: Meng Zhuo <m...@golangcn.org>
TryBot-Result: Gopher Robot <go...@golang.org>
Run-TryBot: Joel Sing <jo...@sing.id.au>
Reviewed-by: Bryan Mills <bcm...@google.com>
Reviewed-by: Cherry Mui <cher...@google.com>
---
M src/bytes/compare_test.go
M src/internal/bytealg/compare_riscv64.s
M src/internal/bytealg/equal_riscv64.s
3 files changed, 60 insertions(+), 8 deletions(-)
diff --git a/src/bytes/compare_test.go b/src/bytes/compare_test.go
index a595d57..a0150ab 100644
--- a/src/bytes/compare_test.go
+++ b/src/bytes/compare_test.go
@@ -6,6 +6,7 @@
 
 import (
 	. "bytes"
+	"fmt"
 	"internal/testenv"
 	"testing"
 )
@@ -213,22 +214,30 @@
 	}
 }
 
-func BenchmarkCompareBytesBigUnaligned(b *testing.B) {
+func benchmarkCompareBytesBigUnaligned(b *testing.B, offset int) {
 	b.StopTimer()
 	b1 := make([]byte, 0, 1<<20)
 	for len(b1) < 1<<20 {
 		b1 = append(b1, "Hello Gophers!"...)
 	}
-	b2 := append([]byte("hello"), b1...)
+	b2 := append([]byte("12345678")[:offset], b1...)
 	b.StartTimer()
-	for i := 0; i < b.N; i++ {
-		if Compare(b1, b2[len("hello"):]) != 0 {
+	for j := 0; j < b.N; j++ {
+		if Compare(b1, b2[offset:]) != 0 {
 			b.Fatal("b1 != b2")
 		}
 	}
 	b.SetBytes(int64(len(b1)))
 }
 
+func BenchmarkCompareBytesBigUnaligned(b *testing.B) {
+	for i := 1; i < 8; i++ {
+		b.Run(fmt.Sprintf("offset=%d", i), func(b *testing.B) {
+			benchmarkCompareBytesBigUnaligned(b, i)
+		})
+	}
+}
+
 func BenchmarkCompareBytesBig(b *testing.B) {
 	b.StopTimer()
 	b1 := make([]byte, 0, 1<<20)
diff --git a/src/internal/bytealg/compare_riscv64.s b/src/internal/bytealg/compare_riscv64.s
index 7d2f8d6..e616577 100644
--- a/src/internal/bytealg/compare_riscv64.s
+++ b/src/internal/bytealg/compare_riscv64.s
@@ -58,8 +58,8 @@
 	BLT	X5, X6, loop4_check
 
 	// Check alignment - if alignment differs we have to do one byte at a time.
-	AND	$3, X10, X7
-	AND	$3, X12, X8
+	AND	$7, X10, X7
+	AND	$7, X12, X8
 	BNE	X7, X8, loop4_check
 	BEQZ	X7, loop32_check
 
diff --git a/src/internal/bytealg/equal_riscv64.s b/src/internal/bytealg/equal_riscv64.s
index 77202d6..1e070be 100644
--- a/src/internal/bytealg/equal_riscv64.s
+++ b/src/internal/bytealg/equal_riscv64.s
@@ -42,8 +42,8 @@
 	BLT	X12, X23, loop4_check
 
 	// Check alignment - if alignment differs we have to do one byte at a time.
-	AND	$3, X10, X9
-	AND	$3, X11, X19
+	AND	$7, X10, X9
+	AND	$7, X11, X19
 	BNE	X9, X19, loop4_check
 	BEQZ	X9, loop32_check
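
As a quick, hypothetical way to exercise both the aligned and unaligned code
paths from Go (not part of this change), equal slices can be compared at every
starting offset within an 8-byte word:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        base := bytes.Repeat([]byte("Hello Gophers!"), 1<<16)
        for offset := 0; offset < 8; offset++ {
            // Copy base into a buffer where the data starts offset bytes
            // into the allocation, so the two slices usually end up with
            // different alignments.
            shifted := append(make([]byte, offset), base...)[offset:]
            if bytes.Compare(base, shifted) != 0 || !bytes.Equal(base, shifted) {
                fmt.Println("mismatch at offset", offset)
            }
        }
        fmt.Println("done")
    }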