Joe Tsai has uploaded this change for review.
reflect: optimize for maps with string keys
Over 80% of all Go map types use a string as the key.
The Go runtime already has a specialized implementation for such maps
in runtime/map_faststr.go. However, the Go reflection implementation
has not historically made use of that implementation.
This CL plumbs the appropriate logic to be accessible from Go reflection
so that it can benefit as well.
name old time/op new time/op delta
MapStringKey/MapIndex-4 1.99us 1.35us -32.32% (p=0.008 n=5+5)
MapStringKey/SetMapIndex-4 3.57us 2.60us -27.26% (p=0.008 n=5+5)
Change-Id: I5495d48948d8caf2d004a03ae1820ab5f8729670
---
M src/reflect/all_test.go
M src/reflect/value.go
M src/runtime/map.go
3 files changed, 101 insertions(+), 0 deletions(-)
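For context (illustrative, not part of the CL): a reflect-based lookup on a map with string keys, such as the minimal example below, goes through reflect's mapaccess plumbing, and this change lets that path use the runtime's map_faststr implementation.
```
package main

import (
	"fmt"
	"reflect"
)

// Minimal sketch of the operation this change speeds up: a MapIndex lookup
// on a map[string]T performed through the reflect package.
func main() {
	m := map[string]int{"Alabama": 1, "Alaska": 2}

	vm := reflect.ValueOf(m)
	vk := reflect.ValueOf("Alaska")

	if e := vm.MapIndex(vk); e.IsValid() {
		fmt.Println(e.Int()) // 2
	}
}
```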
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 40ac6a9..e468673 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -7042,6 +7042,45 @@
})
}
+func BenchmarkMapStringKey(b *testing.B) {
+ var states = []string{
+ "Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado",
+ "Connecticut", "Delaware", "Florida", "Georgia", "Hawaii", "Idaho",
+ "Illinois", "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana",
+ "Maine", "Maryland", "Massachusetts", "Michigan", "Minnesota",
+ "Mississippi", "Missouri", "Montana", "Nebraska", "Nevada",
+ "New Hampshire", "New Jersey", "New Mexico", "New York",
+ "North Carolina", "North Dakota", "Ohio", "Oklahoma", "Oregon",
+ "Pennsylvania", "Rhode Island", "South Carolina", "South Dakota",
+ "Tennessee", "Texas", "Utah", "Vermont", "Virginia", "Washington",
+ "West Virginia", "Wisconsin", "Wyoming",
+ }
+ m := make(map[string]*int)
+ for _, s := range states {
+ m[s] = nil
+ }
+
+ vm := ValueOf(m)
+ vks := ValueOf(states)
+ vv := ValueOf((*int)(nil))
+ b.Run("MapIndex", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for j := range states {
+ vm.MapIndex(vks.Index(j))
+ }
+ }
+ })
+ b.Run("SetMapIndex", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for j := range states {
+ vm.SetMapIndex(vks.Index(j), vv)
+ }
+ }
+ })
+}
+
func TestSwapper(t *testing.T) {
type I int
var a, b, c I
diff --git a/src/reflect/value.go b/src/reflect/value.go
index a8274cc..c7e307b 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -1515,6 +1515,19 @@
// considered unexported. This is consistent with the
// behavior for structs, which allow read but not write
// of unexported fields.
+
+ if isStringKey := key.kind() == String && tt.key.Kind() == String; isStringKey && false {
+ k := *(*string)(key.ptr)
+ e := mapaccess_faststr(v.typ, v.pointer(), k)
+ if e == nil {
+ return Value{}
+ }
+ typ := tt.elem
+ fl := (v.flag | key.flag).ro()
+ fl |= flag(typ.Kind())
+ return copyVal(typ, fl, e)
+ }
+
key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
var k unsafe.Pointer
@@ -2079,6 +2092,25 @@
v.mustBeExported()
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ))
+
+ if isStringKey := key.kind() == String && tt.key.Kind() == String; isStringKey && false {
+ k := *(*string)(key.ptr)
+ if elem.typ == nil {
+ mapdelete_faststr(v.typ, v.pointer(), k)
+ return
+ }
+ elem.mustBeExported()
+ elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
+ var e unsafe.Pointer
+ if elem.flag&flagIndir != 0 {
+ e = elem.ptr
+ } else {
+ e = unsafe.Pointer(&elem.ptr)
+ }
+ mapassign_faststr(v.typ, v.pointer(), k, e)
+ return
+ }
+
key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
var k unsafe.Pointer
if key.flag&flagIndir != 0 {
@@ -3211,11 +3243,20 @@
func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
//go:noescape
+func mapaccess_faststr(t *rtype, m unsafe.Pointer, key string) (val unsafe.Pointer)
+
+//go:noescape
func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
//go:noescape
+func mapassign_faststr(t *rtype, m unsafe.Pointer, key string, val unsafe.Pointer)
+
+//go:noescape
func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
+//go:noescape
+func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string)
+
// m escapes into the return value, but the caller of mapiterinit
// doesn't let the return value escape.
//go:noescape
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 0cad1a3..3954ba3 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -1324,17 +1324,38 @@
return elem
}
+//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
+func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
+ elem, ok := mapaccess2_faststr(t, h, key)
+ if !ok {
+ // reflect wants nil for a missing element
+ elem = nil
+ }
+ return elem
+}
+
//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
p := mapassign(t, h, key)
typedmemmove(t.elem, p, elem)
}
+//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr
+func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
+ p := mapassign_faststr(t, h, key)
+ typedmemmove(t.elem, p, elem)
+}
+
//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
mapdelete(t, h, key)
}
+//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
+func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
+ mapdelete_faststr(t, h, key)
+}
+
//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
it := new(hiter)
Attention is currently required from: Joe Tsai.
1 comment:
File src/reflect/value.go:
Patch Set #1, Line 1519: && false
Is this intended to be a no-op?
Attention is currently required from: Joe Tsai.
Joe Tsai uploaded patch set #2 to this change.
reflect: optimize for maps with string keys
Over 80% of all Go map types use a string as the key.
The Go runtime already has a specialized implementation for such maps
in runtime/map_faststr.go. However, the Go reflection implementation
has not historically made use of that implementation.
This CL plumbs the appropriate logic to be accessible from Go reflection
so that it can benefit as well.
name old time/op new time/op delta
Map/StringKeys/MapIndex-4 4.65us ± 5% 2.95us ± 3% -36.50% (p=0.016 n=4+5)
Map/StringKeys/SetMapIndex-4 7.47us ± 5% 5.27us ± 2% -29.40% (p=0.008 n=5+5)
Map/Uint64Keys/MapIndex-4 3.79us ± 3% 3.75us ± 2% ~ (p=0.548 n=5+5)
Map/Uint64Keys/SetMapIndex-4 6.13us ± 3% 6.09us ± 1% ~ (p=0.746 n=5+5)
Change-Id: I5495d48948d8caf2d004a03ae1820ab5f8729670
---
M src/reflect/all_test.go
M src/reflect/value.go
M src/runtime/map.go
3 files changed, 109 insertions(+), 0 deletions(-)
Attention is currently required from: Dan Kortschak.
Patch set 2: Run-TryBot +1
1 comment:
File src/reflect/value.go:
Patch Set #1, Line 1519: && false
Is this intended to be a no-op?
My mistake. I disabled this code path for benchmarking purposes and forgot to re-enable it.
Attention is currently required from: Keith Randall.
1 comment:
Patchset:
RELNOTES=yes
Attention is currently required from: Joe Tsai.
3 comments:
File src/reflect/all_test.go:
Just call this k. It's confusing because this is just a single key, whereas below "ks" is a slice of keys.
(or rename "ks" below to "keys"?)
File src/reflect/value.go:
Patch Set #2, Line 1519: isStringKey := key.kind() == String && tt.key.Kind() == String; isStringKey
You can just do "if key.kind() == String && tt.key.Kind() == String {"
Patch Set #2, Line 1525: typ := tt.elem
Could you share this part with the code below? Put the non-string lookup code in an else block, I guess.
Attention is currently required from: Joe Tsai.
Joe Tsai uploaded patch set #3 to this change.
reflect: optimize for maps with string keys
Over 80% of all Go map types use a string as the key.
The Go runtime already has a specialized implementation for such maps
in runtime/map_faststr.go. However, the Go reflection implementation
has not historically made use of that implementation.
This CL plumbs the appropriate logic to be accessible from Go reflection
so that it can benefit as well.
name old time/op new time/op delta
Map/StringKeys/MapIndex-4 4.65us ± 5% 2.95us ± 3% -36.50% (p=0.016 n=4+5)
Map/StringKeys/SetMapIndex-4 7.47us ± 5% 5.27us ± 2% -29.40% (p=0.008 n=5+5)
Map/Uint64Keys/MapIndex-4 3.79us ± 3% 3.75us ± 2% ~ (p=0.548 n=5+5)
Map/Uint64Keys/SetMapIndex-4 6.13us ± 3% 6.09us ± 1% ~ (p=0.746 n=5+5)
Change-Id: I5495d48948d8caf2d004a03ae1820ab5f8729670
---
M src/reflect/all_test.go
M src/reflect/value.go
M src/runtime/map.go
3 files changed, 108 insertions(+), 6 deletions(-)
Attention is currently required from: Keith Randall.
Patch set 2: Run-TryBot +1, Trust +1
4 comments:
File src/reflect/all_test.go:
Just call this k. […]
Done
File src/reflect/value.go:
Patch Set #2, Line 1519: isStringKey := key.kind() == String && tt.key.Kind() == String; isStringKey
You can just do "if key.kind() == String && tt.key. […]
Done
Patch Set #2, Line 1525: typ := tt.elem
Could you share this part with the code below? Put the non-string lookup code in an else block, I gu […]
Done. The downside of this approach is that it's not symmetrical to the code changes made to SetMapIndex.
That logic is harder to share code for, since you'd have to branch on isStringKey twice (once for map deletion, and again later for map assignment). Also, the `k` variable is an `unsafe.Pointer` in the generic case, but a `string` in the other case.
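(A minimal user-level illustration, not code from the CL, of the two SetMapIndex behaviors that force branching twice on the fast path: a zero Value element deletes the key, while a valid element assigns it.)
```
package main

import (
	"fmt"
	"reflect"
)

// Illustration of why SetMapIndex has two exits: passing a zero Value as the
// element deletes the key, while a valid element assigns it.
func main() {
	m := map[string]int{"a": 1}
	vm := reflect.ValueOf(m)

	// Assign: key and element are both valid Values.
	vm.SetMapIndex(reflect.ValueOf("b"), reflect.ValueOf(2))

	// Delete: a zero Value element removes the key.
	vm.SetMapIndex(reflect.ValueOf("a"), reflect.Value{})

	fmt.Println(m) // map[b:2]
}
```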
Patch Set #2, Line 1525: typ := tt.elem
Could you share this part with the code below? Put the non-string lookup code in an else block, I gu […]
Done
Attention is currently required from: Joe Tsai.
Patch set 3: Code-Review +2
Attention is currently required from: Joe Tsai.
Joe Tsai uploaded patch set #4 to this change.
reflect: optimize for maps with string keys
Over 80% of all Go map types use a string as the key.
The Go runtime already has a specialized implementation for such maps
in runtime/map_faststr.go. However, the Go reflection implementation
has not historically made use of that implementation.
This CL plumbs the appropriate logic to be accessible from Go reflection
so that it can benefit as well.
name old time/op new time/op delta
Map/StringKeys/MapIndex-4 4.65us ± 5% 2.95us ± 3% -36.50% (p=0.016 n=4+5)
Map/StringKeys/SetMapIndex-4 7.47us ± 5% 5.27us ± 2% -29.40% (p=0.008 n=5+5)
Map/Uint64Keys/MapIndex-4 3.79us ± 3% 3.75us ± 2% ~ (p=0.548 n=5+5)
Map/Uint64Keys/SetMapIndex-4 6.13us ± 3% 6.09us ± 1% ~ (p=0.746 n=5+5)
Change-Id: I5495d48948d8caf2d004a03ae1820ab5f8729670
---
M src/reflect/all_test.go
M src/reflect/value.go
M src/runtime/map.go
3 files changed, 108 insertions(+), 6 deletions(-)
Patch set 4: Run-TryBot +1
Joe Tsai submitted this change.
3 is the latest approved patch-set.
The change was submitted with unreviewed changes in the following files:
```
The name of the file: src/reflect/value.go
Insertions: 0, Deletions: 2.
@@ -508,7 +508,7 @@
// Copy values to "integer registers."
if v.flag&flagIndir != 0 {
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(regArgs.IntRegArgAddr(st.ireg, st.size), offset, st.size)
+ intToReg(&regArgs, st.ireg, st.size, offset)
} else {
if st.kind == abiStepPointer {
// Duplicate this pointer in the pointer area of the
@@ -524,7 +524,7 @@
panic("attempted to copy pointer to FP register")
}
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(regArgs.FloatRegArgAddr(st.freg, st.size), offset, st.size)
+ floatToReg(&regArgs, st.freg, st.size, offset)
default:
panic("unknown ABI part kind")
}
@@ -610,13 +610,13 @@
switch st.kind {
case abiStepIntReg:
offset := add(s, st.offset, "precomputed value offset")
- memmove(offset, regArgs.IntRegArgAddr(st.ireg, st.size), st.size)
+ intFromReg(&regArgs, st.ireg, st.size, offset)
case abiStepPointer:
s := add(s, st.offset, "precomputed value offset")
*((*unsafe.Pointer)(s)) = regArgs.Ptrs[st.ireg]
case abiStepFloatReg:
offset := add(s, st.offset, "precomputed value offset")
- memmove(offset, regArgs.FloatRegArgAddr(st.freg, st.size), st.size)
+ floatFromReg(&regArgs, st.freg, st.size, offset)
case abiStepStack:
panic("register-based return value has stack component")
default:
@@ -698,13 +698,13 @@
switch st.kind {
case abiStepIntReg:
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(offset, regs.IntRegArgAddr(st.ireg, st.size), st.size)
+ intFromReg(regs, st.ireg, st.size, offset)
case abiStepPointer:
s := add(v.ptr, st.offset, "precomputed value offset")
*((*unsafe.Pointer)(s)) = regs.Ptrs[st.ireg]
case abiStepFloatReg:
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(offset, regs.FloatRegArgAddr(st.freg, st.size), st.size)
+ floatFromReg(regs, st.freg, st.size, offset)
case abiStepStack:
panic("register-based return value has stack component")
default:
@@ -784,7 +784,7 @@
// Copy values to "integer registers."
if v.flag&flagIndir != 0 {
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(regs.IntRegArgAddr(st.ireg, st.size), offset, st.size)
+ intToReg(regs, st.ireg, st.size, offset)
} else {
// Only populate the Ints space on the return path.
// This is safe because out is kept alive until the
@@ -799,7 +799,7 @@
panic("attempted to copy pointer to FP register")
}
offset := add(v.ptr, st.offset, "precomputed value offset")
- memmove(regs.FloatRegArgAddr(st.freg, st.size), offset, st.size)
+ floatToReg(regs, st.freg, st.size, offset)
default:
panic("unknown ABI part kind")
}
@@ -982,9 +982,9 @@
methodRegs.Ptrs[mStep.ireg] = *(*unsafe.Pointer)(from)
fallthrough // We need to make sure this ends up in Ints, too.
case abiStepIntReg:
- memmove(methodRegs.IntRegArgAddr(mStep.ireg, mStep.size), from, mStep.size)
+ intToReg(&methodRegs, mStep.ireg, mStep.size, from)
case abiStepFloatReg:
- memmove(methodRegs.FloatRegArgAddr(mStep.freg, mStep.size), from, mStep.size)
+ floatToReg(&methodRegs, mStep.freg, mStep.size, from)
default:
panic("unexpected method step")
}
@@ -1000,9 +1000,9 @@
// Do the pointer copy directly so we get a write barrier.
*(*unsafe.Pointer)(to) = valueRegs.Ptrs[vStep.ireg]
case abiStepIntReg:
- memmove(to, valueRegs.IntRegArgAddr(vStep.ireg, vStep.size), vStep.size)
+ intFromReg(valueRegs, vStep.ireg, vStep.size, to)
case abiStepFloatReg:
- memmove(to, valueRegs.FloatRegArgAddr(vStep.freg, vStep.size), vStep.size)
+ floatFromReg(valueRegs, vStep.freg, vStep.size, to)
default:
panic("unexpected value step")
}
@@ -1555,11 +1555,12 @@
if m != nil {
mlen = maplen(m)
}
- it := mapiterinit(v.typ, m)
+ var it hiter
+ mapiterinit(v.typ, m, &it)
a := make([]Value, mlen)
var i int
for i = 0; i < len(a); i++ {
- key := mapiterkey(it)
+ key := mapiterkey(&it)
if key == nil {
// Someone deleted an entry from the map since we
// called maplen above. It's a data race, but nothing
@@ -1567,41 +1568,67 @@
break
}
a[i] = copyVal(keyType, fl, key)
- mapiternext(it)
+ mapiternext(&it)
}
return a[:i]
}
+// hiter's structure matches runtime.hiter's structure.
+// Having a clone here allows us to embed a map iterator
+// inside type MapIter so that MapIters can be re-used
+// without doing any allocations.
+type hiter struct {
+ key unsafe.Pointer
+ elem unsafe.Pointer
+ t unsafe.Pointer
+ h unsafe.Pointer
+ buckets unsafe.Pointer
+ bptr unsafe.Pointer
+ overflow *[]unsafe.Pointer
+ oldoverflow *[]unsafe.Pointer
+ startBucket uintptr
+ offset uint8
+ wrapped bool
+ B uint8
+ i uint8
+ bucket uintptr
+ checkBucket uintptr
+}
+
+func (h hiter) initialized() bool {
+ return h.t != nil
+}
+
// A MapIter is an iterator for ranging over a map.
// See Value.MapRange.
type MapIter struct {
- m Value
- it unsafe.Pointer
+ m Value
+ hiter hiter
}
-// Key returns the key of the iterator's current map entry.
-func (it *MapIter) Key() Value {
- if it.it == nil {
+// Key returns the key of iter's current map entry.
+func (iter *MapIter) Key() Value {
+ if !iter.hiter.initialized() {
panic("MapIter.Key called before Next")
}
- iterkey := mapiterkey(it.it)
+ iterkey := mapiterkey(&iter.hiter)
if iterkey == nil {
panic("MapIter.Key called on exhausted iterator")
}
- t := (*mapType)(unsafe.Pointer(it.m.typ))
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
ktype := t.key
- return copyVal(ktype, it.m.flag.ro()|flag(ktype.Kind()), iterkey)
+ return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
}
-// SetKey assigns dst to the key of the iterator's current map entry.
-// It is equivalent to dst.Set(it.Key()), but it avoids allocating a new Value.
+// SetKey assigns dst to the key of iter's current map entry.
+// It is equivalent to dst.Set(i.Key()), but it avoids allocating a new Value.
// As in Go, the key must be assignable to dst's type.
-func (it *MapIter) SetKey(dst Value) {
- if it.it == nil {
+func (iter *MapIter) SetKey(dst Value) {
+ if !iter.hiter.initialized() {
panic("MapIter.SetKey called before Next")
}
- iterkey := mapiterkey(it.it)
+ iterkey := mapiterkey(&iter.hiter)
if iterkey == nil {
panic("MapIter.SetKey called on exhausted iterator")
}
@@ -1612,37 +1639,37 @@
target = dst.ptr
}
- t := (*mapType)(unsafe.Pointer(it.m.typ))
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
ktype := t.key
- key := Value{ktype, iterkey, it.m.flag | flag(ktype.Kind())}
+ key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind())}
key = key.assignTo("reflect.MapIter.SetKey", dst.typ, target)
typedmemmove(dst.typ, dst.ptr, key.ptr)
}
-// Value returns the value of the iterator's current map entry.
-func (it *MapIter) Value() Value {
- if it.it == nil {
+// Value returns the value of iter's current map entry.
+func (iter *MapIter) Value() Value {
+ if !iter.hiter.initialized() {
panic("MapIter.Value called before Next")
}
- iterelem := mapiterelem(it.it)
+ iterelem := mapiterelem(&iter.hiter)
if iterelem == nil {
panic("MapIter.Value called on exhausted iterator")
}
- t := (*mapType)(unsafe.Pointer(it.m.typ))
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
vtype := t.elem
- return copyVal(vtype, it.m.flag.ro()|flag(vtype.Kind()), iterelem)
+ return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
}
-// SetValue assigns dst to the value of the iterator's current map entry.
-// It is equivalent to dst.Set(it.Value()), but it avoids allocating a new Value.
+// SetValue assigns dst to the value of iter's current map entry.
+// It is equivalent to dst.Set(i.Value()), but it avoids allocating a new Value.
// As in Go, the value must be assignable to dst's type.
-func (it *MapIter) SetValue(dst Value) {
- if it.it == nil {
+func (iter *MapIter) SetValue(dst Value) {
+ if !iter.hiter.initialized() {
panic("MapIter.SetValue called before Next")
}
- iterelem := mapiterelem(it.it)
+ iterelem := mapiterelem(&iter.hiter)
if iterelem == nil {
panic("MapIter.SetValue called on exhausted iterator")
}
@@ -1653,27 +1680,42 @@
target = dst.ptr
}
- t := (*mapType)(unsafe.Pointer(it.m.typ))
+ t := (*mapType)(unsafe.Pointer(iter.m.typ))
vtype := t.elem
- elem := Value{vtype, iterelem, it.m.flag | flag(vtype.Kind())}
+ elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind())}
elem = elem.assignTo("reflect.MapIter.SetValue", dst.typ, target)
typedmemmove(dst.typ, dst.ptr, elem.ptr)
}
// Next advances the map iterator and reports whether there is another
-// entry. It returns false when the iterator is exhausted; subsequent
+// entry. It returns false when iter is exhausted; subsequent
// calls to Key, Value, or Next will panic.
-func (it *MapIter) Next() bool {
- if it.it == nil {
- it.it = mapiterinit(it.m.typ, it.m.pointer())
+func (iter *MapIter) Next() bool {
+ if !iter.m.IsValid() {
+ panic("MapIter.Next called on an iterator that does not have an associated map Value")
+ }
+ if !iter.hiter.initialized() {
+ mapiterinit(iter.m.typ, iter.m.pointer(), &iter.hiter)
} else {
- if mapiterkey(it.it) == nil {
+ if mapiterkey(&iter.hiter) == nil {
panic("MapIter.Next called on exhausted iterator")
}
- mapiternext(it.it)
+ mapiternext(&iter.hiter)
}
- return mapiterkey(it.it) != nil
+ return mapiterkey(&iter.hiter) != nil
+}
+
+// Reset modifies iter to iterate over v.
+// It panics if v's Kind is not Map and v is not the zero Value.
+// Reset(Value{}) causes iter to not to refer to any map,
+// which may allow the previously iterated-over map to be garbage collected.
+func (iter *MapIter) Reset(v Value) {
+ if v.IsValid() {
+ v.mustBe(Map)
+ }
+ iter.m = v
+ iter.hiter = hiter{}
}
// MapRange returns a range iterator for a map.
@@ -3250,19 +3292,17 @@
//go:noescape
func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string)
-// m escapes into the return value, but the caller of mapiterinit
-// doesn't let the return value escape.
//go:noescape
-func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer
+func mapiterinit(t *rtype, m unsafe.Pointer, it *hiter)
//go:noescape
-func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer)
+func mapiterkey(it *hiter) (key unsafe.Pointer)
//go:noescape
-func mapiterelem(it unsafe.Pointer) (elem unsafe.Pointer)
+func mapiterelem(it *hiter) (elem unsafe.Pointer)
//go:noescape
-func mapiternext(it unsafe.Pointer)
+func mapiternext(it *hiter)
//go:noescape
func maplen(m unsafe.Pointer) int
```
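The diff above also shows MapIter gaining an embedded hiter and a Reset method so that iterators can be re-used without allocating. A small user-level sketch of that re-use (illustrative only; assumes a Go release that includes MapIter.Reset):
```
package main

import (
	"fmt"
	"reflect"
)

// Sketch: one MapIter walks several maps by calling Reset between them,
// instead of allocating a fresh iterator per map.
func main() {
	maps := []map[string]int{
		{"a": 1},
		{"b": 2},
	}

	iter := reflect.ValueOf(maps[0]).MapRange()
	for _, m := range maps {
		iter.Reset(reflect.ValueOf(m))
		for iter.Next() {
			fmt.Println(iter.Key(), iter.Value())
		}
	}

	// Resetting to the zero Value drops the reference to the last map,
	// which may allow it to be garbage collected.
	iter.Reset(reflect.Value{})
}
```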
reflect: optimize for maps with string keys
Over 80% of all Go map types use a string as the key.
The Go runtime already has a specialized implementation for such maps
in runtime/map_faststr.go. However, the Go reflection implementation
has not historically made use of that implementation.
This CL plumbs the appropriate logic to be accessible from Go reflection
so that it can benefit as well.
name old time/op new time/op delta
Map/StringKeys/MapIndex-4 4.65us ± 5% 2.95us ± 3% -36.50% (p=0.016 n=4+5)
Map/StringKeys/SetMapIndex-4 7.47us ± 5% 5.27us ± 2% -29.40% (p=0.008 n=5+5)
Map/Uint64Keys/MapIndex-4 3.79us ± 3% 3.75us ± 2% ~ (p=0.548 n=5+5)
Map/Uint64Keys/SetMapIndex-4 6.13us ± 3% 6.09us ± 1% ~ (p=0.746 n=5+5)
Change-Id: I5495d48948d8caf2d004a03ae1820ab5f8729670
Reviewed-on: https://go-review.googlesource.com/c/go/+/345486
Trust: Joe Tsai <joe...@digital-static.net>
Run-TryBot: Joe Tsai <joe...@digital-static.net>
TryBot-Result: Go Bot <go...@golang.org>
Reviewed-by: Keith Randall <k...@golang.org>
---
M src/reflect/all_test.go
M src/reflect/value.go
M src/runtime/map.go
3 files changed, 108 insertions(+), 6 deletions(-)
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 293d036..e92f711 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -7050,6 +7050,53 @@
})
}
+func BenchmarkMap(b *testing.B) {
+ type V *int
+ value := ValueOf((V)(nil))
+ stringKeys := []string{}
+ mapOfStrings := map[string]V{}
+ uint64Keys := []uint64{}
+ mapOfUint64s := map[uint64]V{}
+ for i := 0; i < 100; i++ {
+ stringKey := fmt.Sprintf("key%d", i)
+ stringKeys = append(stringKeys, stringKey)
+ mapOfStrings[stringKey] = nil
+
+ uint64Key := uint64(i)
+ uint64Keys = append(uint64Keys, uint64Key)
+ mapOfUint64s[uint64Key] = nil
+ }
+
+ tests := []struct {
+ label string
+ m, keys, value Value
+ }{
+ {"StringKeys", ValueOf(mapOfStrings), ValueOf(stringKeys), value},
+ {"Uint64Keys", ValueOf(mapOfUint64s), ValueOf(uint64Keys), value},
+ }
+
+ for _, tt := range tests {
+ b.Run(tt.label, func(b *testing.B) {
+ b.Run("MapIndex", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for j := tt.keys.Len() - 1; j >= 0; j-- {
+ tt.m.MapIndex(tt.keys.Index(j))
+ }
+ }
+ })
+ b.Run("SetMapIndex", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for j := tt.keys.Len() - 1; j >= 0; j-- {
+ tt.m.SetMapIndex(tt.keys.Index(j), tt.value)
+ }
+ }
+ })
+ })
+ }
+}
+
func TestSwapper(t *testing.T) {
type I int
var a, b, c I
diff --git a/src/reflect/value.go b/src/reflect/value.go
index bf29d1b..6e9aaab 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -1515,15 +1515,21 @@
// considered unexported. This is consistent with the
// behavior for structs, which allow read but not write
// of unexported fields.
- key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
- var k unsafe.Pointer
- if key.flag&flagIndir != 0 {
- k = key.ptr
+ var e unsafe.Pointer
+ if key.kind() == String && tt.key.Kind() == String {
+ k := *(*string)(key.ptr)
+ e = mapaccess_faststr(v.typ, v.pointer(), k)
} else {
- k = unsafe.Pointer(&key.ptr)
+ key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)
+ var k unsafe.Pointer
+ if key.flag&flagIndir != 0 {
+ k = key.ptr
+ } else {
+ k = unsafe.Pointer(&key.ptr)
+ }
+ e = mapaccess(v.typ, v.pointer(), k)
}
- e := mapaccess(v.typ, v.pointer(), k)
if e == nil {
return Value{}
}
@@ -2121,6 +2127,25 @@
v.mustBeExported()
key.mustBeExported()
tt := (*mapType)(unsafe.Pointer(v.typ))
+
+ if key.kind() == String && tt.key.Kind() == String {
+ k := *(*string)(key.ptr)
+ if elem.typ == nil {
+ mapdelete_faststr(v.typ, v.pointer(), k)
+ return
+ }
+ elem.mustBeExported()
+ elem = elem.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
+ var e unsafe.Pointer
+ if elem.flag&flagIndir != 0 {
+ e = elem.ptr
+ } else {
+ e = unsafe.Pointer(&elem.ptr)
+ }
+ mapassign_faststr(v.typ, v.pointer(), k, e)
+ return
+ }
+
key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
var k unsafe.Pointer
if key.flag&flagIndir != 0 {
@@ -3253,12 +3278,21 @@
func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
//go:noescape
+func mapaccess_faststr(t *rtype, m unsafe.Pointer, key string) (val unsafe.Pointer)
+
+//go:noescape
func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
//go:noescape
+func mapassign_faststr(t *rtype, m unsafe.Pointer, key string, val unsafe.Pointer)
+
+//go:noescape
func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
//go:noescape
+func mapdelete_faststr(t *rtype, m unsafe.Pointer, key string)
+
+//go:noescape
func mapiterinit(t *rtype, m unsafe.Pointer, it *hiter)
//go:noescape
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 59b803d..985c297 100644
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
mapiterinit(t, h, it)