[go] runtime: use SwissTable


张云浩 (Gerrit)

Aug 30, 2022, 2:50:23 AM
to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

张云浩 has uploaded this change for review.

View Change

runtime: use SwissTable

We propose using SwissTable in the runtime to replace the original implementation of the hashmap.

SwissTable is now the default hash table implementation in both Abseil and Rust. We borrowed ideas from those two implementations and combined them with some of the optimizations in Go's original hashmap to construct the current implementation. A rough sketch of the group-matching idea follows the references below.

- abseil: https://abseil.io/about/design/swisstables
- rust: https://github.com/rust-lang/hashbrown
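
The group-matching helpers referenced throughout the diff (matchTopHash, matchEmpty, matchEmptyOrDeleted, bitmask64 and its NextMatch/RemoveNextMatch) live in the new file src/runtime/mapbithack.go, which is not quoted in this message. As a rough illustration of the technique, the standalone sketch below performs word-at-a-time matching over a group of 8 control bytes, modeled on the Abseil/hashbrown portable fallback; the exact formulas are assumptions, not necessarily this CL's code.

    package main

    import (
        "encoding/binary"
        "fmt"
        "math/bits"
    )

    const (
        emptySlot   uint8  = 0b1111_1111 // high bit set
        deletedSlot uint8  = 0b1000_0000 // high bit set
        loBits      uint64 = 0x0101_0101_0101_0101
        hiBits      uint64 = 0x8080_8080_8080_8080
    )

    // matchTopHash sets the high bit of byte i in the result when control
    // byte i (probably) equals top. The word-at-a-time zero-byte trick can
    // yield false positives; callers filter those with a key comparison.
    func matchTopHash(ctrl [8]uint8, top uint8) uint64 {
        g := binary.LittleEndian.Uint64(ctrl[:])
        x := g ^ (loBits * uint64(top))
        return (x - loBits) &^ x & hiBits
    }

    // matchEmptyOrDeleted matches slots whose control byte has the high bit
    // set, i.e. EMPTY (0xff) or DELETED (0x80).
    func matchEmptyOrDeleted(ctrl [8]uint8) uint64 {
        return binary.LittleEndian.Uint64(ctrl[:]) & hiBits
    }

    func main() {
        // A group with two candidate slots for tophash value 0x3a.
        g := [8]uint8{emptySlot, 0x3a, deletedSlot, 0x11, 0x3a, emptySlot, emptySlot, emptySlot}
        m := matchTopHash(g, 0x3a)
        for m != 0 {
            i := bits.TrailingZeros64(m) / 8  // next candidate slot index
            fmt.Println("candidate slot:", i) // prints 1, then 4
            m &= m - 1                        // drop that match (cf. RemoveNextMatch)
        }
        fmt.Printf("empty/deleted mask: %#x\n", matchEmptyOrDeleted(g))
    }

Any false positives from the word trick are harmless: every candidate slot is confirmed by a full key comparison in the lookup and assign loops below.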

For #54766

Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
---
M src/cmd/compile/internal/reflectdata/reflect.go
M src/cmd/compile/internal/test/inl_test.go
M src/cmd/compile/internal/walk/builtin.go
M src/cmd/link/internal/ld/dwarf.go
M src/reflect/value.go
M src/runtime/export_test.go
M src/runtime/map.go
M src/runtime/map_fast32.go
M src/runtime/map_fast64.go
M src/runtime/map_faststr.go
M src/runtime/map_test.go
A src/runtime/mapbithack.go
M src/runtime/runtime-gdb.py
M src/runtime/stubs.go
14 files changed, 1,994 insertions(+), 1,710 deletions(-)

diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 16b7a3a..2376b48 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -104,7 +104,7 @@
elemtype = types.NewPtr(elemtype)
}

- field := make([]*types.Field, 0, 5)
+ field := make([]*types.Field, 0, 4)

// The first field is: uint8 topbits[BUCKETSIZE].
arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE)
@@ -126,6 +126,7 @@
// Arrange for the bucket to have no pointers by changing
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in runtime/map.go.
+ // TODO(zyh): Remove this field and make sure there is no overlap.
otyp := types.Types[types.TUNSAFEPTR]
if !elemtype.HasPointers() && !keytype.HasPointers() {
otyp = types.Types[types.TUINTPTR]
@@ -208,33 +209,29 @@
// count int
// flags uint8
// B uint8
- // noverflow uint16
+ // _pad uint16
// hash0 uint32
// buckets *bmap
- // oldbuckets *bmap
- // nevacuate uintptr
- // extra unsafe.Pointer // *mapextra
+ // growthLeft int
// }
// must match runtime/map.go:hmap.
fields := []*types.Field{
makefield("count", types.Types[types.TINT]),
makefield("flags", types.Types[types.TUINT8]),
makefield("B", types.Types[types.TUINT8]),
- makefield("noverflow", types.Types[types.TUINT16]),
+ makefield("_pad", types.Types[types.TUINT16]),
makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
- makefield("oldbuckets", types.NewPtr(bmap)),
- makefield("nevacuate", types.Types[types.TUINTPTR]),
- makefield("extra", types.Types[types.TUNSAFEPTR]),
+ makefield("growthLeft", types.Types[types.TINT]),
}

hmap := types.NewStruct(types.NoPkg, fields)
hmap.SetNoalg(true)
types.CalcSize(hmap)

- // The size of hmap should be 48 bytes on 64 bit
- // and 28 bytes on 32 bit platforms.
- if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
+ // The size of hmap should be 32 bytes on 64 bit
+ // and 20 bytes on 32 bit platforms.
+ if size := int64(8 + 3*types.PtrSize); hmap.Size() != size {
base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
}

@@ -261,8 +258,6 @@
// h *hmap
// buckets *bmap
// bptr *bmap
- // overflow unsafe.Pointer // *[]*bmap
- // oldoverflow unsafe.Pointer // *[]*bmap
// startBucket uintptr
// offset uint8
// wrapped bool
@@ -279,8 +274,6 @@
makefield("h", types.NewPtr(hmap)),
makefield("buckets", types.NewPtr(bmap)),
makefield("bptr", types.NewPtr(bmap)),
- makefield("overflow", types.Types[types.TUNSAFEPTR]),
- makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
makefield("startBucket", types.Types[types.TUINTPTR]),
makefield("offset", types.Types[types.TUINT8]),
makefield("wrapped", types.Types[types.TBOOL]),
@@ -294,7 +287,7 @@
hiter := types.NewStruct(types.NoPkg, fields)
hiter.SetNoalg(true)
types.CalcSize(hiter)
- if hiter.Size() != int64(12*types.PtrSize) {
+ if hiter.Size() != int64(10*types.PtrSize) {
base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
}
t.MapType().Hiter = hiter
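
As a quick check of the size assertion above, a standalone mirror of the new header (not the runtime's actual type) reproduces the expected 32 bytes on 64-bit and 20 bytes on 32-bit platforms:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // hmapMirror mirrors the field layout listed above, for size arithmetic only.
    type hmapMirror struct {
        count      int            // 8 bytes on 64-bit, 4 on 32-bit
        flags      uint8          // 1
        B          uint8          // 1
        _pad       uint16         // 2
        hash0      uint32         // 4
        buckets    unsafe.Pointer // PtrSize
        growthLeft int            // PtrSize
    }

    func main() {
        fmt.Println(unsafe.Sizeof(hmapMirror{})) // 32 on 64-bit, i.e. 8 + 3*PtrSize
    }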
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index 9926985..7d27b92 100644
--- a/src/cmd/compile/internal/test/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -39,10 +39,7 @@
"adjustpointer",
"alignDown",
"alignUp",
- "bucketMask",
- "bucketShift",
"chanbuf",
- "evacuated",
"fastlog2",
"fastrand",
"float64bits",
@@ -62,12 +59,28 @@
"stringStructOf",
"subtract1",
"subtractb",
- "tophash",
- "(*bmap).keys",
- "(*bmap).overflow",
"(*waitq).enqueue",
"funcInfo.entry",

+ // Map-related ones
+ "bucketMask",
+ "bucketShift",
+ "isFull",
+ "littleEndianBytesToUint64",
+ "littleEndianUint64ToBytes",
+ "matchEmpty",
+ "matchEmptyOrDeleted",
+ "matchFull",
+ "matchTopHash",
+ "newProbe",
+ "tophash",
+ "(*bitmask64).NextMatch",
+ "(*bitmask64).RemoveNextMatch",
+ "(*bmap).keys",
+ "(*probe).Bucket",
+ "(*probe).Next",
+ "(*probe).Reset",
+
// GC-related ones
"cgoInRange",
"gclinkptr.ptr",
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index 5a649c0..8a5f7c8 100644
--- a/src/cmd/compile/internal/walk/builtin.go
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -291,6 +291,7 @@
// var bv bmap
// b = &bv
// h.buckets = b
+ // h.growthLeft = BUCKETSIZE
// }

nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
@@ -300,10 +301,24 @@
// b = &bv
b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))

+ for i := 0; i < reflectdata.BUCKETSIZE; i++ {
+ tophashsym := reflectdata.MapBucketType(t).Field(0).Sym
+ btophash := ir.NewSelectorExpr(base.Pos, ir.ODOT, b, tophashsym) // b.tophash
+ tidx := ir.NewIndexExpr(base.Pos, btophash, ir.NewInt(int64(i))) // b.tophash[i]
+ seti := ir.NewAssignStmt(base.Pos, tidx, typecheck.Conv(ir.NewInt(255), types.Types[types.TUINT8])) // b.tophash[i] = 255
+ nif.Body.Append(seti)
+ }
+
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
nif.Body.Append(na)
+
+ // h.growthLeft = BUCKETSIZE
+ bsym2 := hmapType.Field(6).Sym
+ na2 := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym2), ir.NewInt(reflectdata.BUCKETSIZE))
+ nif.Body.Append(na2)
+
appendWalkStmt(init, nif)
}
}
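
In plain Go, the statements the walk above now builds for make(map[k]v, hint) with a small constant hint are roughly equivalent to the sketch below. The types are simplified stand-ins (the real bmap is followed by keys and elems, and bv is a stack temporary in the real IR), so treat this as an illustration of the emitted assignments rather than compiler output.

    package main

    import (
        "fmt"
        "unsafe"
    )

    const bucketCnt = 8 // BUCKETSIZE

    type bmap struct {
        tophash [bucketCnt]uint8
        // keys and elems follow in the real layout
    }

    type hmap struct {
        count      int
        flags      uint8
        B          uint8
        _pad       uint16
        hash0      uint32
        buckets    unsafe.Pointer
        growthLeft int
    }

    func makeSmallMap(h *hmap, hint int) {
        if hint <= bucketCnt {
            var bv bmap // var bv bmap; b = &bv
            b := &bv
            for i := 0; i < bucketCnt; i++ {
                b.tophash[i] = 255 // b.tophash[i] = 255: every slot starts EMPTY
            }
            h.buckets = unsafe.Pointer(b) // h.buckets = b
            h.growthLeft = bucketCnt      // h.growthLeft = BUCKETSIZE
        }
    }

    func main() {
        var h hmap
        makeSmallMap(&h, 4)
        fmt.Println(h.growthLeft, h.buckets != nil) // 8 true
    }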
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index 75fabb4..335c737 100644
--- a/src/cmd/link/internal/ld/dwarf.go
+++ b/src/cmd/link/internal/ld/dwarf.go
@@ -956,7 +956,6 @@
dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) {
d.copychildren(ctxt, dwh, hash)
d.substitutetype(dwh, "buckets", d.defptrto(dwhbs))
- d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs))
newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil)
})

diff --git a/src/reflect/value.go b/src/reflect/value.go
index 3611a5a..80bbb45 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -1798,8 +1798,6 @@
h unsafe.Pointer
buckets unsafe.Pointer
bptr unsafe.Pointer
- overflow *[]unsafe.Pointer
- oldoverflow *[]unsafe.Pointer
startBucket uintptr
offset uint8
wrapped bool
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 32d33ad..90d2290 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -447,7 +447,10 @@
rw.rw.unlock()
}

-const RuntimeHmapSize = unsafe.Sizeof(hmap{})
+const (
+ RuntimeHmapSize = unsafe.Sizeof(hmap{})
+ RuntimeBucketCnt = bucketCnt
+)

func MapBucketsCount(m map[int]int) int {
h := *(**hmap)(unsafe.Pointer(&m))
@@ -546,42 +549,6 @@
stackOverflow(&buf[0])
}

-func MapTombstoneCheck(m map[int]int) {
- // Make sure emptyOne and emptyRest are distributed correctly.
- // We should have a series of filled and emptyOne cells, followed by
- // a series of emptyRest cells.
- h := *(**hmap)(unsafe.Pointer(&m))
- i := any(m)
- t := *(**maptype)(unsafe.Pointer(&i))
-
- for x := 0; x < 1<<h.B; x++ {
- b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
- n := 0
- for b := b0; b != nil; b = b.overflow(t) {
- for i := 0; i < bucketCnt; i++ {
- if b.tophash[i] != emptyRest {
- n++
- }
- }
- }
- k := 0
- for b := b0; b != nil; b = b.overflow(t) {
- for i := 0; i < bucketCnt; i++ {
- if k < n && b.tophash[i] == emptyRest {
- panic("early emptyRest")
- }
- if k >= n && b.tophash[i] != emptyRest {
- panic("late non-emptyRest")
- }
- if k == n-1 && b.tophash[i] == emptyOne {
- panic("last non-emptyRest entry is emptyOne")
- }
- k++
- }
- }
- }
-}
-
func RunGetgThreadSwitchTest() {
// Test that getg works correctly with thread switch.
// With gccgo, if we generate getg inlined, the backend
diff --git a/src/runtime/map.go b/src/runtime/map.go
index 65be472..13032a1 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -6,52 +6,26 @@

// This file contains the implementation of Go's map type.
//
+// The implementation is mainly based on SwissTable
+// (https://abseil.io/about/design/swisstables).
+//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
-// 8 key/elem pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
+// 8 key/elem pairs. The low-order bits of the hash select a bucket.
+// Each bucket stores the high-order 7 bits of each entry's hash to
+// distinguish the entries within a single bucket.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index). To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times). When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and elems)
-// loadFactor %overflow bytes/entry hitprobe missprobe
-// 4.00 2.13 20.77 3.00 4.00
-// 4.50 4.05 17.30 3.25 4.50
-// 5.00 6.85 14.77 3.50 5.00
-// 5.50 10.55 12.94 3.75 5.50
-// 6.00 15.27 11.67 4.00 6.00
-// 6.50 20.90 10.79 4.25 6.50
-// 7.00 27.14 10.15 4.50 7.00
-// 7.50 34.03 9.73 4.75 7.50
-// 8.00 41.10 9.40 5.00 8.00
-//
-// %overflow = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/elem pair
-// hitprobe = # of entries to check when looking up a present key
-// missprobe = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
+// return the keys in walk order (bucket #, then slot index
+// within the bucket). To maintain iteration semantics, we only
+// move keys within their buckets when no iterator may be in use.
+// When growing the table, iterators remain iterating through
+// the old table and must check the new table if the bucket
+// they are iterating through has been moved to the new table.

import (
"internal/abi"
@@ -62,15 +36,9 @@
)

const (
- // Maximum number of key/elem pairs a bucket can hold.
bucketCntBits = 3
bucketCnt = 1 << bucketCntBits

- // Maximum average load of a bucket that triggers growth is 6.5.
- // Represent as loadFactorNum/loadFactorDen, to allow integer math.
- loadFactorNum = 13
- loadFactorDen = 2
-
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Fast versions cannot handle big elems - the cutoff size for
@@ -86,77 +54,63 @@
v int64
}{}.v)

- // Possible tophash values. We reserve a few possibilities for special marks.
- // Each bucket (including its overflow buckets, if any) will have either all or none of its
- // entries in the evacuated* states (except during the evacuate() method, which only happens
- // during map writes and thus no one else can observe the map during that time).
- emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
- emptyOne = 1 // this cell is empty
- evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
- evacuatedY = 3 // same as above, but evacuated to second half of larger table.
- evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
- minTopHash = 5 // minimum tophash for a normal filled cell.
+ // Possible tophash values.
+ // fullSlot uint8 = 0b0xxx_xxxx, the xxx_xxxx is the top 7 bits of the hash.
+ emptySlot uint8 = 0b1111_1111
+ deletedSlot uint8 = 0b1000_0000
+ emptyOrDeletedMask uint64 = 0x8080_8080_8080_8080

// flags
- iterator = 1 // there may be an iterator using buckets
- oldIterator = 2 // there may be an iterator using oldbuckets
- hashWriting = 4 // a goroutine is writing to the map
- sameSizeGrow = 8 // the current map growth is to a new map of the same size
+ iterator = 1 // there may be an iterator using buckets
+ hashWriting = 2 // a goroutine is writing to the map

- // sentinel bucket ID for iterator checks
- noCheck = 1<<(8*goarch.PtrSize) - 1
+ // LoadFactor.
+ // 0 -> invalid
+ // 1 -> 0.5(min)
+ // 2 -> 0.75
+ // 3 -> 0.875(max)
+ // 4 -> 0.9375, invalid if bucketCnt=8: we must keep at least one empty slot.
+ loadFactorShift = 3
+
+ // Maximum number of key/elem pairs a bucket can hold for maximally loaded tables.
+ maxItemInBucket = bucketCnt - (bucketCnt >> loadFactorShift)
+ emptyItemInBucket = bucketCnt >> loadFactorShift // used for optimization
+
+ // Use a plain for loop to scan the buckets if B <= loopaccessB.
+ loopaccessB = 1
)

-// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
-func isEmpty(x uint8) bool {
- return x <= emptyOne
+// isFull returns true if the slot is full.
+func isFull(v uint8) bool {
+ return v < 128
}

// A header for a Go map.
type hmap struct {
// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
// Make sure this stays in sync with the compiler's definition.
- count int // # live cells == size of map. Must be first (used by len() builtin)
- flags uint8
- B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
- noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
- hash0 uint32 // hash seed
-
+ count int // live cells == size of map. Must be first (used by len() builtin)
+ flags uint8
+ B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
+ _pad uint16
+ hash0 uint32 // hash seed
buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
- oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
- nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
-
- extra *mapextra // optional fields
-}
-
-// mapextra holds fields that are not present on all maps.
-type mapextra struct {
- // If both key and elem do not contain pointers and are inline, then we mark bucket
- // type as containing no pointers. This avoids scanning such maps.
- // However, bmap.overflow is a pointer. In order to keep overflow buckets
- // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
- // overflow and oldoverflow are only used if key and elem do not contain pointers.
- // overflow contains overflow buckets for hmap.buckets.
- // oldoverflow contains overflow buckets for hmap.oldbuckets.
- // The indirection allows to store a pointer to the slice in hiter.
- overflow *[]*bmap
- oldoverflow *[]*bmap
-
- // nextOverflow holds a pointer to a free overflow bucket.
- nextOverflow *bmap
+ growthLeft int
}

// A bucket for a Go map.
type bmap struct {
// tophash generally contains the top byte of the hash value
- // for each key in this bucket. If tophash[0] < minTopHash,
- // tophash[0] is a bucket evacuation state instead.
+ // for each key in this bucket.
tophash [bucketCnt]uint8
// Followed by bucketCnt keys and then bucketCnt elems.
// NOTE: packing all the keys together and then all the elems together makes the
// code a bit more complicated than alternating key/elem/key/elem/... but it allows
// us to eliminate padding which would be needed for, e.g., map[int64]int8.
- // Followed by an overflow pointer.
+}
+
+func (b *bmap) keys() unsafe.Pointer {
+ return add(unsafe.Pointer(b), dataOffset)
}

// A hash iteration structure.
@@ -169,8 +123,6 @@
h *hmap
buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
bptr *bmap // current bucket
- overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
- oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
startBucket uintptr // bucket iteration started at
offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
wrapped bool // already wrapped around from end of bucket array to beginning
@@ -180,6 +132,37 @@
checkBucket uintptr
}

+// probe represents the state of a probe sequence.
+//
+// The sequence is a triangular progression.
+type probe struct {
+ bucket uintptr
+ mask uintptr
+ stride uintptr
+}
+
+func newProbe(hash, mask uintptr) *probe {
+ return &probe{
+ bucket: hash & mask,
+ mask: mask,
+ }
+}
+
+func (p *probe) Bucket() uintptr {
+ return p.bucket
+}
+
+func (p *probe) Next() {
+ p.stride += 1
+ p.bucket += p.stride
+ p.bucket &= p.mask
+}
+
+func (p *probe) Reset(hash uintptr) {
+ p.stride = 0
+ p.bucket = hash & p.mask
+}
+
// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
// Masking the shift amount allows overflow checks to be elided.
@@ -192,92 +175,12 @@
}

// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
- top := uint8(hash >> (goarch.PtrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- return top
+func tophash(v uintptr) uint8 {
+ return uint8(v >> (goarch.PtrSize*8 - 7))
}

-func evacuated(b *bmap) bool {
- h := b.tophash[0]
- return h > emptyOne && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize))
-}
-
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
- return add(unsafe.Pointer(b), dataOffset)
-}
-
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
- // We trigger same-size map growth if there are
- // as many overflow buckets as buckets.
- // We need to be able to count to 1<<h.B.
- if h.B < 16 {
- h.noverflow++
- return
- }
- // Increment with probability 1/(1<<(h.B-15)).
- // When we reach 1<<15 - 1, we will have approximately
- // as many overflow buckets as buckets.
- mask := uint32(1)<<(h.B-15) - 1
- // Example: if h.B == 18, then mask == 7,
- // and fastrand & 7 == 0 with probability 1/8.
- if fastrand()&mask == 0 {
- h.noverflow++
- }
-}
-
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
- var ovf *bmap
- if h.extra != nil && h.extra.nextOverflow != nil {
- // We have preallocated overflow buckets available.
- // See makeBucketArray for more details.
- ovf = h.extra.nextOverflow
- if ovf.overflow(t) == nil {
- // We're not at the end of the preallocated overflow buckets. Bump the pointer.
- h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
- } else {
- // This is the last preallocated overflow bucket.
- // Reset the overflow pointer on this bucket,
- // which was set to a non-nil sentinel value.
- ovf.setoverflow(t, nil)
- h.extra.nextOverflow = nil
- }
- } else {
- ovf = (*bmap)(newobject(t.bucket))
- }
- h.incrnoverflow()
- if t.bucket.ptrdata == 0 {
- h.createOverflow()
- *h.extra.overflow = append(*h.extra.overflow, ovf)
- }
- b.setoverflow(t, ovf)
- return ovf
-}
-
-func (h *hmap) createOverflow() {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- if h.extra.overflow == nil {
- h.extra.overflow = new([]*bmap)
- }
+func overLoadFactor(count int, B uint8) bool {
+ return count > bucketCnt && uintptr(count) > bucketShift(B)*maxItemInBucket
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
@@ -287,15 +190,6 @@
return makemap(t, int(hint), h)
}

-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-func makemap_small() *hmap {
- h := new(hmap)
- h.hash0 = fastrand()
- return h
-}
-
// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
@@ -320,71 +214,42 @@
B++
}
h.B = B
+ h.growthLeft = bucketCnt * int(bucketShift(B))

// allocate initial hash table
// if B == 0, the buckets field is allocated lazily later (in mapassign)
// If hint is large zeroing this memory could take a while.
if h.B != 0 {
- var nextOverflow *bmap
- h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
- if nextOverflow != nil {
- h.extra = new(mapextra)
- h.extra.nextOverflow = nextOverflow
- }
+ h.buckets = makeBucketArray(t, h.B)
}

return h
}

+// makemap_small implements Go map creation for make(map[k]v) and
+// make(map[k]v, hint) when hint is known to be at most bucketCnt
+// at compile time and the map needs to be allocated on the heap.
+func makemap_small() *hmap {
+ h := new(hmap)
+ h.hash0 = fastrand()
+ h.growthLeft = bucketCnt
+ return h
+}
+
// makeBucketArray initializes a backing array for map buckets.
-// 1<<b is the minimum number of buckets to allocate.
-// dirtyalloc should either be nil or a bucket array previously
-// allocated by makeBucketArray with the same t and b parameters.
-// If dirtyalloc is nil a new backing array will be alloced and
-// otherwise dirtyalloc will be cleared and reused as backing array.
-func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
- base := bucketShift(b)
- nbuckets := base
- // For small b, overflow buckets are unlikely.
- // Avoid the overhead of the calculation.
- if b >= 4 {
- // Add on the estimated number of overflow buckets
- // required to insert the median number of elements
- // used with this value of b.
- nbuckets += bucketShift(b - 4)
- sz := t.bucket.size * nbuckets
- up := roundupsize(sz)
- if up != sz {
- nbuckets = up / t.bucket.size
- }
+// 1<<b is the number of buckets to allocate.
+func makeBucketArray(t *maptype, b uint8) unsafe.Pointer {
+ nbuckets := bucketShift(b)
+
+ buckets := newarray(t.bucket, int(nbuckets))
+
+ base := buckets
+ for i := uintptr(0); i < nbuckets; i++ {
+ *(*uint64)(base) = 0xffff_ffff_ffff_ffff // all empty
+ base = add(base, uintptr(t.bucketsize))
}

- if dirtyalloc == nil {
- buckets = newarray(t.bucket, int(nbuckets))
- } else {
- // dirtyalloc was previously generated by
- // the above newarray(t.bucket, int(nbuckets))
- // but may not be empty.
- buckets = dirtyalloc
- size := t.bucket.size * nbuckets
- if t.bucket.ptrdata != 0 {
- memclrHasPointers(buckets, size)
- } else {
- memclrNoHeapPointers(buckets, size)
- }
- }
-
- if base != nbuckets {
- // We preallocated some overflow buckets.
- // To keep the overhead of tracking these overflow buckets to a minimum,
- // we use the convention that if a preallocated overflow bucket's overflow
- // pointer is nil, then there are more available by bumping the pointer.
- // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
- nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
- last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
- last.setoverflow(t, (*bmap)(buckets))
- }
- return buckets, nextOverflow
+ return buckets
}

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
@@ -414,28 +279,19 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
+
hash := t.hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+
+ p := newProbe(hash, bucketMask(h.B))
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
@@ -448,9 +304,13 @@
}
return e
}
+ status.RemoveNextMatch()
}
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ p.Next()
}
- return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
@@ -475,28 +335,19 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
+
hash := t.hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+
+ p := newProbe(hash, bucketMask(h.B))
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
@@ -509,9 +360,13 @@
}
return e, true
}
+ status.RemoveNextMatch()
}
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ p.Next()
}
- return unsafe.Pointer(&zeroVal[0]), false
}

// returns both key and elem. Used by map iterator
@@ -520,27 +375,17 @@
return nil, nil
}
hash := t.hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+
+ p := newProbe(hash, bucketMask(h.B))
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
@@ -553,9 +398,13 @@
}
return k, e
}
+ status.RemoveNextMatch()
}
+ if matchEmpty(b.tophash) != 0 {
+ return nil, nil
+ }
+ p.Next()
}
- return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
@@ -601,86 +450,88 @@
h.flags ^= hashWriting

if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ // Lazily allocate the initial bucket array.
+ h.buckets = makeBucketArray(t, 0)
+ h.growthLeft = bucketCnt
}

-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
top := tophash(hash)

+ if h.needGrow() {
+ grow(h, t)
+ }
+
+ p := newProbe(hash, bucketMask(h.B))
+
var inserti *uint8
var insertk unsafe.Pointer
var elem unsafe.Pointer
-bucketloop:
+
+ var (
+ b *bmap
+ status bitmask64
+ )
+ // Check whether the key is already in the map.
for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if isEmpty(b.tophash[i]) && inserti == nil {
- inserti = &b.tophash[i]
- insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status = matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if !t.key.equal(key, k) {
- continue
+ if t.key.equal(key, k) {
+ // Found a key.
+ // already have a mapping for key. Update it.
+ if t.needkeyupdate() {
+ typedmemmove(t.key, k, key)
+ }
+ elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ goto done
}
- // already have a mapping for key. Update it.
- if t.needkeyupdate() {
- typedmemmove(t.key, k, key)
- }
- elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- goto done
+ status.RemoveNextMatch()
}
- ovf := b.overflow(t)
- if ovf == nil {
+ if matchEmpty(b.tophash) != 0 {
break
}
- b = ovf
+ p.Next()
}

- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
+ // The key is not in the map.
+ p.Reset(hash)
+ for {
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // The key is not in the map (checked above); find
+ // the first empty or deleted slot to insert into.
+ status = matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ inserti = &b.tophash[i]
+ insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ // Insert key and value.
+ if t.indirectkey() {
+ kmem := newobject(t.key)
+ *(*unsafe.Pointer)(insertk) = kmem
+ insertk = kmem
+ }
+ if t.indirectelem() {
+ vmem := newobject(t.elem)
+ *(*unsafe.Pointer)(elem) = vmem
+ }
+ typedmemmove(t.key, insertk, key)
+ *inserti = top
+ h.growthLeft -= 1
+ h.count += 1
+ goto done
+ }
+ // No free slot in this bucket; keep probing.
+ p.Next()
}
-
- if inserti == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- newb := h.newoverflow(t, b)
- inserti = &newb.tophash[0]
- insertk = add(unsafe.Pointer(newb), dataOffset)
- elem = add(insertk, bucketCnt*uintptr(t.keysize))
- }
-
- // store new key/elem at insert position
- if t.indirectkey() {
- kmem := newobject(t.key)
- *(*unsafe.Pointer)(insertk) = kmem
- insertk = kmem
- }
- if t.indirectelem() {
- vmem := newobject(t.elem)
- *(*unsafe.Pointer)(elem) = vmem
- }
- typedmemmove(t.key, insertk, key)
- *inserti = top
- h.count++
-
done:
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
@@ -711,6 +562,11 @@
}
return
}
+
+ if h.buckets == nil {
+ return
+ }
+
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
@@ -721,87 +577,64 @@
// in which case we have not actually done a write (delete).
h.flags ^= hashWriting

- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- bOrig := b
+ p := newProbe(hash, bucketMask(h.B))
top := tophash(hash)
-search:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break search
- }
- continue
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k
if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
- if !t.key.equal(key, k2) {
- continue
- }
- // Only clear key if there are pointers in it.
- if t.indirectkey() {
- *(*unsafe.Pointer)(k) = nil
- } else if t.key.ptrdata != 0 {
- memclrHasPointers(k, t.key.size)
- }
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
- if t.indirectelem() {
- *(*unsafe.Pointer)(e) = nil
- } else if t.elem.ptrdata != 0 {
- memclrHasPointers(e, t.elem.size)
- } else {
- memclrNoHeapPointers(e, t.elem.size)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- // It would be nice to make this a separate function, but
- // for loops are not currently inlineable.
- if i == bucketCnt-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
+ if t.key.equal(key, k2) {
+ // Found this key.
+ h.count -= 1
+ // Only clear key if there are pointers in it.
+ if t.indirectkey() {
+ *(*unsafe.Pointer)(k) = nil
+ } else if t.key.ptrdata != 0 {
+ memclrHasPointers(k, t.key.size)
}
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = bucketCnt - 1
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ if t.indirectelem() {
+ *(*unsafe.Pointer)(e) = nil
+ } else if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
} else {
- i--
+ memclrNoHeapPointers(e, t.elem.size)
}
- if b.tophash[i] != emptyOne {
- break
+ // Update tophash.
+ if matchEmpty(b.tophash) == 0 {
+ // We only mark the slot as DELETED if the entry being deleted
+ // is in a group of bucketCnt slots with no EMPTY slot.
+ b.tophash[i] = deletedSlot
+ } else {
+ h.growthLeft += 1
+ b.tophash[i] = emptySlot
}
+ goto done
}
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = fastrand()
- }
- break search
+ status.RemoveNextMatch()
}
+ if matchEmpty(b.tophash) != 0 {
+ // The key is not in this map.
+ goto done
+ }
+ p.Next()
}
-
+done:
+ if h.count == 0 {
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ h.hash0 = fastrand()
+ }
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
@@ -823,7 +656,7 @@
return
}

- if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
+ if unsafe.Sizeof(hiter{})/goarch.PtrSize != 10 {
throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
}
it.h = h
@@ -831,15 +664,6 @@
// grab snapshot of bucket state
it.B = h.B
it.buckets = h.buckets
- if t.bucket.ptrdata == 0 {
- // Allocate the current slice and remember pointers to both current and old.
- // This preserves all relevant overflow buckets alive even if
- // the table grows and/or overflow buckets are added to the table
- // while we are iterating.
- h.createOverflow()
- it.overflow = h.extra.overflow
- it.oldoverflow = h.extra.oldoverflow
- }

// decide where to start
var r uintptr
@@ -856,8 +680,8 @@

// Remember we have an iterator.
// Can run concurrently with another mapiterinit().
- if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
- atomic.Or8(&h.flags, iterator|oldIterator)
+ if old := h.flags; old&(iterator) != iterator {
+ atomic.Or8(&h.flags, iterator)
}

mapiternext(it)
@@ -876,45 +700,26 @@
bucket := it.bucket
b := it.bptr
i := it.i
- checkBucket := it.checkBucket
-
-next:
- if b == nil {
- if bucket == it.startBucket && it.wrapped {
- // end of iteration
- it.key = nil
- it.elem = nil
- return
- }
- if h.growing() && it.B == h.B {
- // Iterator was started in the middle of a grow, and the grow isn't done yet.
- // If the bucket we're looking at hasn't been filled in yet (i.e. the old
- // bucket hasn't been evacuated) then we need to iterate through the old
- // bucket and only return the ones that will be migrated to this bucket.
- oldbucket := bucket & it.h.oldbucketmask()
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- if !evacuated(b) {
- checkBucket = bucket
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- bucket++
- if bucket == bucketShift(it.B) {
- bucket = 0
- it.wrapped = true
- }
- i = 0
+ if b != nil {
+ goto bucketloop
}
+nextbucket:
+ if it.wrapped && bucket == it.startBucket {
+ // end of iteration
+ it.key = nil
+ it.elem = nil
+ return
+ }
+ b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
+ bucket++
+ if bucket == bucketShift(it.B) {
+ bucket = 0
+ it.wrapped = true
+ }
+bucketloop:
for ; i < bucketCnt; i++ {
offi := (i + it.offset) & (bucketCnt - 1)
- if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
- // TODO: emptyRest is hard to use here, as we start iterating
- // in the middle of a bucket. It's feasible, just tricky.
+ if !isFull(b.tophash[offi]) {
continue
}
k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
@@ -922,36 +727,8 @@
k = *((*unsafe.Pointer)(k))
}
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
- if checkBucket != noCheck && !h.sameSizeGrow() {
- // Special case: iterator was started during a grow to a larger size
- // and the grow is not done yet. We're working on a bucket whose
- // oldbucket has not been evacuated yet. Or at least, it wasn't
- // evacuated when we started the bucket. So we're iterating
- // through the oldbucket, skipping any keys that will go
- // to the other new bucket (each oldbucket expands to two
- // buckets during a grow).
- if t.reflexivekey() || t.key.equal(k, k) {
- // If the item in the oldbucket is not destined for
- // the current new bucket in the iteration, skip it.
- hash := t.hasher(k, uintptr(h.hash0))
- if hash&bucketMask(it.B) != checkBucket {
- continue
- }
- } else {
- // Hash isn't repeatable if k != k (NaNs). We need a
- // repeatable and randomish choice of which direction
- // to send NaNs during evacuation. We'll use the low
- // bit of tophash to decide which way NaNs go.
- // NOTE: this case is why we need two evacuate tophash
- // values, evacuatedX and evacuatedY, that differ in
- // their low bit.
- if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
- continue
- }
- }
- }
- if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.reflexivekey() || t.key.equal(k, k)) {
+
+ if h.B == it.B || !(t.reflexivekey() || t.key.equal(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
@@ -976,17 +753,17 @@
it.key = rk
it.elem = re
}
+ // Save the iterator state so the next call resumes here.
it.bucket = bucket
if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
it.bptr = b
}
it.i = i + 1
- it.checkBucket = checkBucket
return
}
- b = b.overflow(t)
+ // No more items in this bucket; move to the next one.
i = 0
- goto next
+ goto nextbucket
}

// mapclear deletes all keys from a map.
@@ -1007,283 +784,259 @@

h.flags ^= hashWriting

- h.flags &^= sameSizeGrow
- h.oldbuckets = nil
- h.nevacuate = 0
- h.noverflow = 0
h.count = 0

+ // Clear bucket.
+ nbuckets := bucketShift(h.B)
+ h.growthLeft = int(nbuckets * bucketCnt)
+
+ buckets := h.buckets
+ size := uintptr(t.bucketsize) * nbuckets
+
+ // Only clear the buckets if they contain pointers.
+ if t.bucket.ptrdata != 0 {
+ memclrHasPointers(buckets, size)
+ }
+
+ for bptr := h.buckets; uintptr(bptr) < uintptr(add(h.buckets, size)); bptr = add(bptr, uintptr(t.bucketsize)) {
+ *(*uint64)(bptr) = 0xffff_ffff_ffff_ffff
+ }
+
// Reset the hash seed to make it more difficult for attackers to
// repeatedly trigger hash collisions. See issue 25237.
h.hash0 = fastrand()

- // Keep the mapextra allocation but clear any extra information.
- if h.extra != nil {
- *h.extra = mapextra{}
- }
-
- // makeBucketArray clears the memory pointed to by h.buckets
- // and recovers any overflow buckets by generating them
- // as if h.buckets was newly alloced.
- _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
- if nextOverflow != nil {
- // If overflow buckets are created then h.extra
- // will have been allocated during initial bucket creation.
- h.extra.nextOverflow = nextOverflow
- }
-
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
}

-func hashGrow(t *maptype, h *hmap) {
- // If we've hit the load factor, get bigger.
- // Otherwise, there are too many overflow buckets,
- // so keep the same number of buckets and "grow" laterally.
- bigger := uint8(1)
- if !overLoadFactor(h.count+1, h.B) {
- bigger = 0
- h.flags |= sameSizeGrow
- }
- oldbuckets := h.buckets
- newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
-
- flags := h.flags &^ (iterator | oldIterator)
- if h.flags&iterator != 0 {
- flags |= oldIterator
- }
- // commit the grow (atomic wrt gc)
- h.B += bigger
- h.flags = flags
- h.oldbuckets = oldbuckets
- h.buckets = newbuckets
- h.nevacuate = 0
- h.noverflow = 0
-
- if h.extra != nil && h.extra.overflow != nil {
- // Promote current overflow buckets to the old generation.
- if h.extra.oldoverflow != nil {
- throw("oldoverflow is not nil")
- }
- h.extra.oldoverflow = h.extra.overflow
- h.extra.overflow = nil
- }
- if nextOverflow != nil {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- h.extra.nextOverflow = nextOverflow
- }
-
- // the actual copying of the hash table data is done incrementally
- // by growWork() and evacuate().
+func (h *hmap) needGrow() bool {
+ threshold := int(bucketShift(h.B) * emptyItemInBucket) // capacity - capacity*loadFactor
+ return h.growthLeft <= threshold
}

-// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
-func overLoadFactor(count int, B uint8) bool {
- return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
-}
-
-// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
-// Note that most of these overflow buckets must be in sparse use;
-// if use was dense, then we'd have already triggered regular map growth.
-func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
- // If the threshold is too low, we do extraneous work.
- // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
- // "too many" means (approximately) as many overflow buckets as regular buckets.
- // See incrnoverflow for more details.
- if B > 15 {
- B = 15
- }
- // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
- return noverflow >= uint16(1)<<(B&15)
-}
-
-// growing reports whether h is growing. The growth may be to the same size or bigger.
-func (h *hmap) growing() bool {
- return h.oldbuckets != nil
-}
-
-// sameSizeGrow reports whether the current growth is to a map of the same size.
-func (h *hmap) sameSizeGrow() bool {
- return h.flags&sameSizeGrow != 0
-}
-
-// noldbuckets calculates the number of buckets prior to the current map growth.
-func (h *hmap) noldbuckets() uintptr {
- oldB := h.B
- if !h.sameSizeGrow() {
- oldB--
- }
- return bucketShift(oldB)
-}
-
-// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
-func (h *hmap) oldbucketmask() uintptr {
- return h.noldbuckets() - 1
-}
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate(t, h, h.nevacuate)
+func grow(h *hmap, t *maptype) {
+ cap := bucketShift(h.B) * bucketCnt
+ if uintptr(h.count*32) <= cap*25 && (h.flags&iterator != iterator) {
+ // Rehash in place if the current size is <= 25/32 of capacity.
+ // If there may be an iterator using the buckets, we disable growsamesize,
+ // because it may move entries to different buckets, which would break the
+ // iterator (keys might be returned 0 or 2 times).
+ // TODO(zyh): We could track the number of iterators in a field instead of
+ // just setting a flag. With that, growsamesize would still be possible
+ // once a traversal has finished.
+ growsamesize(h, t)
+ } else {
+ growbig(h, t)
}
}

-func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
- b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
- return evacuated(b)
-}
+func growsamesize(h *hmap, t *maptype) {
+ bucketNum := bucketShift(h.B)
+ mask := bucketNum - 1
+ indirectk := t.indirectkey()
+ indirecte := t.indirectelem()
+ // For all buckets:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ for bucket := uintptr(0); bucket < bucketNum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ prepareSameSizeGrow(&b.tophash)
+ }
+ // Temporary key and value used to swap.
+ var (
+ inittmp bool
+ tmpk, tmpe unsafe.Pointer
+ )

-// evacDst is an evacuation destination.
-type evacDst struct {
- b *bmap // current destination bucket
- i int // key/elem index into b
- k unsafe.Pointer // pointer to current key storage
- e unsafe.Pointer // pointer to current elem storage
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*uintptr(t.keysize))
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*uintptr(t.keysize))
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*uintptr(t.keysize))
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
+ for bucket := uintptr(0); bucket < bucketNum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ for i := uintptr(0); i < bucketCnt; {
+ if b.tophash[i] != deletedSlot {
+ i++
+ continue
+ }
+ base := add(unsafe.Pointer(b), dataOffset)
+ k := add(base, i*uintptr(t.keysize))
+ e := add(base, bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ k2 := k
+ if indirectk {
+ k2 = *(*unsafe.Pointer)(k)
+ }
+ hash := t.hasher(k2, uintptr(h.hash0))
+ top := tophash(hash)
+ // Find the first empty or deleted slot along this key's probe sequence.
+ var (
+ dstb *bmap
+ dsti uintptr
+ dstp = newProbe(hash, mask)
+ )
+ for {
+ dstb = (*bmap)(add(h.buckets, dstp.Bucket()*uintptr(t.bucketsize)))
+ status := matchEmptyOrDeleted(dstb.tophash)
+ dsti = status.NextMatch()
+ if dsti < bucketCnt {
+ break
}
- if top < minTopHash {
- throw("bad map state")
+ dstp.Next()
+ }
+
+ // The target bucket is the same.
+ if dstp.Bucket() == bucket {
+ // Just mark slot as FULL.
+ b.tophash[i] = top
+ i += 1
+ continue
+ }
+
+ dstbase := add(unsafe.Pointer(dstb), dataOffset) // key and value start
+ dstk := add(unsafe.Pointer(dstbase), dsti*uintptr(t.keysize))
+ dste := add(unsafe.Pointer(dstbase), bucketCnt*uintptr(t.keysize)+dsti*uintptr(t.elemsize))
+
+ // Target is in another bucket.
+ switch dstb.tophash[dsti] {
+ case emptySlot:
+ // 1. Transfer element to target
+ // 2. Mark target as FULL
+ // 3. Mark slot as EMPTY
+
+ // Store new key and value at insert position.
+ if indirectk {
+ *(*unsafe.Pointer)(dstk) = k2
+ } else {
+ typedmemmove(t.key, dstk, k)
}
- k2 := k
- if t.indirectkey() {
- k2 = *((*unsafe.Pointer)(k2))
+ if indirecte {
+ *(*unsafe.Pointer)(dste) = *(*unsafe.Pointer)(e)
+ } else {
+ typedmemmove(t.elem, dste, e)
}
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
- // If key != key (NaNs), then the hash could be (and probably
- // will be) entirely different from the old hash. Moreover,
- // it isn't reproducible. Reproducibility is required in the
- // presence of iterators, as our evacuation decision must
- // match whatever decision the iterator made.
- // Fortunately, we have the freedom to send these keys either
- // way. Also, tophash is meaningless for these kinds of keys.
- // We let the low bit of tophash drive the evacuation decision.
- // We recompute a new random tophash for the next level so
- // these keys will get evenly distributed across all buckets
- // after multiple grows.
- useY = top & 1
- top = tophash(hash)
- } else {
- if hash&newbit != 0 {
- useY = 1
- }
+
+ // Clear key and value.
+ if indirectk {
+ *(*unsafe.Pointer)(k) = nil
+ } else if t.key.ptrdata != 0 {
+ memclrHasPointers(k, t.key.size)
+ }
+ if indirecte {
+ *(*unsafe.Pointer)(e) = nil
+ } else if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
+ } else {
+ memclrNoHeapPointers(e, t.elem.size)
+ }
+ dstb.tophash[dsti] = top
+ b.tophash[i] = emptySlot
+ i++
+ case deletedSlot:
+ // 1. Swap current element with target element
+ // 2. Mark target as FULL
+ // 3. Repeat the procedure for the current slot with the element moved out of the target
+
+ // This path is uncommon; lazily initialize the temporary variables.
+ if !inittmp {
+ if !indirectk {
+ tmpk = newobject(t.key)
}
+ if !indirecte {
+ tmpe = newobject(t.elem)
+ }
+ inittmp = true
}

- if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
- throw("bad evacuatedN")
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
- dst := &xy[useY] // evacuation destination
-
- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
- }
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
- if t.indirectkey() {
- *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
+ if indirectk {
+ *(*unsafe.Pointer)(dstk), *(*unsafe.Pointer)(k) = *(*unsafe.Pointer)(k), *(*unsafe.Pointer)(dstk)
} else {
- typedmemmove(t.key, dst.k, k) // copy elem
+ typedmemmove(t.key, tmpk, dstk)
+ typedmemmove(t.key, dstk, k)
+ typedmemmove(t.key, k, tmpk)
}
- if t.indirectelem() {
- *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
+ if indirecte {
+ *(*unsafe.Pointer)(dste), *(*unsafe.Pointer)(e) = *(*unsafe.Pointer)(e), *(*unsafe.Pointer)(dste)
} else {
- typedmemmove(t.elem, dst.e, e)
+ typedmemmove(t.elem, tmpe, dste)
+ typedmemmove(t.elem, dste, e)
+ typedmemmove(t.elem, e, tmpe)
}
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, uintptr(t.keysize))
- dst.e = add(dst.e, uintptr(t.elemsize))
+ dstb.tophash[dsti] = top
}
}
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
- }
}
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
+ h.growthLeft = int(bucketNum*bucketCnt) - h.count
}

-func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
- h.nevacuate++
- // Experiments suggest that 1024 is overkill by at least an order of magnitude.
- // Put it in there as a safeguard anyway, to ensure O(1) behavior.
- stop := h.nevacuate + 1024
- if stop > newbit {
- stop = newbit
- }
- for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
- h.nevacuate++
- }
- if h.nevacuate == newbit { // newbit == # of oldbuckets
- // Growing is all done. Free old main bucket array.
- h.oldbuckets = nil
- // Can discard old overflow buckets as well.
- // If they are still referenced by an iterator,
- // then the iterator holds a pointers to the slice.
- if h.extra != nil {
- h.extra.oldoverflow = nil
+func growbig(h *hmap, t *maptype) {
+ oldB := h.B
+ newB := h.B + 1
+ oldBucketnum := bucketShift(oldB)
+ newBucketnum := bucketShift(newB)
+ newCap := newBucketnum * bucketCnt
+
+ newBucketArray := makeBucketArray(t, newB)
+ newMask := newBucketnum - 1
+ hash0 := uintptr(h.hash0)
+
+ for bucket := uintptr(0); bucket < oldBucketnum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ base := add(unsafe.Pointer(b), dataOffset) // key and value start
+ status := matchFull(b.tophash)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := add(base, i*uintptr(t.keysize))
+ e := add(base, bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ mapassignwithoutgrow(t, hash0, newMask, newBucketArray, k, e)
+ status.RemoveNextMatch()
}
- h.flags &^= sameSizeGrow
+ }
+
+ h.B = newB
+ h.flags &^= iterator
+ h.buckets = newBucketArray
+ h.growthLeft = int(newCap) - h.count
+}
+
+func mapassignwithoutgrow(t *maptype, hash0, mask uintptr, buckets, key, elem unsafe.Pointer) {
+ var hash uintptr
+
+ if t.indirectkey() {
+ hash = t.hasher(*(*unsafe.Pointer)(key), hash0)
+ } else {
+ hash = t.hasher(key, hash0)
+ }
+
+ top := tophash(hash)
+ p := newProbe(hash, mask)
+
+ // The key is not in the map.
+ for {
+ b := (*bmap)(add(buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Check empty slot or deleted slot.
+ status := matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
+ b.tophash[i] = top
+ base := add(unsafe.Pointer(b), dataOffset)
+ k := add(unsafe.Pointer(base), i*uintptr(t.keysize))
+ if t.indirectkey() {
+ *(*unsafe.Pointer)(k) = *(*unsafe.Pointer)(key)
+ } else {
+ typedmemmove(t.key, k, key)
+ }
+ e := add(unsafe.Pointer(base), bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
+ if t.indirectelem() {
+ *(*unsafe.Pointer)(e) = *(*unsafe.Pointer)(elem)
+ } else {
+ typedmemmove(t.elem, e, elem)
+ }
+ return
+ }
+ p.Next()
}
}
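
To make the probe sequence and the load-factor constants in map.go concrete, here is a small standalone mirror of newProbe/Next and the maxItemInBucket arithmetic (not the runtime code itself, just the same formulas evaluated for bucketCnt = 8):

    package main

    import "fmt"

    const (
        bucketCnt       = 8
        loadFactorShift = 3
        maxItemInBucket = bucketCnt - (bucketCnt >> loadFactorShift) // 7, i.e. a 0.875 load factor
    )

    type probe struct {
        bucket, mask, stride uintptr
    }

    func newProbe(hash, mask uintptr) *probe {
        return &probe{bucket: hash & mask, mask: mask}
    }

    func (p *probe) Next() {
        p.stride++           // strides 1, 2, 3, ... give offsets 1, 3, 6, 10, ...
        p.bucket += p.stride // from the home bucket: a triangular progression
        p.bucket &= p.mask
    }

    func main() {
        p := newProbe(5, 7) // hash 5, 8 buckets (mask 7)
        for i := 0; i < 8; i++ {
            fmt.Print(p.bucket, " ") // 5 6 0 3 7 4 2 1: every bucket exactly once
            p.Next()
        }
        fmt.Println()
        fmt.Println("max items per bucket before growth:", maxItemInBucket)
    }

Because the stride grows by one on each step, the probe visits every bucket of a power-of-two table exactly once before wrapping, so a lookup that never finds the key will eventually reach a group containing an EMPTY slot and terminate.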

diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index 01ea330..17b1723 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -21,33 +21,60 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
- if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
+
+ B := h.B
+ if B == 0 {
+ // No need to hash when there is only one bucket.
+ b := (*bmap)(h.buckets)
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 4) {
+ if *(*uint32)(k) == key && isFull(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
}
}
+ return unsafe.Pointer(&zeroVal[0])
}
- return unsafe.Pointer(&zeroVal[0])
+
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ top := tophash(hash)
+
+ p := newProbe(hash, bucketMask(B))
+
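+ // Small tables (B <= loopaccessB) take the plain per-slot scan below
+ // (loopaccess), which is presumably cheaper than building the tophash
+ // match bitmask for so few buckets.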
+ if B <= loopaccessB {
+ goto loopaccess
+ }
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
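+ // status is a bitmask of the slots whose control byte equals top;
+ // NextMatch yields their indices in order and returns bucketCnt when
+ // none remain.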
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*4)
+ if *(*uint32)(k) == key {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
+ }
+ status.RemoveNextMatch()
+ }
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ p.Next()
+ }
+loopaccess:
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 4) {
+ if *(*uint32)(k) == key && isFull(b.tophash[i]) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
+ }
+ }
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ p.Next()
+ }
}

func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
@@ -61,33 +88,60 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
- if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
+
+ B := h.B
+ if B == 0 {
+ // No need to hash when there is only one bucket.
+ b := (*bmap)(h.buckets)
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 4) {
+ if *(*uint32)(k) == key && isFull(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
}
}
+ return unsafe.Pointer(&zeroVal[0]), false
}
- return unsafe.Pointer(&zeroVal[0]), false
+
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ top := tophash(hash)
+ mask := bucketMask(B)
+ p := newProbe(hash, mask)
+
+ if B <= loopaccessB {
+ goto loopaccess
+ }
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*4)
+ if *(*uint32)(k) == key {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
+ }
+ status.RemoveNextMatch()
+ }
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ p.Next()
+ }
+loopaccess:
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 4) {
+ if *(*uint32)(k) == key && isFull(b.tophash[i]) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
+ }
+ }
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ p.Next()
+ }
}

func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
@@ -107,70 +161,74 @@
h.flags ^= hashWriting

if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ // Init an empty map.
+ h.buckets = makeBucketArray(t, 0)
+ h.growthLeft = bucketCnt
}

-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
+ top := tophash(hash)
+
+ if h.needGrow() {
+ grow_fast32(h, t)
}
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+ p := newProbe(hash, bucketMask(h.B))

var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer

-bucketloop:
+ var (
+ b *bmap
+ status bitmask64
+ )
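+ // Assignment probes twice: the first loop searches for an existing key
+ // using the tophash matches; if the key is absent, the probe sequence is
+ // reset and the second loop takes the first empty or deleted slot.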
+ // Check whether the key is already in the map.
for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- inserti = i
- insertb = b
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status = matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
- if k != key {
- continue
+ if k == key {
+ insertb = b
+ inserti = i
+ goto done
}
- inserti = i
- insertb = b
- goto done
+ status.RemoveNextMatch()
}
- ovf := b.overflow(t)
- if ovf == nil {
+ if matchEmpty(b.tophash) != 0 {
break
}
- b = ovf
+ p.Next()
}

- // Did not find mapping for key. Allocate new cell & add entry.
+ // The key is not in the map.
+ p.Reset(hash)
+ for {
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Look for the first empty or deleted slot along the probe sequence.
+ status = matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
+ insertb = b
+ inserti = i
+ insertb.tophash[i] = top
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+ // store new key at insert position
+ *(*uint32)(insertk) = key

- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
+ h.growthLeft -= 1
+ h.count += 1
+ goto done
+ }
+ // No free slot in this bucket; probe the next one.
+ p.Next()
}
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
- // store new key at insert position
- *(*uint32)(insertk) = key
-
- h.count++
-
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
@@ -197,70 +255,74 @@
h.flags ^= hashWriting

if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ // Init an empty map.
+ h.buckets = makeBucketArray(t, 0)
+ h.growthLeft = bucketCnt
}

-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
+ top := tophash(hash)
+
+ if h.needGrow() {
+ grow_fast32(h, t)
}
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+ p := newProbe(hash, bucketMask(h.B))

var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer

-bucketloop:
+ var (
+ b *bmap
+ status bitmask64
+ )
+ // Check whether the key is already in the map.
for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- inserti = i
- insertb = b
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status = matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
- if k != key {
- continue
+ if k == key {
+ insertb = b
+ inserti = i
+ goto done
}
- inserti = i
- insertb = b
- goto done
+ status.RemoveNextMatch()
}
- ovf := b.overflow(t)
- if ovf == nil {
+ if matchEmpty(b.tophash) != 0 {
break
}
- b = ovf
+ p.Next()
}

- // Did not find mapping for key. Allocate new cell & add entry.
+ // The key is not in the map.
+ p.Reset(hash)
+ for {
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Look for the first empty or deleted slot along the probe sequence.
+ status = matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
+ insertb = b
+ inserti = i
+ insertb.tophash[i] = top
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+ // store new key at insert position
+ *(*unsafe.Pointer)(insertk) = key

- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
+ h.growthLeft -= 1
+ h.count += 1
+ goto done
+ }
+ // No free slot in this bucket; probe the next one.
+ p.Next()
}
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
- // store new key at insert position
- *(*unsafe.Pointer)(insertk) = key
-
- h.count++
-
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
@@ -284,179 +346,243 @@

hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

- // Set hashWriting after calling t.hasher for consistency with mapdelete
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
+ // in which case we have not actually done a write (delete).
h.flags ^= hashWriting

- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- bOrig := b
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
- if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
- continue
- }
- // Only clear key if there are pointers in it.
- // This can only happen if pointers are 32 bit
- // wide as 64 bit pointers do not fit into a 32 bit key.
- if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
- // The key must be a pointer as we checked pointers are
- // 32 bits wide and the key is 32 bits wide also.
- *(*unsafe.Pointer)(k) = nil
- }
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
- if t.elem.ptrdata != 0 {
- memclrHasPointers(e, t.elem.size)
- } else {
- memclrNoHeapPointers(e, t.elem.size)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == bucketCnt-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = bucketCnt - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = fastrand()
- }
- break search
- }
- }
+ p := newProbe(hash, bucketMask(h.B))
+ top := tophash(hash)

+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*4)
+ if key == *(*uint32)(k) {
+ // Found this key.
+ h.count -= 1
+ // Only clear key if there are pointers in it.
+ // This can only happen if pointers are 32 bit
+ // wide as 64 bit pointers do not fit into a 32 bit key.
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
+ // The key must be a pointer as we checked pointers are
+ // 32 bits wide and the key is 32 bits wide also.
+ *(*unsafe.Pointer)(k) = nil
+ }
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
+ } else {
+ memclrNoHeapPointers(e, t.elem.size)
+ }
+ // Update tophash.
+ if matchEmpty(b.tophash) == 0 {
+ // There is no EMPTY slot in this bucket, so other keys' probe sequences
+ // may pass through it; mark the slot DELETED so those lookups keep probing.
+ b.tophash[i] = deletedSlot
+ } else {
+ h.growthLeft += 1
+ b.tophash[i] = emptySlot
+ }
+ goto done
+ }
+ status.RemoveNextMatch()
+ }
+ if matchEmpty(b.tophash) != 0 {
+ // The key is not in this map.
+ goto done
+ }
+ p.Next()
+ }
+done:
+ if h.count == 0 {
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ h.hash0 = fastrand()
+ }
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
}

-func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_fast32(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_fast32(t, h, h.nevacuate)
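+// grow_fast32 either rehashes the table in place, reclaiming DELETED slots,
+// or doubles the bucket array, depending on the live load and whether an
+// iterator is active.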
+func grow_fast32(h *hmap, t *maptype) {
+ cap := bucketShift(h.B) * bucketCnt
+ if uintptr(h.count*32) <= cap*25 && (h.flags&iterator != iterator) {
+ // Rehash in place if the current size is <= 25/32 of capacity.
+ // If an iterator may be using the buckets, same-size growth is disabled,
+ // since moving entries between buckets would break the iteration.
+ growsamesize_fast32(h, t)
+ } else {
+ growbig_fast32(h, t)
}
}

-func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
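+// growsamesize_fast32 rehashes the table in place without changing its size:
+// DELETED slots become EMPTY, FULL slots are temporarily marked DELETED, and
+// each element is then moved (or swapped) into the first free slot of its
+// probe sequence. This mirrors the tombstone-dropping rehash used by
+// SwissTable implementations such as abseil.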
+func growsamesize_fast32(h *hmap, t *maptype) {
+ bucketNum := bucketShift(h.B)
+ mask := bucketNum - 1
+ // For all buckets:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ for bucket := uintptr(0); bucket < bucketNum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ prepareSameSizeGrow(&b.tophash)
+ }
+ // Temporary key and value used to swap.
+ tmpk := newobject(t.key)
+ tmpe := newobject(t.elem)

- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*4)
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*4)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*4)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
+ for bucket := uintptr(0); bucket < bucketNum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ for i := uintptr(0); i < bucketCnt; {
+ if b.tophash[i] != deletedSlot {
+ i++
+ continue
+ }
+ base := add(unsafe.Pointer(b), dataOffset)
+ k := add(base, i*4)
+ e := add(base, bucketCnt*4+i*uintptr(t.elemsize))
+ hash := t.hasher(noescape(unsafe.Pointer(k)), uintptr(h.hash0))
+ top := tophash(hash)
+ // Find the first empty or deleted slot along the probe sequence.
+ var (
+ dstb *bmap
+ dsti uintptr
+ dstp = newProbe(hash, mask)
+ )
+ for {
+ dstb = (*bmap)(add(h.buckets, dstp.Bucket()*uintptr(t.bucketsize)))
+ status := matchEmptyOrDeleted(dstb.tophash)
+ dsti = status.NextMatch()
+ if dsti < bucketCnt {
+ break
}
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
+ dstp.Next()
+ }

- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
+ // The target bucket is the same.
+ if dstp.Bucket() == bucket {
+ // Just mark slot as FULL.
+ b.tophash[i] = top
+ i += 1
+ continue
+ }

- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*4)
+ dstbase := add(unsafe.Pointer(dstb), dataOffset) // key and value start
+ dstk := add(unsafe.Pointer(dstbase), dsti*4)
+ dste := add(unsafe.Pointer(dstbase), bucketCnt*4+dsti*uintptr(t.elemsize))
+
+ // Target is in another bucket.
+ switch dstb.tophash[dsti] {
+ case emptySlot:
+ // 1. Transfer element to target
+ // 2. Mark target as FULL
+ // 3. Mark slot as EMPTY
+
+ // Store new key and value at insert position.
+ *(*uint32)(dstk) = *(*uint32)(k)
+ typedmemmove(t.elem, dste, e)
+
+ // Clear key and value.
+
+ // Only clear key if there are pointers in it.
+ // This can only happen if pointers are 32 bit
+ // wide as 64 bit pointers do not fit into a 32 bit key.
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
+ // The key must be a pointer as we checked pointers are
+ // 32 bits wide and the key is 32 bits wide also.
+ *(*unsafe.Pointer)(k) = nil
}
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
- // Copy key.
- if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
- // Write with a write barrier.
- *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, uintptr(t.elemsize))
} else {
- *(*uint32)(dst.k) = *(*uint32)(k)
+ memclrNoHeapPointers(e, uintptr(t.elemsize))
}

- typedmemmove(t.elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 4)
- dst.e = add(dst.e, uintptr(t.elemsize))
+ dstb.tophash[dsti] = top
+ b.tophash[i] = emptySlot
+ i++
+ case deletedSlot:
+ // 1. Swap current element with target element
+ // 2. Mark target as FULL
+ // 3. Repeat the procedure for the current slot with the element moved from the target
+
+ // tmpk,tmpe = dstk,dste
+ *(*uint32)(tmpk) = *(*uint32)(dstk)
+ typedmemmove(t.elem, tmpe, dste)
+
+ // dstk,dste = k,e
+ *(*uint32)(dstk) = *(*uint32)(k)
+ typedmemmove(t.elem, dste, e)
+
+ // k,e = tmpk,tmpe
+ *(*uint32)(k) = *(*uint32)(tmpk)
+ typedmemmove(t.elem, e, tmpe)
+
+ dstb.tophash[dsti] = top
}
}
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
+ }
+ h.growthLeft = int(bucketNum*bucketCnt) - h.count
+}
+
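+// growbig_fast32 is the uint32-key specialization of growbig: it allocates a
+// table twice as large and reinserts every FULL slot into it.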
+func growbig_fast32(h *hmap, t *maptype) {
+ oldB := h.B
+ newB := h.B + 1
+ oldBucketnum := bucketShift(oldB)
+ newBucketnum := bucketShift(newB)
+ newCap := newBucketnum * bucketCnt
+
+ newBucketArray := makeBucketArray(t, newB)
+ newMask := newBucketnum - 1
+ hash0 := uintptr(h.hash0)
+
+ for bucket := uintptr(0); bucket < oldBucketnum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ base := add(unsafe.Pointer(b), dataOffset) // key and value start
+ status := matchFull(b.tophash)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := *(*uint32)(add(base, i*4))
+ e := add(base, bucketCnt*4+i*uintptr(t.elemsize))
+ mapassignwithoutgrow_fast32(t, hash0, newMask, newBucketArray, k, e)
+ status.RemoveNextMatch()
}
}

- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
+ h.B = newB
+ h.flags &^= iterator
+ h.buckets = newBucketArray
+ h.growthLeft = int(newCap) - h.count
+}
+
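+// mapassignwithoutgrow_fast32 inserts key/elem into the new bucket array
+// during growbig_fast32; the key is known not to be present, so only an
+// empty or deleted slot has to be found.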
+func mapassignwithoutgrow_fast32(t *maptype, hash0, mask uintptr, buckets unsafe.Pointer, key uint32, elem unsafe.Pointer) {
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), hash0)
+ top := tophash(hash)
+ p := newProbe(hash, mask)
+
+ // The key is not in the map.
+ for {
+ b := (*bmap)(add(buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Check empty slot or deleted slot.
+ status := matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
+ b.tophash[i] = top
+ base := add(unsafe.Pointer(b), dataOffset)
+ k := add(unsafe.Pointer(base), i*4)
+ *(*uint32)(k) = key
+ e := add(unsafe.Pointer(base), bucketCnt*4+i*uintptr(t.elemsize))
+ typedmemmove(t.elem, e, elem)
+ return
+ }
+ p.Next()
}
}
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index 2967360..ade10e6 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -21,33 +21,60 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
- if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
+
+ B := h.B
+ if B == 0 {
+ // No need to hash when there is only one bucket.
+ b := (*bmap)(h.buckets)
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 8) {
+ if *(*uint64)(k) == key && isFull(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
}
}
+ return unsafe.Pointer(&zeroVal[0])
}
- return unsafe.Pointer(&zeroVal[0])
+
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ top := tophash(hash)
+ mask := bucketMask(B)
+ p := newProbe(hash, mask)
+
+ if B <= loopaccessB {
+ goto loopaccess
+ }
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*8)
+ if *(*uint64)(k) == key {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
+ }
+ status.RemoveNextMatch()
+ }
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ p.Next()
+ }
+loopaccess:
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 8) {
+ if *(*uint64)(k) == key && isFull(b.tophash[i]) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
+ }
+ }
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ p.Next()
+ }
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
@@ -61,33 +88,60 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
- if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
+
+ B := h.B
+ if B == 0 {
+ // No need to hash when there is only one bucket.
+ b := (*bmap)(h.buckets)
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 8) {
+ if *(*uint64)(k) == key && isFull(b.tophash[i]) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
}
}
+ return unsafe.Pointer(&zeroVal[0]), false
}
- return unsafe.Pointer(&zeroVal[0]), false
+
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ top := tophash(hash)
+ mask := bucketMask(B)
+ p := newProbe(hash, mask)
+
+ if B <= loopaccessB {
+ goto loopaccess
+ }
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*8)
+ if *(*uint64)(k) == key {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
+ }
+ status.RemoveNextMatch()
+ }
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ p.Next()
+ }
+loopaccess:
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 8) {
+ if *(*uint64)(k) == key && isFull(b.tophash[i]) {
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
+ }
+ }
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ p.Next()
+ }
}

func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
@@ -107,70 +161,72 @@
h.flags ^= hashWriting

if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ // Init an empty map.
+ h.buckets = makeBucketArray(t, 0)
+ h.growthLeft = bucketCnt
}

-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
+ if h.needGrow() {
+ grow_fast64(h, t)
}
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+ p := newProbe(hash, bucketMask(h.B))

var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer

-bucketloop:
+ var (
+ b *bmap
+ status bitmask64
+ top = tophash(hash)
+ )
+ // Check whether the key is already in the map.
for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
- k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
- if k != key {
- continue
+ k := add(unsafe.Pointer(b), dataOffset+i*8)
+ if *(*uint64)(k) == key {
+ insertb = b
+ inserti = i
+ goto done
}
+ status.RemoveNextMatch()
+ }
+ if matchEmpty(b.tophash) != 0 {
+ goto insertnewkey
+ }
+ p.Next()
+ }
+
+insertnewkey:
+ // The key is not in the map.
+ p.Reset(hash)
+ for {
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Look for the first empty or deleted slot along the probe sequence.
+ status = matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
insertb = b
inserti = i
+ insertb.tophash[i] = top
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+ // Store new key at insert position.
+ *(*uint64)(insertk) = key
+ h.growthLeft -= 1
+ h.count += 1
goto done
}
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
+ p.Next()
}
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
- // store new key at insert position
- *(*uint64)(insertk) = key
-
- h.count++
-
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
@@ -197,70 +253,64 @@
h.flags ^= hashWriting

if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ // Init an empty map.
+ h.buckets = makeBucketArray(t, 0)
+ h.growthLeft = bucketCnt
}

-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
+ if h.needGrow() {
+ grow_fast64(h, t)
}
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+
+ p := newProbe(hash, bucketMask(h.B))

var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer

-bucketloop:
+ var (
+ b *bmap
+ status bitmask64
+ top = tophash(hash)
+ )
+ // Check whether the key is already in the map.
for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ for i, k := uintptr(0), add(unsafe.Pointer(b), dataOffset); i < bucketCnt; i, k = i+1, add(k, 8) {
+ if *(*unsafe.Pointer)(k) == key && isFull(b.tophash[i]) {
+ insertb = b
+ inserti = i
+ goto done
}
- k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
- if k != key {
- continue
- }
- insertb = b
- inserti = i
- goto done
}
- ovf := b.overflow(t)
- if ovf == nil {
+ if matchEmpty(b.tophash) != 0 {
break
}
- b = ovf
+ p.Next()
}

- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
+ // The key is not in the map.
+ p.Reset(hash)
+ for {
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Look for the first empty or deleted slot along the probe sequence.
+ status = matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
+ insertb = b
+ inserti = i
+ insertb.tophash[i] = top
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+ // Store new key at insert position.
+ *(*unsafe.Pointer)(insertk) = key
+ h.growthLeft -= 1
+ h.count += 1
+ goto done
+ }
+ p.Next()
}
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
- // store new key at insert position
- *(*unsafe.Pointer)(insertk) = key
-
- h.count++
-
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
@@ -284,187 +334,246 @@

hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

- // Set hashWriting after calling t.hasher for consistency with mapdelete
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
+ // in which case we have not actually done a write (delete).
h.flags ^= hashWriting

- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- bOrig := b
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
- if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
- continue
- }
- // Only clear key if there are pointers in it.
- if t.key.ptrdata != 0 {
- if goarch.PtrSize == 8 {
- *(*unsafe.Pointer)(k) = nil
- } else {
- // There are three ways to squeeze at one ore more 32 bit pointers into 64 bits.
- // Just call memclrHasPointers instead of trying to handle all cases here.
- memclrHasPointers(k, 8)
- }
- }
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
- if t.elem.ptrdata != 0 {
- memclrHasPointers(e, t.elem.size)
- } else {
- memclrNoHeapPointers(e, t.elem.size)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == bucketCnt-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = bucketCnt - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = fastrand()
- }
- break search
- }
- }
+ p := newProbe(hash, bucketMask(h.B))
+ top := tophash(hash)

+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := add(unsafe.Pointer(b), dataOffset+i*8)
+ if key == *(*uint64)(k) {
+ // Found this key.
+ h.count -= 1
+ // Only clear key if there are pointers in it.
+ if t.key.ptrdata != 0 {
+ if goarch.PtrSize == 8 {
+ *(*unsafe.Pointer)(k) = nil
+ } else {
+ // There are three ways to squeeze one or more 32 bit pointers into 64 bits.
+ // Just call memclrHasPointers instead of trying to handle all cases here.
+ memclrHasPointers(k, 8)
+ }
+ }
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
+ } else {
+ memclrNoHeapPointers(e, t.elem.size)
+ }
+ // Update tophash.
+ if matchEmpty(b.tophash) == 0 {
+ // There is no EMPTY slot in this bucket, so other keys' probe sequences
+ // may pass through it; mark the slot DELETED so those lookups keep probing.
+ b.tophash[i] = deletedSlot
+ } else {
+ h.growthLeft += 1
+ b.tophash[i] = emptySlot
+ }
+ goto done
+ }
+ status.RemoveNextMatch()
+ }
+ if matchEmpty(b.tophash) != 0 {
+ // The key is not in this map.
+ goto done
+ }
+ p.Next()
+ }
+done:
+ if h.count == 0 {
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ h.hash0 = fastrand()
+ }
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
}

-func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_fast64(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_fast64(t, h, h.nevacuate)
+func grow_fast64(h *hmap, t *maptype) {
+ cap := bucketShift(h.B) * bucketCnt
+ if uintptr(h.count*32) <= cap*25 && (h.flags&iterator != iterator) {
+ // Rehash in place if the current size is <= 25/32 of capacity.
+ // If an iterator may be using the buckets, same-size growth is disabled,
+ // since moving entries between buckets would break the iteration.
+ growsamesize_fast64(h, t)
+ } else {
+ growbig_fast64(h, t)
}
}

-func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
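+// growsamesize_fast64 rehashes the table in place for uint64 keys; see
+// growsamesize_fast32 for a description of the scheme.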
+func growsamesize_fast64(h *hmap, t *maptype) {
+ bucketNum := bucketShift(h.B)
+ mask := bucketNum - 1
+ // For all buckets:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ for bucket := uintptr(0); bucket < bucketNum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ prepareSameSizeGrow(&b.tophash)
+ }
+ // Temporary key and value used to swap.
+ tmpk := newobject(t.key)
+ tmpe := newobject(t.elem)

- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*8)
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*8)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*8)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
+ for bucket := uintptr(0); bucket < bucketNum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ for i := uintptr(0); i < bucketCnt; {
+ if b.tophash[i] != deletedSlot {
+ i++
+ continue
+ }
+ base := add(unsafe.Pointer(b), dataOffset)
+ k := add(base, i*8)
+ e := add(base, bucketCnt*8+i*uintptr(t.elemsize))
+ hash := t.hasher(noescape(unsafe.Pointer((*uint64)(k))), uintptr(h.hash0))
+ top := tophash(hash)
+ // Find the first empty or deleted slot along the probe sequence.
+ var (
+ dstb *bmap
+ dsti uintptr
+ dstp = newProbe(hash, mask)
+ )
+ for {
+ dstb = (*bmap)(add(h.buckets, dstp.Bucket()*uintptr(t.bucketsize)))
+ status := matchEmptyOrDeleted(dstb.tophash)
+ dsti = status.NextMatch()
+ if dsti < bucketCnt {
+ break
}
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
+ dstp.Next()
+ }

- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
+ // The target bucket is the same.
+ if dstp.Bucket() == bucket {
+ // Just mark slot as FULL.
+ b.tophash[i] = top
+ i += 1
+ continue
+ }

- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*8)
- }
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
+ dstbase := add(unsafe.Pointer(dstb), dataOffset) // key and value start
+ dstk := add(unsafe.Pointer(dstbase), dsti*8)
+ dste := add(unsafe.Pointer(dstbase), bucketCnt*8+dsti*uintptr(t.elemsize))

- // Copy key.
- if t.key.ptrdata != 0 && writeBarrier.enabled {
+ // Target is in another bucket.
+ switch dstb.tophash[dsti] {
+ case emptySlot:
+ // 1. Transfer element to target
+ // 2. Mark target as FULL
+ // 3. Mark slot as EMPTY
+
+ // Store new key and value at insert position.
+ *(*uint64)(dstk) = *(*uint64)(k)
+ typedmemmove(t.elem, dste, e)
+
+ // Clear key and value.
+ if t.key.ptrdata != 0 {
if goarch.PtrSize == 8 {
- // Write with a write barrier.
- *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
+ *(*unsafe.Pointer)(k) = nil
} else {
- // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
- // Give up and call typedmemmove.
- typedmemmove(t.key, dst.k, k)
+ // There are three ways to squeeze one or more 32 bit pointers into 64 bits.
+ // Just call memclrHasPointers instead of trying to handle all cases here.
+ memclrHasPointers(k, 8)
}
+ }
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, uintptr(t.elemsize))
} else {
- *(*uint64)(dst.k) = *(*uint64)(k)
+ memclrNoHeapPointers(e, uintptr(t.elemsize))
}

- typedmemmove(t.elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 8)
- dst.e = add(dst.e, uintptr(t.elemsize))
+ dstb.tophash[dsti] = top
+ b.tophash[i] = emptySlot
+ i++
+ case deletedSlot:
+ // 1. Swap current element with target element
+ // 2. Mark target as FULL
+ // 3. Repeat the procedure for the current slot with the element moved from the target
+
+ // tmpk,tmpe = dstk,dste
+ *(*uint64)(tmpk) = *(*uint64)(dstk)
+ typedmemmove(t.elem, tmpe, dste)
+
+ // dstk,dste = k,e
+ *(*uint64)(dstk) = *(*uint64)(k)
+ typedmemmove(t.elem, dste, e)
+
+ // k,e = tmpk,tmpe
+ *(*uint64)(k) = *(*uint64)(tmpk)
+ typedmemmove(t.elem, e, tmpe)
+
+ dstb.tophash[dsti] = top
}
}
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
+ }
+ h.growthLeft = int(bucketNum*bucketCnt) - h.count
+}
+
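+// growbig_fast64 doubles the bucket array and reinserts every FULL slot;
+// it is the uint64-key counterpart of growbig_fast32.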
+func growbig_fast64(h *hmap, t *maptype) {
+ oldB := h.B
+ newB := h.B + 1
+ oldBucketnum := bucketShift(oldB)
+ newBucketnum := bucketShift(newB)
+ newCap := newBucketnum * bucketCnt
+
+ newBucketArray := makeBucketArray(t, newB)
+ newMask := newBucketnum - 1
+ hash0 := uintptr(h.hash0)
+
+ for bucket := uintptr(0); bucket < oldBucketnum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ base := add(unsafe.Pointer(b), dataOffset) // key and value start
+ status := matchFull(b.tophash)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ k := *(*uint64)(add(base, i*8))
+ e := add(base, bucketCnt*8+i*uintptr(t.elemsize))
+ mapassignwithoutgrow_fast64(t, hash0, newMask, newBucketArray, k, e)
+ status.RemoveNextMatch()
}
}

- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
+ h.B = newB
+ h.flags &^= iterator
+ h.buckets = newBucketArray
+ h.growthLeft = int(newCap) - h.count
+}
+
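+// mapassignwithoutgrow_fast64 inserts key/elem into the new bucket array
+// during growbig_fast64; the key is known not to be present.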
+func mapassignwithoutgrow_fast64(t *maptype, hash0, mask uintptr, buckets unsafe.Pointer, key uint64, elem unsafe.Pointer) {
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), hash0)
+ top := tophash(hash)
+ p := newProbe(hash, mask)
+
+ // The key is not in the map.
+ for {
+ b := (*bmap)(add(buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Check empty slot or deleted slot.
+ status := matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
+ b.tophash[i] = top
+ base := add(unsafe.Pointer(b), dataOffset)
+ k := add(unsafe.Pointer(base), i*8)
+ *(*uint64)(k) = key
+ e := add(unsafe.Pointer(base), bucketCnt*8+i*uintptr(t.elemsize))
+ typedmemmove(t.elem, e, elem)
+ return
+ }
+ p.Next()
}
}
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 006c24c..65eaa90b 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -21,6 +21,7 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
+
key := stringStructOf(&ky)
if h.B == 0 {
// One-bucket table.
@@ -29,10 +30,7 @@
// short key, doing lots of comparisons is ok
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
+ if k.len != key.len || !isFull(b.tophash[i]) {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
@@ -45,10 +43,7 @@
keymaybe := uintptr(bucketCnt)
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
+ if k.len != key.len || !isFull(b.tophash[i]) {
continue
}
if k.str == key.str {
@@ -78,31 +73,30 @@
}
dohash:
hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
+
+ p := newProbe(hash, bucketMask(h.B))
+
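+ // Unlike the fast32/fast64 variants there is no loopaccess fast path
+ // here; the bitmask match is presumably already cheaper than comparing
+ // string keys slot by slot.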
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ kptr := add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize)
+ k := (*stringStruct)(kptr)
+ if k.len == key.len && (k.str == key.str || memequal(k.str, key.str, uintptr(key.len))) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
+ status.RemoveNextMatch()
}
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ p.Next()
}
- return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
@@ -116,6 +110,7 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map read and map write")
}
+
key := stringStructOf(&ky)
if h.B == 0 {
// One-bucket table.
@@ -124,10 +119,7 @@
// short key, doing lots of comparisons is ok
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
+ if k.len != key.len || !isFull(b.tophash[i]) {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
@@ -140,10 +132,7 @@
keymaybe := uintptr(bucketCnt)
for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
+ if k.len != key.len || !isFull(b.tophash[i]) {
continue
}
if k.str == key.str {
@@ -173,34 +162,33 @@
}
dohash:
hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
+
+ p := newProbe(hash, bucketMask(h.B))
+
+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+ kptr := add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize)
+ k := (*stringStruct)(kptr)
+ if k.len == key.len && (k.str == key.str || memequal(k.str, key.str, uintptr(key.len))) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
+ status.RemoveNextMatch()
}
+ if matchEmpty(b.tophash) != 0 {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ p.Next()
}
- return unsafe.Pointer(&zeroVal[0]), false
}

-func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
+func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
@@ -211,84 +199,85 @@
if h.flags&hashWriting != 0 {
fatal("concurrent map writes")
}
- key := stringStructOf(&s)
- hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+ key := stringStructOf(&ky)
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

// Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting

if h.buckets == nil {
- h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+ // Init an empty map.
+ h.buckets = makeBucketArray(t, 0)
+ h.growthLeft = bucketCnt
}

-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_faststr(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
top := tophash(hash)

+ if h.needGrow() {
+ grow_faststr(h, t)
+ }
+
+ p := newProbe(hash, bucketMask(h.B))
+
var insertb *bmap
var inserti uintptr
var insertk unsafe.Pointer

-bucketloop:
+ var (
+ b *bmap
+ status bitmask64
+ )
+ // Check whether the key is already in the map.
for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if isEmpty(b.tophash[i]) && insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status = matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
- if k.len != key.len {
- continue
+ kptr := add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize)
+ k := (*stringStruct)(kptr)
+ if k.len == key.len && (k.str == key.str || memequal(k.str, key.str, uintptr(key.len))) {
+ insertb = b
+ inserti = i
+ // Overwrite existing key, so it can be garbage collected.
+ // The size is already guaranteed to be set correctly.
+ k.str = key.str
+ goto done
}
- if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
- continue
- }
- // already have a mapping for key. Update it.
- inserti = i
- insertb = b
- // Overwrite existing key, so it can be garbage collected.
- // The size is already guaranteed to be set correctly.
- k.str = key.str
- goto done
+ status.RemoveNextMatch()
}
- ovf := b.overflow(t)
- if ovf == nil {
+ if matchEmpty(b.tophash) != 0 {
break
}
- b = ovf
+ p.Next()
}

- // Did not find mapping for key. Allocate new cell & add entry.
+ // The key is not in the map.
+ p.Reset(hash)
+ for {
+ b = (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Look for the first empty or deleted slot along the probe sequence.
+ status = matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
+ insertb = b
+ inserti = i
+ insertb.tophash[i] = top
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
+ // store new key at insert position
+ *((*stringStruct)(insertk)) = *key

- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
+ h.growthLeft -= 1
+ h.count += 1
+ goto done
+ }
+ // No free slot in this bucket; probe the next one.
+ p.Next()
}
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
- // store new key at insert position
- *((*stringStruct)(insertk)) = *key
- h.count++
-
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
@@ -313,173 +302,232 @@
key := stringStructOf(&ky)
hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

- // Set hashWriting after calling t.hasher for consistency with mapdelete
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
+ // in which case we have not actually done a write (delete).
h.flags ^= hashWriting

- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_faststr(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
- bOrig := b
+ p := newProbe(hash, bucketMask(h.B))
top := tophash(hash)
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
- }
- if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
- continue
- }
- // Clear key's pointer.
- k.str = nil
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
- if t.elem.ptrdata != 0 {
- memclrHasPointers(e, t.elem.size)
- } else {
- memclrNoHeapPointers(e, t.elem.size)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == bucketCnt-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = bucketCnt - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = fastrand()
- }
- break search
- }
- }

+ for {
+ b := (*bmap)(add(h.buckets, p.Bucket()*uintptr(t.bucketsize)))
+ status := matchTopHash(b.tophash, top)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
+ }
+ kptr := add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize)
+ k := (*stringStruct)(kptr)
+ if k.len == key.len && (k.str == key.str || memequal(k.str, key.str, uintptr(key.len))) {
+ // Found this key.
+ h.count -= 1
+ k.str = nil
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, t.elem.size)
+ } else {
+ memclrNoHeapPointers(e, t.elem.size)
+ }
+ // Update tophash.
+ if matchEmpty(b.tophash) == 0 {
+ // There is no EMPTY slot in this bucket, so other keys' probe sequences
+ // may pass through it; mark the slot DELETED so those lookups keep probing.
+ b.tophash[i] = deletedSlot
+ } else {
+ h.growthLeft += 1
+ b.tophash[i] = emptySlot
+ }
+ goto done
+ }
+ status.RemoveNextMatch()
+ }
+ if matchEmpty(b.tophash) != 0 {
+ // The key is not in this map.
+ goto done
+ }
+ p.Next()
+ }
+done:
+ if h.count == 0 {
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ h.hash0 = fastrand()
+ }
if h.flags&hashWriting == 0 {
fatal("concurrent map writes")
}
h.flags &^= hashWriting
}

-func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_faststr(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_faststr(t, h, h.nevacuate)
+func grow_faststr(h *hmap, t *maptype) {
+ cap := bucketShift(h.B) * bucketCnt
+ if uintptr(h.count*32) <= cap*25 && (h.flags&iterator != iterator) {
+ // Rehash in place if the current size is <= 25/32 of capacity.
+ // If there may be an iterator using the buckets, we disable growsamesize,
+ // because moving entries to different buckets would break the iterator.
+ growsamesize_faststr(h, t)
+ } else {
+ growbig_faststr(h, t)
}
}

-func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
+func growbig_faststr(h *hmap, t *maptype) {
+ oldB := h.B
+ newB := h.B + 1
+ oldBucketnum := bucketShift(oldB)
+ newBucketnum := bucketShift(newB)
+ newCap := newBucketnum * bucketCnt

- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
+ newBucketArray := makeBucketArray(t, newB)
+ newMask := newBucketnum - 1
+ hash0 := uintptr(h.hash0)

- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*2*goarch.PtrSize)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
-
- if dst.i == bucketCnt {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
- }
- dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
- // Copy key.
- *(*string)(dst.k) = *(*string)(k)
-
- typedmemmove(t.elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 2*goarch.PtrSize)
- dst.e = add(dst.e, uintptr(t.elemsize))
+ for bucket := uintptr(0); bucket < oldBucketnum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ base := add(unsafe.Pointer(b), dataOffset) // key and value start
+ status := matchFull(b.tophash)
+ for {
+ i := status.NextMatch()
+ if i >= bucketCnt {
+ break
}
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
- b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.bucketsize) - dataOffset
- memclrHasPointers(ptr, n)
+ kptr := add(base, i*2*goarch.PtrSize)
+ k := (*stringStruct)(kptr)
+ e := add(base, bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+ mapassignwithoutgrow_faststr(t, hash0, newMask, newBucketArray, k, e)
+ status.RemoveNextMatch()
}
}

- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
+ h.B = newB
+ h.flags &^= iterator
+ h.buckets = newBucketArray
+ h.growthLeft = int(newCap) - h.count
+}
+
+func growsamesize_faststr(h *hmap, t *maptype) {
+ bucketNum := bucketShift(h.B)
+ mask := bucketNum - 1
+ // For all buckets:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ for bucket := uintptr(0); bucket < bucketNum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ prepareSameSizeGrow(&b.tophash)
+ }
+ // Temporary key and value used to swap.
+ tmpk := newobject(t.key)
+ tmpe := newobject(t.elem)
+
+ for bucket := uintptr(0); bucket < bucketNum; bucket++ {
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
+ for i := uintptr(0); i < bucketCnt; {
+ if b.tophash[i] != deletedSlot {
+ i++
+ continue
+ }
+ base := add(unsafe.Pointer(b), dataOffset)
+ k := add(base, i*2*goarch.PtrSize)
+ e := add(base, bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+ hash := t.hasher(noescape(unsafe.Pointer(k)), uintptr(h.hash0))
+ top := tophash(hash)
+ // Find the first empty or deleted slot.
+ var (
+ dstb *bmap
+ dsti uintptr
+ dstp = newProbe(hash, mask)
+ )
+ for {
+ dstb = (*bmap)(add(h.buckets, dstp.Bucket()*uintptr(t.bucketsize)))
+ status := matchEmptyOrDeleted(dstb.tophash)
+ dsti = status.NextMatch()
+ if dsti < bucketCnt {
+ break
+ }
+ dstp.Next()
+ }
+
+ // The target bucket is the same.
+ if dstp.Bucket() == bucket {
+ // Just mark slot as FULL.
+ b.tophash[i] = top
+ i += 1
+ continue
+ }
+
+ dstbase := add(unsafe.Pointer(dstb), dataOffset) // key and value start
+ dstk := add(unsafe.Pointer(dstbase), dsti*2*goarch.PtrSize)
+ dste := add(unsafe.Pointer(dstbase), bucketCnt*2*goarch.PtrSize+dsti*uintptr(t.elemsize))
+
+ // Target is in another bucket.
+ switch dstb.tophash[dsti] {
+ case emptySlot:
+ // 1. Transfer element to target
+ // 2. Mark target as FULL
+ // 3. Mark slot as EMPTY
+
+ // Store new key and value at insert position.
+ *(*stringStruct)(dstk) = *(*stringStruct)(k)
+ typedmemmove(t.elem, dste, e)
+
+ // Clear key and value.
+ (*stringStruct)(k).str = nil
+
+ if t.elem.ptrdata != 0 {
+ memclrHasPointers(e, uintptr(t.elemsize))
+ } else {
+ memclrNoHeapPointers(e, uintptr(t.elemsize))
+ }
+
+ dstb.tophash[dsti] = top
+ b.tophash[i] = emptySlot
+ i++
+ case deletedSlot:
+ // 1. Swap current element with target element
+ // 2. Mark target as FULL
+ // 3. Repeat procedure for current slot with moved from element (target)
+
+ // tmpk,tmpe = dstk,dste
+ *(*stringStruct)(tmpk) = *(*stringStruct)(dstk)
+ typedmemmove(t.elem, tmpe, dste)
+
+ // dstk,dste = k,e
+ *(*stringStruct)(dstk) = *(*stringStruct)(k)
+ typedmemmove(t.elem, dste, e)
+
+ // k,e = tmpk,tmpe
+ *(*stringStruct)(k) = *(*stringStruct)(tmpk)
+ typedmemmove(t.elem, e, tmpe)
+
+ dstb.tophash[dsti] = top
+ }
+ }
+ }
+ h.growthLeft = int(bucketNum*bucketCnt) - h.count
+}
+
+func mapassignwithoutgrow_faststr(t *maptype, hash0, mask uintptr, buckets unsafe.Pointer, key *stringStruct, elem unsafe.Pointer) {
+ hash := t.hasher(noescape(unsafe.Pointer(key)), hash0)
+ top := tophash(hash)
+ p := newProbe(hash, mask)
+
+ // The key is not in the map.
+ for {
+ b := (*bmap)(add(buckets, p.Bucket()*uintptr(t.bucketsize)))
+ // Check empty slot or deleted slot.
+ status := matchEmptyOrDeleted(b.tophash)
+ i := status.NextMatch()
+ if i < bucketCnt {
+ // Insert key and value.
+ b.tophash[i] = top
+ base := add(unsafe.Pointer(b), dataOffset)
+ k := add(unsafe.Pointer(base), i*2*goarch.PtrSize)
+ *((*stringStruct)(k)) = *key
+ e := add(unsafe.Pointer(base), bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
+ typedmemmove(t.elem, e, elem)
+ return
+ }
+ p.Next()
}
}
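
As a standalone illustration (not part of the CL), the rehash-in-place decision in grow_faststr above boils down to the following runnable sketch; bucketShift and bucketCnt mirror names used in the patch, everything else is illustrative.

package main

import "fmt"

const bucketCnt = 8 // slots per bucket, as in the CL

// bucketShift returns 2^b, mirroring the runtime helper of the same name.
func bucketShift(b uint8) uintptr { return uintptr(1) << b }

// shouldGrowSameSize restates the check in grow_faststr: rehash in place
// when the live count is at most 25/32 of capacity and no iterator may be
// walking the buckets; otherwise double the number of buckets.
func shouldGrowSameSize(count int, B uint8, hasIterator bool) bool {
	cap := bucketShift(B) * bucketCnt
	return uintptr(count*32) <= cap*25 && !hasIterator
}

func main() {
	// With B=2 there are 4 buckets and 32 slots; 25/32 of that is 25 entries.
	fmt.Println(shouldGrowSameSize(20, 2, false)) // true: mostly tombstones, rehash in place
	fmt.Println(shouldGrowSameSize(26, 2, false)) // false: genuinely full, double instead
	fmt.Println(shouldGrowSameSize(20, 2, true))  // false: an iterator forbids moving entries
}
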
diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go
index 4afbae6..169eb6e 100644
--- a/src/runtime/map_test.go
+++ b/src/runtime/map_test.go
@@ -8,6 +8,7 @@
"fmt"
"internal/goarch"
"math"
+ "math/rand"
"reflect"
"runtime"
"sort"
@@ -20,8 +21,8 @@
func TestHmapSize(t *testing.T) {
// The structure of hmap is defined in runtime/map.go
// and in cmd/compile/internal/gc/reflect.go and must be in sync.
- // The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
- var hmapSize = uintptr(8 + 5*goarch.PtrSize)
+ // The size of hmap should be 32 bytes on 64 bit and 20 bytes on 32 bit platforms.
+ var hmapSize = uintptr(8 + 3*goarch.PtrSize)
if runtime.RuntimeHmapSize != hmapSize {
t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
}
@@ -145,6 +146,204 @@
}
}

+func TestMapInt64SameSizeGrow(t *testing.T) {
+ type ktype int64
+ type vtype int
+ type kv struct {
+ k ktype
+ v vtype
+ }
+ mcap := (runtime.RuntimeBucketCnt - 1) * 16
+ m := make(map[ktype]vtype, mcap)
+ for i := 0; i < 1000; i++ {
+ var addk []kv
+ for i := len(m); i < mcap; i++ {
+ r := rand.Int()
+ k := ktype(r)
+ v := vtype(i)
+ m[k] = v
+ addk = append(addk, kv{k, v})
+ }
+ for _, elem := range addk {
+ k, v := elem.k, elem.v
+ got, ok := m[k]
+ if !ok || got != v {
+ t.Fatalf("got wrong value (%v,%v), expected (%v,true)", got, ok, v)
+ }
+ got = m[k]
+ if got != v {
+ t.Fatalf("got wrong value %v, expected %v", got, v)
+ }
+ }
+ for _, elem := range addk {
+ delete(m, elem.k)
+ if len(m) == 16 {
+ break
+ }
+ }
+ }
+}
+
+func TestMapInt32SameSizeGrow(t *testing.T) {
+ type ktype int32
+ type vtype int
+ type kv struct {
+ k ktype
+ v vtype
+ }
+ mcap := (runtime.RuntimeBucketCnt - 1) * 16
+ m := make(map[ktype]vtype, mcap)
+ for i := 0; i < 1000; i++ {
+ var addk []kv
+ for i := len(m); i < mcap; i++ {
+ r := rand.Int()
+ k := ktype(r)
+ v := vtype(i)
+ m[k] = v
+ addk = append(addk, kv{k, v})
+ }
+ for _, elem := range addk {
+ k, v := elem.k, elem.v
+ got, ok := m[k]
+ if !ok || got != v {
+ t.Fatalf("got wrong value (%v,%v), expected (%v,true)", got, ok, v)
+ }
+ got = m[k]
+ if got != v {
+ t.Fatalf("got wrong value %v, expected %v", got, v)
+ }
+ }
+ for _, elem := range addk {
+ delete(m, elem.k)
+ if len(m) == 16 {
+ break
+ }
+ }
+ }
+}
+
+func TestMapFloat64SameSizeGrow(t *testing.T) {
+ type ktype float64
+ type vtype int
+ type kv struct {
+ k ktype
+ v vtype
+ }
+ mcap := (runtime.RuntimeBucketCnt - 1) * 16
+ m := make(map[ktype]vtype, mcap)
+ for i := 0; i < 1000; i++ {
+ var addk []kv
+ for i := len(m); i < mcap; i++ {
+ r := rand.Int()
+ for r == 0 {
+ r = rand.Int()
+ }
+ k := ktype(r)
+ v := vtype(i)
+ m[k] = v
+ addk = append(addk, kv{k, v})
+ }
+ for _, elem := range addk {
+ k, v := elem.k, elem.v
+ got, ok := m[k]
+ if !ok || got != v {
+ t.Fatalf("got wrong value (%v,%v), expected (%v,true)", got, ok, v)
+ }
+ got = m[k]
+ if got != v {
+ t.Fatalf("got wrong value %v, expected %v", got, v)
+ }
+ }
+ for _, elem := range addk {
+ delete(m, elem.k)
+ if len(m) == 16 {
+ break
+ }
+ }
+ }
+}
+
+func TestMapStrSameSizeGrow(t *testing.T) {
+ type ktype string
+ type vtype int
+ type kv struct {
+ k ktype
+ v vtype
+ }
+ mcap := (runtime.RuntimeBucketCnt - 1) * 16
+ m := make(map[ktype]vtype, mcap)
+ for i := 0; i < 1000; i++ {
+ var addk []kv
+ for i := len(m); i < mcap; i++ {
+ r := rand.Int()
+ k := ktype(strconv.Itoa(r))
+ v := vtype(i)
+ m[k] = v
+ addk = append(addk, kv{k, v})
+ }
+ for _, elem := range addk {
+ k, v := elem.k, elem.v
+ got, ok := m[k]
+ if !ok || got != v {
+ t.Fatalf("got wrong value (%v,%v), expected (%v,true)", got, ok, v)
+ }
+ got = m[k]
+ if got != v {
+ t.Fatalf("got wrong value %v, expected %v", got, v)
+ }
+ }
+ for _, elem := range addk {
+ delete(m, elem.k)
+ if len(m) == 16 {
+ break
+ }
+ }
+ }
+}
+
+func TestMapIndirectKeyValueSameSizeGrow(t *testing.T) {
+ type ktype [129]byte
+ type vtype [129]byte
+ type kv struct {
+ k ktype
+ v vtype
+ }
+ mcap := (runtime.RuntimeBucketCnt - 1) * 16
+ m := make(map[ktype]vtype, mcap)
+ for i := 0; i < 1000; i++ {
+ var addk []kv
+ for i := len(m); i < mcap; i++ {
+ var k [129]byte
+ r := rand.Uint32()
+ k[0] = byte(r)
+ k[1] = byte(r >> 8)
+ k[2] = byte(r >> 16)
+ k[3] = byte(r >> 24)
+ v := k
+ v[0] = byte(i)
+ m[k] = v
+ addk = append(addk, kv{k, v})
+ }
+ for _, elem := range addk {
+ k, v := elem.k, elem.v
+ got, ok := m[k]
+ if !ok || got != v {
+ t.Fatalf("got wrong value (%v,%v), expected (%v,true)", got, ok, v)
+ }
+ got = m[k]
+ if got != v {
+ t.Fatalf("got wrong value %v, expected %v", got, v)
+ }
+ }
+ for _, elem := range addk {
+ delete(m, elem.k)
+ if len(m) == 16 {
+ break
+ }
+ }
+ }
+}
+
// Maps aren't actually copied on assignment.
func TestAlias(t *testing.T) {
m := make(map[int]int, 0)
@@ -682,10 +881,12 @@
{-1, 1, 1},
{0, 1, 1},
{1, 1, 1},
- {8, 1, 1},
+ {7, 1, 1},
+ {8, 2, 2},
{9, 2, 2},
{13, 2, 2},
- {14, 4, 4},
+ {14, 2, 2},
+ {15, 4, 4},
{26, 4, 4},
}

@@ -1149,31 +1350,6 @@
}
}

-func TestMapTombstones(t *testing.T) {
- m := map[int]int{}
- const N = 10000
- // Fill a map.
- for i := 0; i < N; i++ {
- m[i] = i
- }
- runtime.MapTombstoneCheck(m)
- // Delete half of the entries.
- for i := 0; i < N; i += 2 {
- delete(m, i)
- }
- runtime.MapTombstoneCheck(m)
- // Add new entries to fill in holes.
- for i := N; i < 3*N/2; i++ {
- m[i] = i
- }
- runtime.MapTombstoneCheck(m)
- // Delete everything.
- for i := 0; i < 3*N/2; i++ {
- delete(m, i)
- }
- runtime.MapTombstoneCheck(m)
-}
-
type canString int

func (c canString) String() string {
diff --git a/src/runtime/mapbithack.go b/src/runtime/mapbithack.go
new file mode 100644
index 0000000..adfedbe
--- /dev/null
+++ b/src/runtime/mapbithack.go
@@ -0,0 +1,80 @@
+package runtime
+
+import "runtime/internal/sys"
+
+func matchEmptyOrDeleted(tophash [bucketCnt]uint8) bitmask64 {
+ // The high bit is set for both empty slot and deleted slot.
+ tophashs := littleEndianBytesToUint64(tophash)
+ return bitmask64(emptyOrDeletedMask & tophashs)
+}
+
+func matchEmpty(tophash [bucketCnt]uint8) bitmask64 {
+ // Same as matchTopHash(tophash, emptySlot), but faster.
+ //
+ // The high bit is set for both empty slot and deleted slot.
+ // (tophashs & emptyOrDeletedMask) gets all empty or deleted slots.
+ // (tophashs << 1) clears the high bit for deletedSlot.
+ // ANDing them we can get all the empty slots.
+ tophashs := littleEndianBytesToUint64(tophash)
+ return bitmask64((tophashs << 1) & tophashs & emptyOrDeletedMask)
+}
+
+// matchTopHash returns a bitmask indicating all bytes in the tophash which *may*
+// have the given value.
+//
+// For the technique, see:
+// http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
+// (Determine if a word has a byte equal to n).
+//
+// Caveat: there are false positives but:
+// - they only occur if there is a real match
+// - they will be handled gracefully by subsequent checks in code
+func matchTopHash(tophash [bucketCnt]uint8, top uint8) bitmask64 {
+ tophashs := littleEndianBytesToUint64(tophash)
+ cmp := tophashs ^ (uint64(0x0101_0101_0101_0101) * uint64(top))
+ return bitmask64((cmp - 0x0101_0101_0101_0101) & ^cmp & 0x8080_8080_8080_8080)
+}
+
+func matchFull(tophash [bucketCnt]uint8) bitmask64 {
+ // If a slot is neither empty nor deleted, then it must be FULL.
+ tophashs := littleEndianBytesToUint64(tophash)
+ return bitmask64(emptyOrDeletedMask & ^tophashs)
+}
+
+func prepareSameSizeGrow(tophash *[bucketCnt]uint8) {
+ // For all slots:
+ // Mark DELETED as EMPTY
+ // Mark FULL as DELETED
+ tophashs := littleEndianBytesToUint64(*tophash)
+ full := ^tophashs & emptyOrDeletedMask
+ full = ^full + (full >> 7)
+ *tophash = littleEndianUint64ToBytes(full)
+}
+
+// bitmask64 contains the result of a `match` operation on a `bucket`.
+//
+// The bitmask is arranged so that low-order bits represent lower memory
+// addresses for group match results.
+//
+// For the generic version, the bits in the bitmask are sparsely packed, so
+// that only one bit per byte is used (the high bit, bit 7).
+type bitmask64 uint64
+
+// NextMatch returns the byte index of the lowest set bit (low-order).
+// If there is no match, it returns bucketCnt.
+func (b *bitmask64) NextMatch() uintptr {
+ return uintptr(sys.TrailingZeros64(uint64(*b)) / bucketCnt)
+}
+
+// RemoveNextMatch removes the lowest set bit in the bitmask.
+func (b *bitmask64) RemoveNextMatch() {
+ *b = *b & (*b - 1)
+}
+
+func littleEndianBytesToUint64(v [8]uint8) uint64 {
+ return uint64(v[0]) | uint64(v[1])<<8 | uint64(v[2])<<16 | uint64(v[3])<<24 | uint64(v[4])<<32 | uint64(v[5])<<40 | uint64(v[6])<<48 | uint64(v[7])<<56
+}
+
+func littleEndianUint64ToBytes(v uint64) [8]uint8 {
+ return [8]uint8{uint8(v), uint8(v >> 8), uint8(v >> 16), uint8(v >> 24), uint8(v >> 32), uint8(v >> 40), uint8(v >> 48), uint8(v >> 56)}
+}
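
To make the byte-matching above easier to follow, here is a self-contained sketch (not part of the CL) of the same "byte equal to n" bithack that matchTopHash and bitmask64 rely on; the names and sample control bytes are illustrative only.

package main

import (
	"fmt"
	"math/bits"
)

const groupSize = 8 // one bucket's worth of control bytes

// load packs 8 control bytes into a uint64, low byte first,
// like littleEndianBytesToUint64 above.
func load(group [groupSize]uint8) uint64 {
	var v uint64
	for i, b := range group {
		v |= uint64(b) << (8 * i)
	}
	return v
}

// matchByte sets the high bit of every byte of g that equals top,
// mirroring matchTopHash.
func matchByte(g uint64, top uint8) uint64 {
	cmp := g ^ (0x0101_0101_0101_0101 * uint64(top))
	return (cmp - 0x0101_0101_0101_0101) & ^cmp & 0x8080_8080_8080_8080
}

func main() {
	group := [groupSize]uint8{0x12, 0x7f, 0x12, 0x33, 0xff, 0x12, 0x44, 0x55}
	m := matchByte(load(group), 0x12)
	// Walk the matches lowest slot first, like NextMatch/RemoveNextMatch.
	for m != 0 {
		fmt.Println("match at slot", bits.TrailingZeros64(m)/8) // prints 0, 2, 5
		m &= m - 1
	}
}
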
diff --git a/src/runtime/runtime-gdb.py b/src/runtime/runtime-gdb.py
index 5bb605c..0bb8c86 100644
--- a/src/runtime/runtime-gdb.py
+++ b/src/runtime/runtime-gdb.py
@@ -162,34 +162,23 @@
def children(self):
B = self.val['B']
buckets = self.val['buckets']
- oldbuckets = self.val['oldbuckets']
flags = self.val['flags']
inttype = self.val['hash0'].type
cnt = 0
for bucket in xrange(2 ** int(B)):
bp = buckets + bucket
- if oldbuckets:
- oldbucket = bucket & (2 ** (B - 1) - 1)
- oldbp = oldbuckets + oldbucket
- oldb = oldbp.dereference()
- if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
- if bucket >= 2 ** (B - 1):
- continue # already did old bucket
- bp = oldbp
- while bp:
- b = bp.dereference()
- for i in xrange(8):
- if b['tophash'][i] != 0:
- k = b['keys'][i]
- v = b['values'][i]
- if flags & 1:
- k = k.dereference()
- if flags & 2:
- v = v.dereference()
- yield str(cnt), k
- yield str(cnt + 1), v
- cnt += 2
- bp = b['overflow']
+ b = bp.dereference()
+ for i in xrange(8):
+ if b['tophash'][i] != 255:
+ k = b['keys'][i]
+ v = b['values'][i]
+ if flags & 1:
+ k = k.dereference()
+ if flags & 2:
+ v = v.dereference()
+ yield str(cnt), k
+ yield str(cnt + 1), v
+ cnt += 2


class ChanTypePrinter:
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 929f8fa..b97ca80 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -121,7 +121,7 @@
}

// exported value for testing
-const hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
+const hashLoad = float32(maxItemInBucket) / float32(bucketCnt)

//go:nosplit
func fastrand() uint32 {
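
As a further illustration (not part of the CL), the per-byte transform inside prepareSameSizeGrow above can be seen in isolation below. The concrete byte values are assumptions consistent with the bit operations in mapbithack.go and the gdb printer change (emptySlot = 0xff, deletedSlot = 0x80, FULL slots keep the high bit clear); the CL's actual constants live in map.go, which is not quoted here.

package main

import "fmt"

// Assumed encoding: FULL slots store a 7-bit tophash (high bit clear),
// deletedSlot = 0x80 and emptySlot = 0xff (high bit set for both).
const emptyOrDeletedMask = 0x8080_8080_8080_8080

// prepare mirrors prepareSameSizeGrow: DELETED bytes become EMPTY and
// FULL bytes become DELETED, in one pass over the packed uint64.
func prepare(tophashs uint64) uint64 {
	full := ^tophashs & emptyOrDeletedMask // 0x80 in every byte that was FULL
	return ^full + (full >> 7)             // FULL -> 0x80, EMPTY/DELETED -> 0xff
}

func main() {
	// Bytes, low to high: FULL(0x12), DELETED(0x80), EMPTY(0xff), FULL(0x33), four EMPTY.
	in := uint64(0xffff_ffff_33ff_8012)
	fmt.Printf("%016x\n", prepare(in)) // prints ffffffff80ffff80
}
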

To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: go
Gerrit-Branch: master
Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
Gerrit-Change-Number: 426614
Gerrit-PatchSet: 1
Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
Gerrit-MessageType: newchange

Matthew Dempsky (Gerrit)

unread,
Aug 30, 2022, 3:14:37 AM8/30/22
to 张云浩, goph...@pubsubhelper.golang.org, Dmitry Vyukov, Michael Hudson-Doyle, Keith Randall, Martin Möhrmann, Cherry Mui, Michael Knyszek, Michael Pratt, Robert Griesemer, Russ Cox, Ian Lance Taylor, Austin Clements, Than McIntosh, Gopher Robot, golang-co...@googlegroups.com

Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Hudson-Doyle, Michael Knyszek, Michael Pratt, Robert Griesemer, Russ Cox, Than McIntosh.

Matthew Dempsky removed Dmitry Vyukov from this change.

View Change

To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: go
Gerrit-Branch: master
Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
Gerrit-Change-Number: 426614
Gerrit-PatchSet: 1
Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
Gerrit-Reviewer: Austin Clements <aus...@google.com>
Gerrit-Reviewer: Cherry Mui <cher...@google.com>
Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
Gerrit-Reviewer: Keith Randall <k...@golang.org>
Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
Gerrit-Reviewer: Michael Hudson-Doyle <michael...@canonical.com>
Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
Gerrit-Reviewer: Robert Griesemer <g...@golang.org>
Gerrit-Reviewer: Russ Cox <r...@golang.org>
Gerrit-Reviewer: Than McIntosh <th...@google.com>
Gerrit-CC: Gopher Robot <go...@golang.org>
Gerrit-Attention: Michael Knyszek <mkny...@google.com>
Gerrit-Attention: Austin Clements <aus...@google.com>
Gerrit-Attention: Michael Pratt <mpr...@google.com>
Gerrit-Attention: Robert Griesemer <g...@golang.org>
Gerrit-Attention: Than McIntosh <th...@google.com>
Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
Gerrit-Attention: Russ Cox <r...@golang.org>
Gerrit-Attention: Keith Randall <k...@golang.org>
Gerrit-Attention: Michael Hudson-Doyle <michael...@canonical.com>
Gerrit-Attention: Cherry Mui <cher...@google.com>
Gerrit-MessageType: deleteReviewer

Matthew Dempsky (Gerrit)

unread,
Aug 30, 2022, 3:14:39 AM8/30/22
to 张云浩, goph...@pubsubhelper.golang.org, Robert Griesemer, Michael Hudson-Doyle, Keith Randall, Martin Möhrmann, Cherry Mui, Michael Knyszek, Michael Pratt, Russ Cox, Ian Lance Taylor, Austin Clements, Than McIntosh, Gopher Robot, golang-co...@googlegroups.com

Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Hudson-Doyle, Michael Knyszek, Michael Pratt, Russ Cox, Than McIntosh.

Matthew Dempsky removed Robert Griesemer from this change.

View Change

To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: go
Gerrit-Branch: master
Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
Gerrit-Change-Number: 426614
Gerrit-PatchSet: 1
Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
Gerrit-Reviewer: Austin Clements <aus...@google.com>
Gerrit-Reviewer: Cherry Mui <cher...@google.com>
Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
Gerrit-Reviewer: Keith Randall <k...@golang.org>
Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
Gerrit-Reviewer: Michael Hudson-Doyle <michael...@canonical.com>
Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
Gerrit-Reviewer: Russ Cox <r...@golang.org>
Gerrit-Reviewer: Than McIntosh <th...@google.com>
Gerrit-CC: Gopher Robot <go...@golang.org>
Gerrit-Attention: Michael Knyszek <mkny...@google.com>
Gerrit-Attention: Austin Clements <aus...@google.com>
Gerrit-Attention: Michael Pratt <mpr...@google.com>

Matthew Dempsky (Gerrit)

unread,
Aug 30, 2022, 3:14:47 AM8/30/22
to 张云浩, goph...@pubsubhelper.golang.org, Than McIntosh, Michael Hudson-Doyle, Keith Randall, Martin Möhrmann, Cherry Mui, Michael Knyszek, Michael Pratt, Russ Cox, Ian Lance Taylor, Austin Clements, Gopher Robot, golang-co...@googlegroups.com

Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Hudson-Doyle, Michael Knyszek, Michael Pratt, Russ Cox.

Matthew Dempsky removed Than McIntosh from this change.

View Change

To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: go
Gerrit-Branch: master
Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
Gerrit-Change-Number: 426614
Gerrit-PatchSet: 1
Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
Gerrit-Reviewer: Austin Clements <aus...@google.com>
Gerrit-Reviewer: Cherry Mui <cher...@google.com>
Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
Gerrit-Reviewer: Keith Randall <k...@golang.org>
Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
Gerrit-Reviewer: Michael Hudson-Doyle <michael...@canonical.com>
Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
Gerrit-Reviewer: Russ Cox <r...@golang.org>
Gerrit-CC: Gopher Robot <go...@golang.org>
Gerrit-Attention: Michael Knyszek <mkny...@google.com>
Gerrit-Attention: Austin Clements <aus...@google.com>
Gerrit-Attention: Michael Pratt <mpr...@google.com>

Matthew Dempsky (Gerrit)

unread,
Aug 30, 2022, 3:14:50 AM8/30/22
to 张云浩, goph...@pubsubhelper.golang.org, Michael Hudson-Doyle, Keith Randall, Martin Möhrmann, Cherry Mui, Michael Knyszek, Michael Pratt, Russ Cox, Ian Lance Taylor, Austin Clements, Gopher Robot, golang-co...@googlegroups.com

Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, Russ Cox.

Matthew Dempsky removed Michael Hudson-Doyle from this change.

View Change

To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: go
Gerrit-Branch: master
Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
Gerrit-Change-Number: 426614
Gerrit-PatchSet: 1
Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
Gerrit-Reviewer: Austin Clements <aus...@google.com>
Gerrit-Reviewer: Cherry Mui <cher...@google.com>
Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
Gerrit-Reviewer: Keith Randall <k...@golang.org>
Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
Gerrit-Reviewer: Russ Cox <r...@golang.org>
Gerrit-CC: Gopher Robot <go...@golang.org>
Gerrit-Attention: Michael Knyszek <mkny...@google.com>
Gerrit-Attention: Austin Clements <aus...@google.com>
Gerrit-Attention: Michael Pratt <mpr...@google.com>
Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
Gerrit-Attention: Russ Cox <r...@golang.org>
Gerrit-Attention: Keith Randall <k...@golang.org>

Matthew Dempsky (Gerrit)

unread,
Aug 30, 2022, 3:14:53 AM8/30/22
to 张云浩, goph...@pubsubhelper.golang.org, Russ Cox, Keith Randall, Martin Möhrmann, Cherry Mui, Michael Knyszek, Michael Pratt, Ian Lance Taylor, Austin Clements, Gopher Robot, golang-co...@googlegroups.com

Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt.

Matthew Dempsky removed Russ Cox from this change.

View Change

To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: go
Gerrit-Branch: master
Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
Gerrit-Change-Number: 426614
Gerrit-PatchSet: 1
Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
Gerrit-Reviewer: Austin Clements <aus...@google.com>
Gerrit-Reviewer: Cherry Mui <cher...@google.com>
Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
Gerrit-Reviewer: Keith Randall <k...@golang.org>
Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
Gerrit-CC: Gopher Robot <go...@golang.org>
Gerrit-Attention: Michael Knyszek <mkny...@google.com>
Gerrit-Attention: Austin Clements <aus...@google.com>
Gerrit-Attention: Michael Pratt <mpr...@google.com>
Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>

张云浩 (Gerrit)

unread,
Aug 30, 2022, 4:18:44 AM8/30/22
to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt.

张云浩 uploaded patch set #3 to this change.

View Change

runtime: use SwissTable

We suggest using SwissTable in the runtime to replace the original implementation of the hashmap.

SwissTable is now used in abseil and rust by default. We borrowed ideas from the two implementations and combined some optimization ideas of Go's original hashmap to construct the current implementation.

- abseil: https://abseil.io/about/design/swisstables
- rust: https://github.com/rust-lang/hashbrown

For #54766

Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
---
M src/cmd/compile/internal/reflectdata/reflect.go
M src/cmd/compile/internal/test/inl_test.go
M src/cmd/compile/internal/walk/builtin.go
M src/cmd/link/internal/ld/dwarf.go
M src/reflect/value.go
M src/runtime/export_test.go
M src/runtime/map.go
M src/runtime/map_fast32.go
M src/runtime/map_fast64.go
M src/runtime/map_faststr.go
M src/runtime/map_test.go
A src/runtime/mapbithack.go
M src/runtime/runtime-gdb.py
M src/runtime/stubs.go
14 files changed, 1,985 insertions(+), 1,707 deletions(-)

To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: go
Gerrit-Branch: master
Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
Gerrit-Change-Number: 426614
Gerrit-PatchSet: 3
Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
Gerrit-Reviewer: Austin Clements <aus...@google.com>
Gerrit-Reviewer: Cherry Mui <cher...@google.com>
Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
Gerrit-Reviewer: Keith Randall <k...@golang.org>
Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
Gerrit-CC: Gopher Robot <go...@golang.org>
Gerrit-Attention: Michael Knyszek <mkny...@google.com>
Gerrit-Attention: Austin Clements <aus...@google.com>
Gerrit-Attention: Michael Pratt <mpr...@google.com>
Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
Gerrit-Attention: Keith Randall <k...@golang.org>
Gerrit-Attention: Cherry Mui <cher...@google.com>
Gerrit-MessageType: newpatchset

Martin Möhrmann (Gerrit)

unread,
Aug 30, 2022, 4:24:14 AM8/30/22
to 张云浩, goph...@pubsubhelper.golang.org, Keith Randall, Cherry Mui, Michael Knyszek, Michael Pratt, Ian Lance Taylor, Austin Clements, Gopher Robot, golang-co...@googlegroups.com

Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Michael Knyszek, Michael Pratt, 张云浩.

View Change

7 comments:

  • Patchset:

    • Patch Set #1:

      thanks. made a short first pass, definitely not exhaustive.

      Can we clearly call out in the CL description which map properties changed with this CL? e.g. could be:

      • maps immediately grow in place, leading to a potentially long wait on map assign
      • might require more memory for some maps (I think there can be cases where before we had spare overflow buckets and now we don't use them; also at most 7 items per bucket, not 8). A rough sizing sketch follows this list.
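
      For concreteness, here is a tiny sketch (not part of the CL) of how the 7-of-8 load factor translates into bucket counts; it reproduces the updated table in map_test.go ({7,1,1}, {8,2,2}, {14,2,2}, {15,4,4}), assuming maxItemInBucket is 7 as suggested by the new hashLoad constant in stubs.go.

      package main

      import "fmt"

      const (
      	bucketCnt       = 8 // slots per bucket
      	maxItemInBucket = 7 // assumed: new load factor is 7/8, per hashLoad
      )

      // bucketsForHint returns how many buckets (a power of two) a map needs
      // so that hint entries stay within the 7-of-8 load factor.
      func bucketsForHint(hint int) int {
      	b := 1
      	for hint > b*maxItemInBucket {
      		b *= 2
      	}
      	return b
      }

      func main() {
      	for _, hint := range []int{1, 7, 8, 13, 14, 15, 26} {
      		fmt.Println(hint, "->", bucketsForHint(hint)) // 1, 1, 2, 2, 2, 4, 4
      	}
      }
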
  • File src/runtime/map.go:

    • // makemap_small implements Go map creation for make(map[k]v) and

    • // make(map[k]v, hint) when hint is known to be at most bucketCnt

    • // at compile time and the map needs to be allocated on the heap.

    • func makemap_small() *hmap {
      h := new(hmap)
      h.hash0 = fastrand()
      h.growthLeft = bucketCnt
      return h
      }

      please move back where it was before to reduce diff.

    • Patch Set #1, Line 248: 0xffff_ffff_ffff_ffff

      please define a constant and reference by name here.

    • Patch Set #1, Line 248: 0xffff_ffff_ffff_ffff

      would it be possible to not write to each bucket, but instead have all zeros (which we can set with memclr) be the all-empty state?

    • Patch Set #1, Line 796:

    • 	// Only clear buckets if there are pointers in it.

To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

Gerrit-Project: go
Gerrit-Branch: master
Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
Gerrit-Change-Number: 426614
Gerrit-PatchSet: 1
Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
Gerrit-Reviewer: Austin Clements <aus...@google.com>
Gerrit-Reviewer: Cherry Mui <cher...@google.com>
Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
Gerrit-Reviewer: Keith Randall <k...@golang.org>
Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
Gerrit-CC: Gopher Robot <go...@golang.org>
Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
Gerrit-Attention: Michael Knyszek <mkny...@google.com>
Gerrit-Attention: Austin Clements <aus...@google.com>
Gerrit-Attention: Michael Pratt <mpr...@google.com>
Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
Gerrit-Attention: Keith Randall <k...@golang.org>
Gerrit-Attention: Cherry Mui <cher...@google.com>
Gerrit-Comment-Date: Tue, 30 Aug 2022 08:24:07 +0000
Gerrit-HasComments: Yes
Gerrit-Has-Labels: No
Gerrit-MessageType: comment

Martin Möhrmann (Gerrit)

unread,
Aug 30, 2022, 4:24:19 AM8/30/22
to 张云浩, goph...@pubsubhelper.golang.org, Keith Randall, Cherry Mui, Michael Knyszek, Michael Pratt, Ian Lance Taylor, Austin Clements, Gopher Robot, golang-co...@googlegroups.com

Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Michael Knyszek, Michael Pratt, 张云浩.

Patch set 3:Run-TryBot +1

View Change

    To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

    Gerrit-Project: go
    Gerrit-Branch: master
    Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
    Gerrit-Change-Number: 426614
    Gerrit-PatchSet: 3
    Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
    Gerrit-Reviewer: Austin Clements <aus...@google.com>
    Gerrit-Reviewer: Cherry Mui <cher...@google.com>
    Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
    Gerrit-Reviewer: Keith Randall <k...@golang.org>
    Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
    Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
    Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
    Gerrit-CC: Gopher Robot <go...@golang.org>
    Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
    Gerrit-Attention: Michael Knyszek <mkny...@google.com>
    Gerrit-Attention: Austin Clements <aus...@google.com>
    Gerrit-Attention: Michael Pratt <mpr...@google.com>
    Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
    Gerrit-Attention: Keith Randall <k...@golang.org>
    Gerrit-Attention: Cherry Mui <cher...@google.com>
    Gerrit-Comment-Date: Tue, 30 Aug 2022 08:24:12 +0000
    Gerrit-HasComments: No
    Gerrit-Has-Labels: Yes
    Gerrit-MessageType: comment

    张云浩 (Gerrit)

    unread,
    Aug 30, 2022, 5:10:31 AM8/30/22
    to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

    Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, 张云浩.

    张云浩 uploaded patch set #4 to this change.

    View Change

    The following approvals got outdated and were removed: Run-TryBot+1 by Martin Möhrmann, TryBot-Result-1 by Gopher Robot

    runtime: use SwissTable

    We suggest using SwissTable in the runtime to replace the original implementation of the hashmap.

    SwissTable is now used in abseil and rust by default. We borrowed ideas from the two implementations and combined some optimization ideas of Go's original hashmap to construct the current implementation.

    - abseil: https://abseil.io/about/design/swisstables
    - rust: https://github.com/rust-lang/hashbrown

    map properties changed with this CL:
    - maps immediately grow in place, leading to a potential long wait on map assign
    - it might require more memory for some maps (there can be cases where before we had spare overflow buckets and now we don't use them; also at most 7 items per bucket, not 8)


    For #54766

    Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
    ---
    M src/cmd/compile/internal/reflectdata/reflect.go
    M src/cmd/compile/internal/test/inl_test.go
    M src/cmd/compile/internal/walk/builtin.go
    M src/cmd/link/internal/ld/dwarf.go
    M src/reflect/value.go
    M src/runtime/export_test.go
    M src/runtime/map.go
    M src/runtime/map_fast32.go
    M src/runtime/map_fast64.go
    M src/runtime/map_faststr.go
    M src/runtime/map_test.go
    A src/runtime/mapbithack.go
    M src/runtime/runtime-gdb.py
    M src/runtime/stubs.go
    14 files changed, 1,988 insertions(+), 1,703 deletions(-)

    To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

    Gerrit-Project: go
    Gerrit-Branch: master
    Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
    Gerrit-Change-Number: 426614
    Gerrit-PatchSet: 4
    Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
    Gerrit-Reviewer: Austin Clements <aus...@google.com>
    Gerrit-Reviewer: Cherry Mui <cher...@google.com>
    Gerrit-Reviewer: Gopher Robot <go...@golang.org>
    Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
    Gerrit-Reviewer: Keith Randall <k...@golang.org>
    Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
    Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
    Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
    Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
    Gerrit-Attention: Michael Knyszek <mkny...@google.com>
    Gerrit-Attention: Austin Clements <aus...@google.com>
    Gerrit-Attention: Michael Pratt <mpr...@google.com>
    Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
    Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
    Gerrit-Attention: Keith Randall <k...@golang.org>
    Gerrit-Attention: Cherry Mui <cher...@google.com>
    Gerrit-MessageType: newpatchset

    张云浩 (Gerrit)

    unread,
    Aug 30, 2022, 5:13:20 AM8/30/22
    to goph...@pubsubhelper.golang.org, Gopher Robot, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Michael Pratt, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

    Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt.

    View Change

    8 comments:

    • Patchset:

      • Patch Set #1:

        thanks. made a short first pass, definitely not exhaustive. […]

        Done

    • Patchset:

    • File src/runtime/map.go:

      • Done

      • Patch Set #1, Line 111:


        func (b *bmap) keys() unsafe.Pointer {
        return add(unsafe.Pointer(b), dataOffset)
        }

        please move back where it was before to reduce diff

      • Done

      • Patch Set #1, Line 229:

        // makemap_small implements Go map creation for make(map[k]v) and
        // make(map[k]v, hint) when hint is known to be at most bucketCnt
        // at compile time and the map needs to be allocated on the heap.
        func makemap_small() *hmap {
        h := new(hmap)
        h.hash0 = fastrand()
        h.growthLeft = bucketCnt
        return h
        }

        please move back where it was before to reduce diff.

      • Done

      • would it be possible not need to write to each bucket but have all zeros which we can use memclr for […]

        Considered this approach before :) But because it requires changing the theoretical model of SwissTable, it would need a lot of validation work (such as whether SIMD supports this change). To avoid too many changes in one CL, maybe we could do it in a subsequent CL?

      • Done

      • Patch Set #1, Line 796:

        	// Only clear buckets if there are pointers in it.
        if t.bucket.ptrdata != 0 {
        memclrHasPointers(buckets, size)
        }

      • Should memclrNoHeapPointers if elems have no pointers same as delete does to not misalign behaviour: […]

        Done. Maybe this is the reason why the TryBot failed. I have encountered the same problem before, but it is hard to reproduce this problem on my PC.

    To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

    Gerrit-Project: go
    Gerrit-Branch: master
    Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
    Gerrit-Change-Number: 426614
    Gerrit-PatchSet: 4
    Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
    Gerrit-Reviewer: Austin Clements <aus...@google.com>
    Gerrit-Reviewer: Cherry Mui <cher...@google.com>
    Gerrit-Reviewer: Gopher Robot <go...@golang.org>
    Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
    Gerrit-Reviewer: Keith Randall <k...@golang.org>
    Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
    Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
    Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
    Gerrit-Attention: Michael Knyszek <mkny...@google.com>
    Gerrit-Attention: Austin Clements <aus...@google.com>
    Gerrit-Attention: Michael Pratt <mpr...@google.com>
    Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
    Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
    Gerrit-Attention: Keith Randall <k...@golang.org>
    Gerrit-Attention: Cherry Mui <cher...@google.com>
    Gerrit-Comment-Date: Tue, 30 Aug 2022 09:13:13 +0000
    Gerrit-HasComments: Yes
    Gerrit-Has-Labels: No
    Comment-In-Reply-To: Martin Möhrmann <moeh...@google.com>
    Gerrit-MessageType: comment

    张云浩 (Gerrit)

    unread,
    Aug 30, 2022, 5:24:45 AM8/30/22
    to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

    Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt.

    张云浩 uploaded patch set #5 to this change.

    View Change

    runtime: use SwissTable

    To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

    Gerrit-Project: go
    Gerrit-Branch: master
    Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
    Gerrit-Change-Number: 426614
    Gerrit-PatchSet: 5
    Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
    Gerrit-Reviewer: Austin Clements <aus...@google.com>
    Gerrit-Reviewer: Cherry Mui <cher...@google.com>
    Gerrit-Reviewer: Gopher Robot <go...@golang.org>
    Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
    Gerrit-Reviewer: Keith Randall <k...@golang.org>
    Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
    Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
    Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
    Gerrit-Attention: Michael Knyszek <mkny...@google.com>
    Gerrit-Attention: Austin Clements <aus...@google.com>
    Gerrit-Attention: Michael Pratt <mpr...@google.com>
    Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
    Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
    Gerrit-Attention: Keith Randall <k...@golang.org>
    Gerrit-Attention: Cherry Mui <cher...@google.com>
    Gerrit-MessageType: newpatchset

    Martin Möhrmann (Gerrit)

    unread,
    Aug 30, 2022, 5:37:35 AM8/30/22
    to 张云浩, goph...@pubsubhelper.golang.org, Gopher Robot, Keith Randall, Cherry Mui, Michael Knyszek, Michael Pratt, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

    Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Michael Knyszek, Michael Pratt, 张云浩.

    Patch set 5:Run-TryBot +1

    View Change

      To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

      Gerrit-Project: go
      Gerrit-Branch: master
      Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
      Gerrit-Change-Number: 426614
      Gerrit-PatchSet: 5
      Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
      Gerrit-Reviewer: Austin Clements <aus...@google.com>
      Gerrit-Reviewer: Cherry Mui <cher...@google.com>
      Gerrit-Reviewer: Gopher Robot <go...@golang.org>
      Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
      Gerrit-Reviewer: Keith Randall <k...@golang.org>
      Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
      Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
      Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
      Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
      Gerrit-Attention: Michael Knyszek <mkny...@google.com>
      Gerrit-Attention: Austin Clements <aus...@google.com>
      Gerrit-Attention: Michael Pratt <mpr...@google.com>
      Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
      Gerrit-Attention: Keith Randall <k...@golang.org>
      Gerrit-Attention: Cherry Mui <cher...@google.com>
      Gerrit-Comment-Date: Tue, 30 Aug 2022 09:37:28 +0000

      张云浩 (Gerrit)

      unread,
      Aug 30, 2022, 5:56:50 AM8/30/22
      to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

      Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, 张云浩.

      张云浩 uploaded patch set #6 to this change.

      View Change

      The following approvals got outdated and were removed: Run-TryBot+1 by Martin Möhrmann, TryBot-Result-1 by Gopher Robot

      runtime: use SwissTable
      14 files changed, 1,990 insertions(+), 1,703 deletions(-)

      To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

      Gerrit-Project: go
      Gerrit-Branch: master
      Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
      Gerrit-Change-Number: 426614
      Gerrit-PatchSet: 6
      Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
      Gerrit-Reviewer: Austin Clements <aus...@google.com>
      Gerrit-Reviewer: Cherry Mui <cher...@google.com>
      Gerrit-Reviewer: Gopher Robot <go...@golang.org>
      Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
      Gerrit-Reviewer: Keith Randall <k...@golang.org>
      Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
      Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
      Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
      Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
      Gerrit-Attention: Michael Knyszek <mkny...@google.com>
      Gerrit-Attention: Austin Clements <aus...@google.com>
      Gerrit-Attention: Michael Pratt <mpr...@google.com>
      Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
      Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
      Gerrit-Attention: Keith Randall <k...@golang.org>
      Gerrit-Attention: Cherry Mui <cher...@google.com>
      Gerrit-MessageType: newpatchset

      张云浩 (Gerrit)

      unread,
      Aug 30, 2022, 6:03:20 AM8/30/22
      to goph...@pubsubhelper.golang.org, Gopher Robot, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Michael Pratt, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

      Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt.

      View Change

      1 comment:

      • Patchset:

        • Patch Set #6:

          Maybe the TryBot failed because `mapclear` uses an invalid pointer? Patch set 6 tries to fix it, but I can't reproduce this error on my PC.

      To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

      Gerrit-Project: go
      Gerrit-Branch: master
      Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
      Gerrit-Change-Number: 426614
      Gerrit-PatchSet: 6
      Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
      Gerrit-Reviewer: Austin Clements <aus...@google.com>
      Gerrit-Reviewer: Cherry Mui <cher...@google.com>
      Gerrit-Reviewer: Gopher Robot <go...@golang.org>
      Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
      Gerrit-Reviewer: Keith Randall <k...@golang.org>
      Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
      Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
      Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
      Gerrit-Attention: Michael Knyszek <mkny...@google.com>
      Gerrit-Attention: Austin Clements <aus...@google.com>
      Gerrit-Attention: Michael Pratt <mpr...@google.com>
      Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
      Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
      Gerrit-Attention: Keith Randall <k...@golang.org>
      Gerrit-Attention: Cherry Mui <cher...@google.com>
      Gerrit-Comment-Date: Tue, 30 Aug 2022 10:03:14 +0000
      Gerrit-HasComments: Yes
      Gerrit-Has-Labels: No
      Gerrit-MessageType: comment

      Michael Pratt (Gerrit)

      unread,
      Aug 30, 2022, 10:39:37 AM8/30/22
      to 张云浩, goph...@pubsubhelper.golang.org, Michael Pratt, Gopher Robot, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

      Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, 张云浩.

      Patch set 6:Run-TryBot +1

      View Change

        To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

        Gerrit-Project: go
        Gerrit-Branch: master
        Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
        Gerrit-Change-Number: 426614
        Gerrit-PatchSet: 6
        Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
        Gerrit-Reviewer: Austin Clements <aus...@google.com>
        Gerrit-Reviewer: Cherry Mui <cher...@google.com>
        Gerrit-Reviewer: Gopher Robot <go...@golang.org>
        Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Reviewer: Keith Randall <k...@golang.org>
        Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
        Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
        Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
        Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
        Gerrit-Attention: Michael Knyszek <mkny...@google.com>
        Gerrit-Attention: Austin Clements <aus...@google.com>
        Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
        Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Attention: Keith Randall <k...@golang.org>
        Gerrit-Attention: Cherry Mui <cher...@google.com>
        Gerrit-Comment-Date: Tue, 30 Aug 2022 14:39:33 +0000

        David Chase (Gerrit)

        unread,
        Aug 30, 2022, 11:25:36 AM8/30/22
        to 张云浩, goph...@pubsubhelper.golang.org, Gopher Robot, Michael Pratt, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

        Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, 张云浩.

        View Change

        2 comments:

        • File src/runtime/map.go:

          • Patch Set #6, Line 99: growthLeft int

            Would like a comment explaining what this is.

          • Patch Set #6, Line 183: overLoadFactor

            A description of what this means would help. My initial impression is that a "factor" would be a number, but this returns a boolean, so maybe a different name would work instead.

        To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

        Gerrit-Project: go
        Gerrit-Branch: master
        Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
        Gerrit-Change-Number: 426614
        Gerrit-PatchSet: 6
        Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
        Gerrit-Reviewer: Austin Clements <aus...@google.com>
        Gerrit-Reviewer: Cherry Mui <cher...@google.com>
        Gerrit-Reviewer: Gopher Robot <go...@golang.org>
        Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Reviewer: Keith Randall <k...@golang.org>
        Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
        Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
        Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
        Gerrit-CC: David Chase <drc...@google.com>
        Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
        Gerrit-Attention: Michael Knyszek <mkny...@google.com>
        Gerrit-Attention: Austin Clements <aus...@google.com>
        Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
        Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Attention: Keith Randall <k...@golang.org>
        Gerrit-Attention: Cherry Mui <cher...@google.com>
        Gerrit-Comment-Date: Tue, 30 Aug 2022 15:25:31 +0000

        张云浩 (Gerrit)

        unread,
        Aug 31, 2022, 6:04:03 AM8/31/22
        to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

        Attention is currently required from: Austin Clements, Cherry Mui, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, 张云浩.

        张云浩 uploaded patch set #7 to this change.

        View Change

        The following approvals got outdated and were removed: Run-TryBot+1 by Michael Pratt, TryBot-Result-1 by Gopher Robot

        14 files changed, 2,214 insertions(+), 1,703 deletions(-)

        To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

        Gerrit-Project: go
        Gerrit-Branch: master
        Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
        Gerrit-Change-Number: 426614
        Gerrit-PatchSet: 7
        Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
        Gerrit-Reviewer: Austin Clements <aus...@google.com>
        Gerrit-Reviewer: Cherry Mui <cher...@google.com>
        Gerrit-Reviewer: Gopher Robot <go...@golang.org>
        Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Reviewer: Keith Randall <k...@golang.org>
        Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
        Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
        Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
        Gerrit-CC: David Chase <drc...@google.com>
        Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
        Gerrit-Attention: Michael Knyszek <mkny...@google.com>
        Gerrit-Attention: Austin Clements <aus...@google.com>
        Gerrit-Attention: Michael Pratt <mpr...@google.com>
        Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
        Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Attention: Keith Randall <k...@golang.org>
        Gerrit-Attention: Cherry Mui <cher...@google.com>
        Gerrit-MessageType: newpatchset

        张云浩 (Gerrit)

        unread,
        Aug 31, 2022, 6:40:32 AM8/31/22
        to goph...@pubsubhelper.golang.org, David Chase, Gopher Robot, Michael Pratt, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

        Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt.

        View Change

        3 comments:

        • Patchset:

          • Patch Set #7:

            Patch set 7 should fix the TryBot failures. Can someone please rerun the TryBots? Thanks!

        • File src/runtime/map.go:

          • Done

          • A description of what this means would help. […]

            Changed to "isOverLoadFactor", which may read more clearly. Also moved it back (this function comes from the previous implementation and is only used inside `makemap`; there are some comments about it there.)
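
            For readers following along, here is a rough, hypothetical sketch (not the CL's actual code) of what such a predicate and its use in makemap could look like under the new load factor; maxItemInBucket and bucketShift mirror names used elsewhere in the patch.

            package main

            import "fmt"

            // Hypothetical sketch only; the real code and naming live in runtime/map.go.
            const maxItemInBucket = 7 // assumed: at most 7 of 8 slots used, per the new hashLoad constant

            func bucketShift(b uint8) uintptr { return uintptr(1) << b }

            // isOverLoadFactor reports whether count entries exceed the load factor
            // of a table with 2^B buckets.
            func isOverLoadFactor(count int, B uint8) bool {
            	return uintptr(count) > maxItemInBucket*bucketShift(B)
            }

            func main() {
            	// Roughly how makemap could pick B for a given hint.
            	for _, hint := range []int{7, 8, 15} {
            		B := uint8(0)
            		for isOverLoadFactor(hint, B) {
            			B++
            		}
            		fmt.Printf("hint %d -> B=%d (%d buckets)\n", hint, B, 1<<B)
            	}
            }
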

        To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

        Gerrit-Project: go
        Gerrit-Branch: master
        Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
        Gerrit-Change-Number: 426614
        Gerrit-PatchSet: 7
        Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
        Gerrit-Reviewer: Austin Clements <aus...@google.com>
        Gerrit-Reviewer: Cherry Mui <cher...@google.com>
        Gerrit-Reviewer: Gopher Robot <go...@golang.org>
        Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Reviewer: Keith Randall <k...@golang.org>
        Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
        Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
        Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
        Gerrit-CC: David Chase <drc...@google.com>
        Gerrit-Attention: Michael Knyszek <mkny...@google.com>
        Gerrit-Attention: Austin Clements <aus...@google.com>
        Gerrit-Attention: Michael Pratt <mpr...@google.com>
        Gerrit-Attention: David Chase <drc...@google.com>
        Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
        Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Attention: Keith Randall <k...@golang.org>
        Gerrit-Attention: Cherry Mui <cher...@google.com>
        Gerrit-Comment-Date: Wed, 31 Aug 2022 10:40:25 +0000
        Gerrit-HasComments: Yes
        Gerrit-Has-Labels: No
        Comment-In-Reply-To: David Chase <drc...@google.com>
        Gerrit-MessageType: comment

        张云浩 (Gerrit)

        unread,
        Aug 31, 2022, 6:40:46 AM8/31/22
        to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

        Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt.

        张云浩 uploaded patch set #8 to this change.

        View Change

        runtime: use SwissTable
        14 files changed, 2,217 insertions(+), 1,706 deletions(-)

        To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

        Gerrit-Project: go
        Gerrit-Branch: master
        Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
        Gerrit-Change-Number: 426614
        Gerrit-PatchSet: 8
        Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
        Gerrit-Reviewer: Austin Clements <aus...@google.com>
        Gerrit-Reviewer: Cherry Mui <cher...@google.com>
        Gerrit-Reviewer: Gopher Robot <go...@golang.org>
        Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Reviewer: Keith Randall <k...@golang.org>
        Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
        Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
        Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
        Gerrit-CC: David Chase <drc...@google.com>
        Gerrit-Attention: Michael Knyszek <mkny...@google.com>
        Gerrit-Attention: Austin Clements <aus...@google.com>
        Gerrit-Attention: Michael Pratt <mpr...@google.com>
        Gerrit-Attention: David Chase <drc...@google.com>
        Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
        Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Attention: Keith Randall <k...@golang.org>
        Gerrit-Attention: Cherry Mui <cher...@google.com>
        Gerrit-MessageType: newpatchset

        hopehook (Gerrit)

        unread,
        Aug 31, 2022, 6:59:02 AM
        to 张云浩, goph...@pubsubhelper.golang.org, David Chase, Gopher Robot, Michael Pratt, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

        Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, 张云浩.

        View Change

        3 comments:

        To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

        Gerrit-Project: go
        Gerrit-Branch: master
        Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
        Gerrit-Change-Number: 426614
        Gerrit-PatchSet: 8
        Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
        Gerrit-Reviewer: Austin Clements <aus...@google.com>
        Gerrit-Reviewer: Cherry Mui <cher...@google.com>
        Gerrit-Reviewer: Gopher Robot <go...@golang.org>
        Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Reviewer: Keith Randall <k...@golang.org>
        Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
        Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
        Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
        Gerrit-CC: David Chase <drc...@google.com>
        Gerrit-CC: hopehook <hope...@golangcn.org>
        Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
        Gerrit-Attention: Michael Knyszek <mkny...@google.com>
        Gerrit-Attention: Austin Clements <aus...@google.com>
        Gerrit-Attention: Michael Pratt <mpr...@google.com>
        Gerrit-Attention: David Chase <drc...@google.com>
        Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
        Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
        Gerrit-Attention: Keith Randall <k...@golang.org>
        Gerrit-Attention: Cherry Mui <cher...@google.com>
        Gerrit-Comment-Date: Wed, 31 Aug 2022 10:58:55 +0000
        Gerrit-HasComments: Yes
        Gerrit-Has-Labels: No
        Gerrit-MessageType: comment

        Than McIntosh (Gerrit)

        unread,
        Aug 31, 2022, 7:16:42 AM
        to 张云浩, goph...@pubsubhelper.golang.org, hopehook, David Chase, Gopher Robot, Michael Pratt, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

        Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, 张云浩.

        Patch set 8:Run-TryBot +1

        View Change

          To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

          Gerrit-Project: go
          Gerrit-Branch: master
          Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
          Gerrit-Change-Number: 426614
          Gerrit-PatchSet: 8
          Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
          Gerrit-Reviewer: Austin Clements <aus...@google.com>
          Gerrit-Reviewer: Cherry Mui <cher...@google.com>
          Gerrit-Reviewer: Gopher Robot <go...@golang.org>
          Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
          Gerrit-Reviewer: Keith Randall <k...@golang.org>
          Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
          Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
          Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
          Gerrit-Reviewer: Than McIntosh <th...@google.com>
          Gerrit-CC: David Chase <drc...@google.com>
          Gerrit-CC: hopehook <hope...@golangcn.org>
          Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
          Gerrit-Attention: Michael Knyszek <mkny...@google.com>
          Gerrit-Attention: Austin Clements <aus...@google.com>
          Gerrit-Attention: Michael Pratt <mpr...@google.com>
          Gerrit-Attention: David Chase <drc...@google.com>
          Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
          Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
          Gerrit-Attention: Keith Randall <k...@golang.org>
          Gerrit-Attention: Cherry Mui <cher...@google.com>
          Gerrit-Comment-Date: Wed, 31 Aug 2022 11:16:37 +0000

          张云浩 (Gerrit)

          unread,
          Aug 31, 2022, 7:32:15 AM
          to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

          Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, Than McIntosh, 张云浩.

          张云浩 uploaded patch set #9 to this change.

          View Change

          The following approvals got outdated and were removed: Run-TryBot+1 by Than McIntosh, TryBot-Result+1 by Gopher Robot

          To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

          Gerrit-Project: go
          Gerrit-Branch: master
          Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
          Gerrit-Change-Number: 426614
          Gerrit-PatchSet: 9
          Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
          Gerrit-Reviewer: Austin Clements <aus...@google.com>
          Gerrit-Reviewer: Cherry Mui <cher...@google.com>
          Gerrit-Reviewer: Gopher Robot <go...@golang.org>
          Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
          Gerrit-Reviewer: Keith Randall <k...@golang.org>
          Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
          Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
          Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
          Gerrit-Reviewer: Than McIntosh <th...@google.com>
          Gerrit-CC: David Chase <drc...@google.com>
          Gerrit-CC: hopehook <hope...@golangcn.org>
          Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
          Gerrit-Attention: Michael Knyszek <mkny...@google.com>
          Gerrit-Attention: Austin Clements <aus...@google.com>
          Gerrit-Attention: Michael Pratt <mpr...@google.com>
          Gerrit-Attention: Than McIntosh <th...@google.com>
          Gerrit-Attention: David Chase <drc...@google.com>
          Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
          Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
          Gerrit-Attention: Keith Randall <k...@golang.org>
          Gerrit-Attention: Cherry Mui <cher...@google.com>
          Gerrit-MessageType: newpatchset

          张云浩 (Gerrit)

          unread,
          Aug 31, 2022, 7:32:50 AM
          to goph...@pubsubhelper.golang.org, Gopher Robot, Than McIntosh, hopehook, David Chase, Michael Pratt, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

          Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, Than McIntosh, hopehook.

          View Change

          4 comments:

            • Done

          • File src/runtime/map_fast32.go:

            • Done

            • Done

          To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

          Gerrit-Project: go
          Gerrit-Branch: master
          Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
          Gerrit-Change-Number: 426614
          Gerrit-PatchSet: 9
          Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
          Gerrit-Reviewer: Austin Clements <aus...@google.com>
          Gerrit-Reviewer: Cherry Mui <cher...@google.com>
          Gerrit-Reviewer: Gopher Robot <go...@golang.org>
          Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
          Gerrit-Reviewer: Keith Randall <k...@golang.org>
          Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
          Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
          Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
          Gerrit-Reviewer: Than McIntosh <th...@google.com>
          Gerrit-CC: David Chase <drc...@google.com>
          Gerrit-CC: hopehook <hope...@golangcn.org>
          Gerrit-Attention: hopehook <hope...@golangcn.org>
          Gerrit-Attention: Michael Knyszek <mkny...@google.com>
          Gerrit-Attention: Austin Clements <aus...@google.com>
          Gerrit-Attention: Michael Pratt <mpr...@google.com>
          Gerrit-Attention: Than McIntosh <th...@google.com>
          Gerrit-Attention: David Chase <drc...@google.com>
          Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
          Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
          Gerrit-Attention: Keith Randall <k...@golang.org>
          Gerrit-Attention: Cherry Mui <cher...@google.com>
          Gerrit-Comment-Date: Wed, 31 Aug 2022 11:32:44 +0000
          Gerrit-HasComments: Yes
          Gerrit-Has-Labels: No
          Comment-In-Reply-To: hopehook <hope...@golangcn.org>
          Gerrit-MessageType: comment

          张云浩 (Gerrit)

          unread,
          Sep 6, 2022, 11:50:17 PM
          to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

          Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, Than McIntosh, hopehook.

          张云浩 uploaded patch set #10 to this change.

          View Change

          runtime: use SwissTable
          14 files changed, 2,221 insertions(+), 1,705 deletions(-)

          To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

          Gerrit-Project: go
          Gerrit-Branch: master
          Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
          Gerrit-Change-Number: 426614
          Gerrit-PatchSet: 10
          Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
          Gerrit-Reviewer: Austin Clements <aus...@google.com>
          Gerrit-Reviewer: Cherry Mui <cher...@google.com>
          Gerrit-Reviewer: Gopher Robot <go...@golang.org>
          Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
          Gerrit-Reviewer: Keith Randall <k...@golang.org>
          Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
          Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
          Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
          Gerrit-Reviewer: Than McIntosh <th...@google.com>
          Gerrit-CC: David Chase <drc...@google.com>
          Gerrit-CC: Wayne Zuo <wdv...@golangcn.org>
          Gerrit-CC: hopehook <hope...@golangcn.org>
          Gerrit-CC: yunhao zhang <zhangyu...@gmail.com>
          Gerrit-Attention: hopehook <hope...@golangcn.org>
          Gerrit-Attention: Michael Knyszek <mkny...@google.com>
          Gerrit-Attention: Austin Clements <aus...@google.com>
          Gerrit-Attention: Michael Pratt <mpr...@google.com>
          Gerrit-Attention: Than McIntosh <th...@google.com>
          Gerrit-Attention: David Chase <drc...@google.com>
          Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
          Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
          Gerrit-Attention: Keith Randall <k...@golang.org>
          Gerrit-Attention: Cherry Mui <cher...@google.com>
          Gerrit-MessageType: newpatchset

          张云浩 (Gerrit)

          unread,
          Sep 9, 2022, 5:27:44 AM
          to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

          Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, Than McIntosh, hopehook.

          张云浩 uploaded patch set #11 to this change.

          View Change

          14 files changed, 2,242 insertions(+), 1,738 deletions(-)

          To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

          Gerrit-Project: go
          Gerrit-Branch: master
          Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
          Gerrit-Change-Number: 426614
          Gerrit-PatchSet: 11

          张云浩 (Gerrit)

          unread,
          Sep 14, 2022, 7:18:39 AM
          to goph...@pubsubhelper.golang.org, golang-co...@googlegroups.com

          Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, Than McIntosh, hopehook.

          张云浩 uploaded patch set #12 to this change.

          View Change

          runtime: use SwissTable

          We suggest using SwissTable in the runtime to replace the original implementation of the hashmap.

          SwissTable is now used in abseil and rust by default. We borrowed ideas from the two implementations and combined some optimization ideas of Go's original hashmap to construct the current implementation.

          - abseil: https://abseil.io/about/design/swisstables
          - rust: https://github.com/rust-lang/hashbrown

          map properties changed with this CL:
          - maps immediately grow in place, which can lead to a long wait on a single map assign (see the sketch below)
          - some maps may require more memory (there can be cases where we previously had spare overflow buckets that are no longer used; also at most 7 items per bucket instead of 8)
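
          As an editorial aside, the first property can be illustrated with a deliberately simplified toy table. Everything below (toyTable, the 7/8 threshold, linear probing) is a hypothetical sketch for intuition only, not the CL's implementation:

          ```go
          package main

          import "fmt"

          // toyTable is a simplified open-addressing map used only to illustrate
          // "grow all at once": when growthLeft reaches zero, a single insert
          // rehashes every element into a table twice as large.
          type toyTable struct {
              slots      []int64 // -1 marks an empty slot
              growthLeft int     // inserts remaining before the next grow
          }

          func newToyTable(capacity int) *toyTable {
              s := make([]int64, capacity)
              for i := range s {
                  s[i] = -1
              }
              // Assume 7/8 of the slots may be used before growing, mirroring the
              // "at most 7 items per bucket of 8" property mentioned above.
              return &toyTable{slots: s, growthLeft: capacity * 7 / 8}
          }

          func (t *toyTable) insert(key int64) {
              if t.growthLeft == 0 {
                  // Unlike incremental evacuation, all keys move inside this one
                  // call, which is why a single assign can take noticeably longer.
                  old := t.slots
                  *t = *newToyTable(len(old) * 2)
                  for _, k := range old {
                      if k != -1 {
                          t.insert(k)
                      }
                  }
              }
              i := int(uint64(key) % uint64(len(t.slots))) // stand-in for a real hash
              for t.slots[i] != -1 {
                  i = (i + 1) % len(t.slots) // linear probe
              }
              t.slots[i] = key
              t.growthLeft--
          }

          func main() {
              t := newToyTable(8)
              for k := int64(0); k < 20; k++ {
                  t.insert(k)
              }
              fmt.Println("capacity:", len(t.slots)) // grew from 8 to 32 in bursts
          }
          ```

          Because every key moves during the one insert that triggers the grow, latency concentrates in that call, whereas an incremental scheme spreads evacuation over many operations.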

          For #54766

          Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
          ---
          M src/cmd/compile/internal/reflectdata/reflect.go
          M src/cmd/compile/internal/test/inl_test.go
          M src/cmd/compile/internal/walk/builtin.go
          M src/cmd/link/internal/ld/dwarf.go
          M src/reflect/all_test.go
          M src/reflect/type.go
          M src/reflect/value.go
          M src/runtime/export_test.go
          M src/runtime/map.go
          M src/runtime/map_fast32.go
          M src/runtime/map_fast64.go
          M src/runtime/map_faststr.go
          M src/runtime/map_test.go
          A src/runtime/mapbithack.go
          M src/runtime/runtime-gdb.py
          M src/runtime/stubs.go
          16 files changed, 2,251 insertions(+), 1,775 deletions(-)

          To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

          Gerrit-Project: go
          Gerrit-Branch: master
          Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
          Gerrit-Change-Number: 426614
          Gerrit-PatchSet: 12

          Than McIntosh (Gerrit)

          unread,
          Oct 19, 2022, 1:50:09 PM
          to 张云浩, goph...@pubsubhelper.golang.org, Andy Pan, Wayne Zuo, yunhao zhang, Gopher Robot, hopehook, David Chase, Michael Pratt, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com

          Attention is currently required from: Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, hopehook, 张云浩.

          Patch set 12:Run-TryBot +1

          View Change

            To view, visit change 426614. To unsubscribe, or for help writing mail filters, visit settings.

            Gerrit-Project: go
            Gerrit-Branch: master
            Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
            Gerrit-Change-Number: 426614
            Gerrit-PatchSet: 12
            Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
            Gerrit-Reviewer: Austin Clements <aus...@google.com>
            Gerrit-Reviewer: Cherry Mui <cher...@google.com>
            Gerrit-Reviewer: Gopher Robot <go...@golang.org>
            Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
            Gerrit-Reviewer: Keith Randall <k...@golang.org>
            Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
            Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
            Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
            Gerrit-Reviewer: Than McIntosh <th...@google.com>
            Gerrit-CC: Andy Pan <panj...@gmail.com>
            Gerrit-CC: David Chase <drc...@google.com>
            Gerrit-CC: Wayne Zuo <wdv...@golangcn.org>
            Gerrit-CC: hopehook <hope...@golangcn.org>
            Gerrit-CC: yunhao zhang <zhangyu...@gmail.com>
            Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
            Gerrit-Attention: hopehook <hope...@golangcn.org>
            Gerrit-Attention: Michael Knyszek <mkny...@google.com>
            Gerrit-Attention: Austin Clements <aus...@google.com>
            Gerrit-Attention: Michael Pratt <mpr...@google.com>
            Gerrit-Attention: David Chase <drc...@google.com>
            Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
            Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
            Gerrit-Attention: Keith Randall <k...@golang.org>
            Gerrit-Attention: Cherry Mui <cher...@google.com>
            Gerrit-Comment-Date: Wed, 19 Oct 2022 17:50:03 +0000

            Funda Secgin (Gerrit)

            unread,
            Oct 28, 2024, 4:01:34 PM
            to 张云浩, goph...@pubsubhelper.golang.org, Gopher Robot, Andy Pan, Wayne Zuo, yunhao zhang, hopehook, David Chase, Michael Pratt, Martin Möhrmann, Keith Randall, Cherry Mui, Michael Knyszek, Ian Lance Taylor, Austin Clements, golang-co...@googlegroups.com
            Attention needed from Austin Clements, Cherry Mui, David Chase, Ian Lance Taylor, Keith Randall, Martin Möhrmann, Michael Knyszek, Michael Pratt, hopehook and 张云浩

            Funda Secgin added 2 comments

            File src/runtime/map.go
            Line 183, Patchset 6: func overLoadFactor(count int, B uint8) bool {
            David Chase . resolved

            A description of what this means would help. My initial impression is that a "factor" would be a number, but this returns a boolean, so maybe a different name would work instead.

            张云浩

            Changed to "isOverLoadFactor", which may read more clearly. Also moved it back (this function comes from the previous implementation and is only used inside `makemap`; there are some comments about it there).

            Funda Secgin

            ```suggestion
            func overLoadFactor(count int, B uint8) bool {
            ```

            Line 248, Patchset 1: *(*uint64)(base) = 0xffff_ffff_ffff_ffff // all empty
            Martin Möhrmann . resolved

            Would it be possible to avoid writing to each bucket, and instead make all zeros (which we could set with memclr) the all-empty state?

            张云浩

            I considered this approach before :) But because it requires changing the theoretical model of SwissTable, it would need a lot of validation work (such as whether SIMD supports this change). To avoid too many changes in one CL, maybe we could do it in a subsequent CL? (See the sketch below for the current convention.)

            Funda Secgin
            ```suggestion
            *(*uint64)(base) = 0xffff_ffff_ffff_ffff // all empty
            ```
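
            To make the trade-off in this exchange concrete, here is a hedged sketch of the classic SwissTable control-byte convention and the word-at-a-time match it enables. The constants and the matchEmptyOrDeleted helper are assumptions drawn from the abseil/hashbrown designs, not necessarily what mapbithack.go does:

            ```go
            package main

            import "fmt"

            // Classic SwissTable-style control-byte values (as in abseil and
            // hashbrown); whether this CL uses exactly these values is an assumption.
            const (
                ctrlEmpty   = 0xff // high bit set, all ones
                ctrlDeleted = 0x80 // high bit set, rest zero
                // A full slot stores the low 7 bits of the hash, so its high bit is 0.
            )

            // matchEmptyOrDeleted returns a mask of the control bytes in an 8-byte
            // group whose high bit is set, i.e. slots that are empty or deleted.
            func matchEmptyOrDeleted(group uint64) uint64 {
                return group & 0x8080808080808080
            }

            func main() {
                // One group of 8 control bytes packed little-endian into a uint64:
                // slot 0 full (hash bits 0x23), slot 1 empty, slot 2 deleted, slots 3-7 empty.
                group := uint64(ctrlEmpty)<<56 | uint64(ctrlEmpty)<<48 |
                    uint64(ctrlEmpty)<<40 | uint64(ctrlEmpty)<<32 |
                    uint64(ctrlEmpty)<<24 | uint64(ctrlDeleted)<<16 |
                    uint64(ctrlEmpty)<<8 | 0x23
                fmt.Printf("%016x\n", matchEmptyOrDeleted(group)) // 8080808080808000
            }
            ```

            With empty encoded as all ones, the slot's state lives entirely in the high bit, so a single AND finds reusable slots; an all-zero "empty" would overlap with a full slot whose 7 stored hash bits happen to be zero, which is part of why a memclr-friendly encoding would need the broader validation mentioned above.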

            Related details

            Attention is currently required from:
            • Austin Clements
            • Cherry Mui
            • David Chase
            • Ian Lance Taylor
            • Keith Randall
            • Martin Möhrmann
            • Michael Knyszek
            • Michael Pratt
            • hopehook
            • 张云浩
            Submit Requirements:
            • Code-Review: requirement is not satisfied
            • Legacy-TryBots-Pass: requirement satisfied
            • No-Unresolved-Comments: requirement satisfied
            • Review-Enforcement: requirement is not satisfied
            • TryBots-Pass: requirement is not satisfied
            Inspect html for hidden footers to help with email filtering. To unsubscribe visit settings.
            Gerrit-MessageType: comment
            Gerrit-Project: go
            Gerrit-Branch: master
            Gerrit-Change-Id: I66f7de17d8b36b578f9cbc71843b0e4356b914f2
            Gerrit-Change-Number: 426614
            Gerrit-PatchSet: 12
            Gerrit-Owner: 张云浩 <zhang...@bytedance.com>
            Gerrit-Reviewer: Austin Clements <aus...@google.com>
            Gerrit-Reviewer: Cherry Mui <cher...@google.com>
            Gerrit-Reviewer: Gopher Robot <go...@golang.org>
            Gerrit-Reviewer: Ian Lance Taylor <ia...@golang.org>
            Gerrit-Reviewer: Keith Randall <k...@golang.org>
            Gerrit-Reviewer: Martin Möhrmann <moeh...@google.com>
            Gerrit-Reviewer: Michael Knyszek <mkny...@google.com>
            Gerrit-Reviewer: Michael Pratt <mpr...@google.com>
            Gerrit-Reviewer: Than McIntosh <th...@google.com>
            Gerrit-CC: Andy Pan <panj...@gmail.com>
            Gerrit-CC: David Chase <drc...@google.com>
            Gerrit-CC: Funda Secgin <fundas...@gmail.com>
            Gerrit-CC: Wayne Zuo <wdvxd...@gmail.com>
            Gerrit-CC: hopehook <hope...@golangcn.org>
            Gerrit-CC: yunhao zhang <zhangyu...@gmail.com>
            Gerrit-Attention: 张云浩 <zhang...@bytedance.com>
            Gerrit-Attention: hopehook <hope...@golangcn.org>
            Gerrit-Attention: Michael Knyszek <mkny...@google.com>
            Gerrit-Attention: Austin Clements <aus...@google.com>
            Gerrit-Attention: Michael Pratt <mpr...@google.com>
            Gerrit-Attention: David Chase <drc...@google.com>
            Gerrit-Attention: Martin Möhrmann <moeh...@google.com>
            Gerrit-Attention: Ian Lance Taylor <ia...@golang.org>
            Gerrit-Attention: Keith Randall <k...@golang.org>
            Gerrit-Attention: Cherry Mui <cher...@google.com>
            Gerrit-Comment-Date: Mon, 28 Oct 2024 20:01:24 +0000
            Gerrit-HasComments: Yes
            Gerrit-Has-Labels: No
            Comment-In-Reply-To: 张云浩 <zhang...@bytedance.com>
            Comment-In-Reply-To: David Chase <drc...@google.com>
            Comment-In-Reply-To: Martin Möhrmann <moeh...@google.com>