// A header for a Go map.
type hmap struct {
    // Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
    // Make sure this stays in sync with the compiler's definition.
    count     int // # live cells == size of map.  Must be first (used by len() builtin)
    flags     uint8
    B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
    noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
    hash0     uint32 // hash seed

    buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
    oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
    nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

    extra *mapextra // optional fields
}
// A bucket for a Go map.
type bmap struct {
    // tophash generally contains the top byte of the hash value
    // for each key in this bucket. If tophash[0] < minTopHash,
    // tophash[0] is a bucket evacuation state instead.
    tophash [bucketCnt]uint8
    // Followed by bucketCnt keys and then bucketCnt elems.
    // NOTE: packing all the keys together and then all the elems together makes the
    // code a bit more complicated than alternating key/elem/key/elem/... but it allows
    // us to eliminate padding which would be needed for, e.g., map[int64]int8.
    // Followed by an overflow pointer.
}
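To see concretely why the grouped layout avoids padding, here is a small standalone sketch (the pair and grouped types are hypothetical, for illustration only) comparing an interleaved key/elem layout with bmap's keys-then-elems layout for map[int64]int8:

package main

import (
    "fmt"
    "unsafe"
)

// pair interleaves an int64 key with an int8 elem; the compiler pads
// the elem to 8 bytes so the next key stays aligned.
type pair struct {
    k int64
    e int8
}

// grouped packs 8 keys together and then 8 elems together, mirroring
// bmap's layout; no per-pair padding is needed.
type grouped struct {
    k [8]int64
    e [8]int8
}

func main() {
    fmt.Println(unsafe.Sizeof([8]pair{})) // 128 bytes: 7 wasted padding bytes per pair
    fmt.Println(unsafe.Sizeof(grouped{})) // 72 bytes: 64 + 8, nothing wasted
}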
func TestMapRange(t *testing.T) {
    m := map[int]string{1: "a", 2: "b", 3: "c"}
    t.Log("first range:")
    // Iteration order is unordered by default.
    for i, v := range m {
        t.Logf("m[%v]=%v ", i, v)
    }
    t.Log("\nsecond range:")
    for i, v := range m {
        t.Logf("m[%v]=%v ", i, v)
    }

    // Ordered iteration:
    var sl []int
    // Collect the keys into a slice.
    for k := range m {
        sl = append(sl, k)
    }
    // Sort the slice.
    sort.Ints(sl)
    // Ranging over the map in the order of the sorted keys yields an ordered traversal.
    for _, k := range sl {
        t.Log(k, m[k])
    }
}
Not concurrency-safe
Maps are not safe for concurrent use by default, for the following reason:
After long discussion, the Go team concluded that map should fit its typical use case, where safe access from multiple goroutines is not needed, rather than the minority case of concurrent access, which would have forced most programs to pay the cost of locking. So built-in maps deliberately do not support concurrent use.
func BenchmarkMapConcurrencySafeBySyncMap(b *testing.B) {
    var m sync.Map
    var wg sync.WaitGroup
    for i := 0; i < b.N; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            m.Store(i, i)
        }(i)
    }
    wg.Wait()
    b.Log(b.N)
}
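As a point of comparison, the other common way to make a map concurrency-safe is to guard a plain map with a sync.RWMutex. A minimal sketch (the counter type here is hypothetical):

package main

import (
    "fmt"
    "sync"
)

// counter guards a plain map with an RWMutex; this is the usual
// pattern when sync.Map's interface boxing and extra indirection
// are not worth it.
type counter struct {
    mu sync.RWMutex
    m  map[int]int
}

func (c *counter) store(k, v int) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.m[k] = v
}

func (c *counter) load(k int) (int, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    v, ok := c.m[k]
    return v, ok
}

func main() {
    c := &counter{m: make(map[int]int)}
    var wg sync.WaitGroup
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            c.store(i, i)
        }(i)
    }
    wg.Wait()
    fmt.Println(c.load(42))
}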
Hash collisions
A Go map is a collection of key/value pairs. The underlying implementation is a hash table that resolves collisions by chaining, but instead of allocating one chained node per colliding key, the unit of chaining is a bmap (bucket), and one bmap holds up to 8 key/value pairs. As for the hash function, the runtime checks at program startup whether the CPU supports AES instructions: if so it uses the AES-based hash, otherwise memhash.
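The chaining scheme can be pictured with a toy bucket type (hypothetical types; the runtime's real buckets are raw memory accessed through unsafe, as the excerpts below show):

package main

import "fmt"

// slot and toyBucket model one bmap: up to 8 key/value pairs plus a
// pointer to an overflow bucket that extends the chain on collision.
type slot struct {
    used bool
    key  uint64
    val  string
}

type toyBucket struct {
    slots    [8]slot
    overflow *toyBucket
}

// get walks the bucket and its overflow chain, the same shape of loop
// the runtime uses in mapaccess1 below.
func (b *toyBucket) get(key uint64) (string, bool) {
    for ; b != nil; b = b.overflow {
        for i := range b.slots {
            if b.slots[i].used && b.slots[i].key == key {
                return b.slots[i].val, true
            }
        }
    }
    return "", false
}

func main() {
    b := &toyBucket{overflow: &toyBucket{}}
    b.slots[0] = slot{used: true, key: 7, val: "x"}
    b.overflow.slots[0] = slot{used: true, key: 15, val: "y"} // "collided" into the overflow bucket
    fmt.Println(b.get(15)) // y true
}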
Looking up data
A lookup needs to find the memory addresses of the key and the value.
First the bucket is located, using the low-order bits of the key's hash as an index into the bucket array. Within the bucket, candidate slots are selected by comparing the top 8 bits of the hash against the tophash bytes stored in the bucket.
Next, the key is located by computing its address:
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
    k = *((*unsafe.Pointer)(k))
}
The value is then found in the same way:
if t.key.equal(key, k) {
    e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
    if t.indirectelem() {
        e = *((*unsafe.Pointer)(e))
    }
    return e
}
// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
    h := new(hmap)
    h.hash0 = fastrand()
    return h
}
const (
    // Maximum number of key/elem pairs a bucket can hold.
    bucketCntBits = 3
    bucketCnt     = 1 << bucketCntBits
)
// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
// If the compiler finds that the map or its first bucket can be stack-allocated,
// the incoming h will be non-nil.
func makemap(t *maptype, hint int, h *hmap) *hmap {
    mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
    if overflow || mem > maxAlloc {
        hint = 0
    }
    // initialize Hmap
    if h == nil {
        h = new(hmap)
    }
    h.hash0 = fastrand()
    // Find the size parameter B which will hold the requested # of elements.
    // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
    // hint is the requested initial size of the map, so a suitable exponent B is computed.
    // overLoadFactor reports whether 2^B buckets would already be overloaded by hint items.
    B := uint8(0)
    for overLoadFactor(hint, B) {
        B++
    }
    h.B = B
    // allocate initial hash table
    // if B == 0, the buckets field is allocated lazily later (in mapassign)
    // If hint is large zeroing this memory could take a while.
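For intuition about how B is chosen by the loop above, here is a standalone re-implementation (not the runtime code itself) that prints the chosen B for a few hints:

package main

import "fmt"

const (
    bucketCnt     = 8
    loadFactorNum = 13
    loadFactorDen = 2
)

// overLoadFactor mirrors the runtime check: count items in 1<<B buckets
// exceed the average load factor of 6.5.
func overLoadFactor(count int, B uint8) bool {
    return count > bucketCnt && uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
}

func main() {
    for _, hint := range []int{0, 8, 9, 52, 53, 1000} {
        B := uint8(0)
        for overLoadFactor(hint, B) {
            B++
        }
        fmt.Printf("hint=%d -> B=%d (%d buckets)\n", hint, B, 1<<B)
    }
}

// hint=52 still fits in 8 buckets (8 * 6.5 = 52), while hint=53 forces B=4.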
// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
    if raceenabled && h != nil {
        callerpc := getcallerpc()
        pc := funcPC(mapaccess1)
        racereadpc(unsafe.Pointer(h), callerpc, pc)
        raceReadObjectPC(t.key, key, callerpc, pc)
    }
    if msanenabled && h != nil {
        msanread(key, t.key.size)
    }
    if h == nil || h.count == 0 {
        if t.hashMightPanic() {
            t.hasher(key, 0) // see issue 23734
        }
        return unsafe.Pointer(&zeroVal[0])
    }
    // Maps do not allow concurrent reads and writes; violating this triggers the panic below.
    // A bit in flags records whether a write is in progress.
    if h.flags&hashWriting != 0 {
        throw("concurrent map read and map write")
    }
    hash := t.hasher(key, uintptr(h.hash0))
    m := bucketMask(h.B) // m is a mask of the low B bits, used to select the bmap within []bmap.
    b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) // locate the specific bmap
    if c := h.oldbuckets; c != nil {
        if !h.sameSizeGrow() {
            // There used to be half as many buckets; mask down one more power of two.
            m >>= 1
        }
        oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
        if !evacuated(oldb) {
            b = oldb
        }
    }
    top := tophash(hash) // tophash is the top 8 bits of the hash, used for fast comparison.
bucketloop:
    // Iterate over the bmap and its overflow bmaps.
    for ; b != nil; b = b.overflow(t) {
        for i := uintptr(0); i < bucketCnt; i++ {
            // Compare tophash first; on mismatch, continue immediately.
            if b.tophash[i] != top {
                if b.tophash[i] == emptyRest {
                    break bucketloop
                }
                continue
            }
            k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
            if t.indirectkey() {
                k = *((*unsafe.Pointer)(k))
            }
            // When the tophash matches, the keys themselves are compared.
            // If they are equal, the key has been located; return the result.
            if t.key.equal(key, k) {
                e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
                if t.indirectelem() {
                    e = *((*unsafe.Pointer)(e))
                }
                return e
            }
        }
    }
    return unsafe.Pointer(&zeroVal[0])
}
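This zeroVal return is what gives map indexing its zero-value semantics at the language level; the comma-ok form is backed by mapaccess2 and its variants, which additionally return the presence boolean:

package main

import "fmt"

func main() {
    m := map[string]int{"a": 1}
    v := m["missing"]     // one-result form: the runtime returns a pointer to the zero object, so v == 0
    _, ok := m["missing"] // comma-ok form: ok reports whether the key was present
    fmt.Println(v, ok)    // 0 false
}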
For common key types the compiler calls specialized variants instead; for example mapaccess1_fast64:

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
    if raceenabled && h != nil {
        callerpc := getcallerpc()
        racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
    }
    if h == nil || h.count == 0 {
        return unsafe.Pointer(&zeroVal[0])
    }
    if h.flags&hashWriting != 0 {
        throw("concurrent map read and map write")
    }
    var b *bmap
    if h.B == 0 {
        // One-bucket table. No need to hash.
        b = (*bmap)(h.buckets)
    } else {
        hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
        m := bucketMask(h.B)
        b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
        if c := h.oldbuckets; c != nil {
            if !h.sameSizeGrow() {
                // There used to be half as many buckets; mask down one more power of two.
                m >>= 1
            }
            oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
            if !evacuated(oldb) {
                b = oldb
            }
        }
    }
    for ; b != nil; b = b.overflow(t) {
        for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
            if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
                // Keys are compared directly, without comparing tophash first.
                return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
            }
        }
    }
    return unsafe.Pointer(&zeroVal[0])
}
// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
    if h == nil {
        panic(plainError("assignment to entry in nil map"))
    }
    if raceenabled {
        callerpc := getcallerpc()
        pc := funcPC(mapassign)
        racewritepc(unsafe.Pointer(h), callerpc, pc)
        raceReadObjectPC(t.key, key, callerpc, pc)
    }
    if msanenabled {
        msanread(key, t.key.size)
    }
    // Maps do not allow concurrent reads and writes, checked via the flags bit.
    if h.flags&hashWriting != 0 {
        throw("concurrent map writes")
    }
    hash := t.hasher(key, uintptr(h.hash0))
    // Set hashWriting after calling t.hasher, since t.hasher may panic,
    // in which case we have not actually done a write.
    // Assignment is a write operation, so the write flag is set.
    h.flags ^= hashWriting
again:
    bucket := hash & bucketMask(h.B)
    if h.growing() { // check whether the map is in the middle of growing
        growWork(t, h, bucket)
    }
    b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) // locate the bmap
    top := tophash(hash)                                       // compute the tophash
    var inserti *uint8
    var insertk unsafe.Pointer
    var elem unsafe.Pointer
bucketloop:
    for {
        for i := uintptr(0); i < bucketCnt; i++ {
            if b.tophash[i] != top {
                // Record the first free slot encountered. If the whole bmap and its
                // overflow bmaps are traversed without finding the key, the key is
                // considered absent, and this recorded slot becomes the insert position.
                if isEmpty(b.tophash[i]) && inserti == nil {
                    inserti = &b.tophash[i]
                    insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
                    elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
                }
                if b.tophash[i] == emptyRest {
                    break bucketloop
                }
                continue
            }
            k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
            if t.indirectkey() {
                k = *((*unsafe.Pointer)(k))
            }
            if !t.key.equal(key, k) {
                continue
            }
            // already have a mapping for key. Update it.
            // The key already exists in the map, so its value is updated in place.
            if t.needkeyupdate() {
                typedmemmove(t.key, k, key)
            }
            elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
            goto done
        }
        ovf := b.overflow(t) // move on to the next overflow bmap
        if ovf == nil {
            break
        }
        b = ovf
    }
// Did not find mapping for key. Allocate new cell & add entry.
    // If we hit the max load factor or we have too many overflow buckets,
    // and we're not already in the middle of growing, start growing.
    // The conditions that trigger growth.
    if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
        hashGrow(t, h)
        goto again // Growing the table invalidates everything, so try again
    }
    if inserti == nil {
        // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
        newb := h.newoverflow(t, b)
        inserti = &newb.tophash[0]
        insertk = add(unsafe.Pointer(newb), dataOffset)
        elem = add(insertk, bucketCnt*uintptr(t.keysize))
    }
    // store new key/elem at insert position
    if t.indirectkey() {
        kmem := newobject(t.key)
        *(*unsafe.Pointer)(insertk) = kmem
        insertk = kmem
    }
    if t.indirectelem() {
        vmem := newobject(t.elem)
        *(*unsafe.Pointer)(elem) = vmem
    }
    typedmemmove(t.key, insertk, key)
    *inserti = top
    h.count++

done:
    if h.flags&hashWriting == 0 {
        throw("concurrent map writes")
    }
    h.flags &^= hashWriting
    if t.indirectelem() {
        elem = *((*unsafe.Pointer)(elem))
    }
    return elem
}
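Note that mapassign returns a pointer into the bucket, and the compiler only ever writes through it immediately; because a later insert can trigger evacuation and move the slot, Go refuses to expose that address to user code:

package main

func main() {
    m := map[string]int{"a": 1}
    m["a"]++ // rewritten by the compiler into a mapassign-family call plus an increment through the returned slot
    // p := &m["a"] // compile error: cannot take the address of m["a"],
    // precisely because the slot may move when the map grows.
    _ = m
}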
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
    return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
    // Masking the shift amount allows overflow checks to be elided.
    return uintptr(1) << (b & (sys.PtrSize*8 - 1))
}
const (
    // Maximum number of key/elem pairs a bucket can hold.
    bucketCntBits = 3
    bucketCnt     = 1 << bucketCntBits

    // Maximum average load of a bucket that triggers growth is 6.5.
    // Represent as loadFactorNum/loadFactorDen, to allow integer math.
    loadFactorNum = 13
    loadFactorDen = 2
)
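In other words, overLoadFactor(count, B) checks count > 6.5 * 2^B using only integer math: for B = 3, loadFactorNum*(bucketShift(B)/loadFactorDen) = 13 * (8/2) = 52, which is exactly 6.5 * 8.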
func hashGrow(t *maptype, h *hmap) {
    // If we've hit the load factor, get bigger.
    // Otherwise, there are too many overflow buckets,
    // so keep the same number of buckets and "grow" laterally.
    bigger := uint8(1)
    if !overLoadFactor(h.count+1, h.B) {
        // If the load factor was not exceeded, this "growth" is really a compaction
        // of the map; the capacity stays the same.
        bigger = 0
        h.flags |= sameSizeGrow // mark sameSizeGrow: the capacity is unchanged
    }
    oldbuckets := h.buckets                                         // keep the old buckets
    newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil) // allocate the new buckets
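The other trigger, tooManyOverflowBuckets, deems "too many" to be roughly as many overflow buckets as regular buckets. A runnable sketch of that check (mirroring the runtime's version in recent releases, with B capped at 15):

package main

import "fmt"

// tooManyOverflowBuckets mirrors the runtime's check: growth (here a
// same-size compaction) starts once there are about as many overflow
// buckets as regular buckets.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
    if B > 15 {
        B = 15
    }
    return noverflow >= uint16(1)<<(B&15)
}

func main() {
    fmt.Println(tooManyOverflowBuckets(3, 2)) // false: 3 overflow buckets vs 4 regular
    fmt.Println(tooManyOverflowBuckets(4, 2)) // true: as many overflow as regular
}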
// evacDst is an evacuation destination.
type evacDst struct {
    b *bmap          // current destination bucket (the bmap being evacuated into)
    i int            // key/elem index into b (current index within the target bmap)
    k unsafe.Pointer // pointer to current key storage
    e unsafe.Pointer // pointer to current elem storage
}
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
    b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) // locate the old bmap
    newbit := h.noldbuckets()
    if !evacuated(b) { // check whether this bucket has already been evacuated
        // TODO: reuse overflow buckets instead of using new ones, if there
        // is no iterator using the old buckets.  (If !oldIterator.)
        // xy contains the x and y (low and high) evacuation destinations.
        var xy [2]evacDst
        x := &xy[0]
        x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
        x.k = add(unsafe.Pointer(x.b), dataOffset)
        x.e = add(x.k, bucketCnt*uintptr(t.keysize))
        if !h.sameSizeGrow() {
            // When doubling in size there are two possible destinations,
            // so xy[1] records the second one.
            // Only calculate y pointers if we're growing bigger.
            // Otherwise GC can see bad pointers.
            y := &xy[1]
            y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
            y.k = add(unsafe.Pointer(y.b), dataOffset)
            y.e = add(y.k, bucketCnt*uintptr(t.keysize))
        }
        for ; b != nil; b = b.overflow(t) { // iterate over the current bmap
            k := add(unsafe.Pointer(b), dataOffset)
            e := add(k, bucketCnt*uintptr(t.keysize))
            for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
                top := b.tophash[i]
                if isEmpty(top) {
                    b.tophash[i] = evacuatedEmpty
                    continue
                }
                if top < minTopHash {
                    throw("bad map state")
                }
                k2 := k
                if t.indirectkey() {
                    k2 = *((*unsafe.Pointer)(k2))
                }
                var useY uint8
                if !h.sameSizeGrow() {
                    // When doubling in size, the hash must be recomputed.
                    // Compute hash to make our evacuation decision (whether we need
                    // to send this key/elem to bucket x or bucket y).
                    hash := t.hasher(k2, uintptr(h.hash0))
                    if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
                        // If key != key (NaNs), then the hash could be (and probably
                        // will be) entirely different from the old hash. Moreover,
                        // it isn't reproducible. Reproducibility is required in the
                        // presence of iterators, as our evacuation decision must
                        // match whatever decision the iterator made.
                        // Fortunately, we have the freedom to send these keys either
                        // way. Also, tophash is meaningless for these kinds of keys.
                        // We let the low bit of tophash drive the evacuation decision.
                        // We recompute a new random tophash for the next level so
                        // these keys will get evenly distributed across all buckets
                        // after multiple grows.
                        useY = top & 1
                        top = tophash(hash)
                    } else {
                        if hash&newbit != 0 {
                            useY = 1
                        }
                    }
                }
                // The evacuation state is recorded in the old bucket's tophash,
                // and dst selects the chosen destination.
                b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
                dst := &xy[useY]                 // evacuation destination

                if dst.i == bucketCnt {
                    dst.b = h.newoverflow(t, dst.b)
                    dst.i = 0
                    dst.k = add(unsafe.Pointer(dst.b), dataOffset)
                    dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
                }
                dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
                // Copy the data to the new evacuation slot.
                if t.indirectkey() {
                    *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
                } else {
                    typedmemmove(t.key, dst.k, k) // copy elem
                }
                if t.indirectelem() {
                    *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
                } else {
                    typedmemmove(t.elem, dst.e, e)
                }
                dst.i++
                // These updates might push these pointers past the end of the
                // key or elem arrays.  That's ok, as we have the overflow pointer
                // at the end of the bucket to protect against pointing past the
                // end of the bucket.
                dst.k = add(dst.k, uintptr(t.keysize))
                dst.e = add(dst.e, uintptr(t.elemsize))
            }
        }
        // Unlink the overflow buckets & clear key/elem to help GC.
        // Drop references into the old bmap so the GC can reclaim its memory.
        if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
            b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
            // Preserve b.tophash because the evacuation
            // state is maintained there.
            ptr := add(b, dataOffset)
            n := uintptr(t.bucketsize) - dataOffset
            memclrHasPointers(ptr, n)
        }
    }
    if oldbucket == h.nevacuate {
        advanceEvacuationMark(h, t, newbit)
    }
}
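The X/Y split itself is simple arithmetic: with newbit = 2^oldB, a key stays at its old index (X) when hash&newbit == 0 and moves to oldIndex+newbit (Y) otherwise. A toy demonstration (standalone, not runtime code):

package main

import "fmt"

func main() {
    const oldB = 2               // 4 old buckets
    newbit := uintptr(1) << oldB // the bit that doubling adds to the bucket mask
    for _, hash := range []uintptr{0b0001, 0b0101, 0b0010, 0b0110} {
        oldIdx := hash & (newbit - 1)
        newIdx := oldIdx
        if hash&newbit != 0 {
            newIdx += newbit // destination Y
        }
        fmt.Printf("hash=%04b: old bucket %d -> new bucket %d\n", hash, oldIdx, newIdx)
    }
}

// Old bucket 1 splits into new buckets 1 and 5; old bucket 2 into 2 and 6.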
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
    if raceenabled && h != nil {
        callerpc := getcallerpc()
        pc := funcPC(mapdelete)
        racewritepc(unsafe.Pointer(h), callerpc, pc)
        raceReadObjectPC(t.key, key, callerpc, pc)
    }
    if msanenabled && h != nil {
        msanread(key, t.key.size)
    }
    if h == nil || h.count == 0 {
        if t.hashMightPanic() {
            t.hasher(key, 0) // see issue 23734
        }
        return
    }
    if h.flags&hashWriting != 0 {
        throw("concurrent map writes")
    }
hash := t.hasher(key, uintptr(h.hash0))
    // Set hashWriting after calling t.hasher, since t.hasher may panic,
    // in which case we have not actually done a write (delete).
    h.flags ^= hashWriting
    bucket := hash & bucketMask(h.B)
    if h.growing() {
        growWork(t, h, bucket)
    }
    b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
    bOrig := b
    top := tophash(hash)
search:
    for ; b != nil; b = b.overflow(t) {
        for i := uintptr(0); i < bucketCnt; i++ {
            if b.tophash[i] != top {
                if b.tophash[i] == emptyRest {
                    break search
                }
                continue
            }
            k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
            k2 := k
            if t.indirectkey() {
                k2 = *((*unsafe.Pointer)(k2))
            }
            if !t.key.equal(key, k2) {
                continue
            }
            // Delete the key.
            // Only clear key if there are pointers in it.
            if t.indirectkey() {
                *(*unsafe.Pointer)(k) = nil
            } else if t.key.ptrdata != 0 {
                memclrHasPointers(k, t.key.size)
            }
            // The elem is cleared the same way.
            e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
            if t.indirectelem() {
                *(*unsafe.Pointer)(e) = nil
            } else if t.elem.ptrdata != 0 {
                memclrHasPointers(e, t.elem.size)
            } else {
                memclrNoHeapPointers(e, t.elem.size)
            }
            // Clear the tophash.
            b.tophash[i] = emptyOne
            // If the bucket now ends in a bunch of emptyOne states,
            // change those to emptyRest states.
            // It would be nice to make this a separate function, but
            // for loops are not currently inlineable.
            if i == bucketCnt-1 {
                if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
                    goto notLast
                }
            } else {
                if b.tophash[i+1] != emptyRest {
                    goto notLast
                }
            }
            for {
                b.tophash[i] = emptyRest
                if i == 0 {
                    if b == bOrig {
                        break // beginning of initial bucket, we're done.
                    }
                    // Find previous bucket, continue at its last entry.
                    c := b
                    for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
                    }
                    i = bucketCnt - 1
                } else {
                    i--
                }
                if b.tophash[i] != emptyOne {
                    break
                }
            }
        notLast:
            h.count--
            // Reset the hash seed to make it more difficult for attackers to
            // repeatedly trigger hash collisions. See issue 25237.
            if h.count == 0 {
                h.hash0 = fastrand()
            }
            break search
        }
    }
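The emptyOne/emptyRest bookkeeping can be isolated into a toy helper over a single bucket's tophash array (a hypothetical backfill function; the real code above also walks back through the overflow chain):

package main

import "fmt"

const (
    emptyRest = 0 // this slot is empty, and so are all later slots and overflows
    emptyOne  = 1 // this slot is empty, but later slots may be in use
)

// backfill clears slot i and, when every slot after i is already
// emptyRest, promotes the trailing run of emptyOne slots to emptyRest
// so that future lookups can stop scanning this bucket early.
func backfill(tophash *[8]uint8, i int) {
    tophash[i] = emptyOne
    if i != 7 && tophash[i+1] != emptyRest {
        return // a later slot may still be in use; nothing to promote
    }
    for i >= 0 && tophash[i] == emptyOne {
        tophash[i] = emptyRest
        i--
    }
}

func main() {
    // Slots 0-2 in use (any value >= minTopHash, here 9), slots 3-4 emptyOne.
    th := [8]uint8{9, 9, 9, emptyOne, emptyOne, emptyRest, emptyRest, emptyRest}
    backfill(&th, 4)
    fmt.Println(th) // [9 9 9 0 0 0 0 0]: the trailing emptyOne run became emptyRest
}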
Specialized delete variants exist for common key types as well:

func mapdelete_fast32(t *maptype, h *hmap, key uint32)
func mapdelete_fast64(t *maptype, h *hmap, key uint64)
func mapdelete_faststr(t *maptype, h *hmap, ky string)
// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compilers order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
    if raceenabled && h != nil {
        callerpc := getcallerpc()
        racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
    }
    if h == nil || h.count == 0 {
        return
    }
    if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
        throw("hash_iter size incorrect") // see cmd/compile/internal/gc/reflect.go
    }
    it.t = t
    it.h = h
    // grab snapshot of bucket state
    it.B = h.B
    it.buckets = h.buckets
    if t.bucket.ptrdata == 0 {
        // Allocate the current slice and remember pointers to both current and old.
        // This preserves all relevant overflow buckets alive even if
        // the table grows and/or overflow buckets are added to the table
        // while we are iterating.
        h.createOverflow()
        it.overflow = h.extra.overflow
        it.oldoverflow = h.extra.oldoverflow
    }
    // decide where to start
    // Note that the iterator picks a random starting bucket and a random starting offset within it.
    r := uintptr(fastrand())
    if h.B > 31-bucketCntBits {
        r += uintptr(fastrand()) << 31
    }
    it.startBucket = r & bucketMask(h.B)
    it.offset = uint8(r >> h.B & (bucketCnt - 1))
    // iterator state
    it.bucket = it.startBucket // remember the random starting position
    // Remember we have an iterator.
    // Can run concurrently with another mapiterinit().
    if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
        atomic.Or8(&h.flags, iterator|oldIterator)
    }

    mapiternext(it)
}
func mapiternext(it *hiter) {
    h := it.h
    if raceenabled {
        callerpc := getcallerpc()
        racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
    }
    if h.flags&hashWriting != 0 {
        throw("concurrent map iteration and map write")
    }
    t := it.t
    bucket := it.bucket
    b := it.bptr
    i := it.i
    checkBucket := it.checkBucket
next:
    if b == nil {
        if bucket == it.startBucket && it.wrapped {
            // end of iteration
            it.key = nil
            it.elem = nil
            return
        }
        // Check whether a grow is in progress; for buckets that have not yet
        // finished evacuating, the iterator must also look into the old bucket.
        if h.growing() && it.B == h.B {
            // Iterator was started in the middle of a grow, and the grow isn't done yet.
            // If the bucket we're looking at hasn't been filled in yet (i.e. the old
            // bucket hasn't been evacuated) then we need to iterate through the old
            // bucket and only return the ones that will be migrated to this bucket.
            oldbucket := bucket & it.h.oldbucketmask()
            b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
            if !evacuated(b) {
                checkBucket = bucket
            } else {
                b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
                checkBucket = noCheck
            }
        } else {
            b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
            checkBucket = noCheck
        }
        bucket++
        if bucket == bucketShift(it.B) {
            bucket = 0
            it.wrapped = true
        }
        i = 0
    }
    for ; i < bucketCnt; i++ {
        offi := (i + it.offset) & (bucketCnt - 1)
        if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
            // TODO: emptyRest is hard to use here, as we start iterating
            // in the middle of a bucket. It's feasible, just tricky.
            continue
        }
        k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
        if t.indirectkey() {
            k = *((*unsafe.Pointer)(k))
        }
        e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
        if checkBucket != noCheck && !h.sameSizeGrow() {
            // Special case: iterator was started during a grow to a larger size
            // and the grow is not done yet. We're working on a bucket whose
            // oldbucket has not been evacuated yet. Or at least, it wasn't
            // evacuated when we started the bucket. So we're iterating
            // through the oldbucket, skipping any keys that will go
            // to the other new bucket (each oldbucket expands to two
            // buckets during a grow).
            if t.reflexivekey() || t.key.equal(k, k) {
                // If the item in the oldbucket is not destined for
                // the current new bucket in the iteration, skip it.
                hash := t.hasher(k, uintptr(h.hash0))
                if hash&bucketMask(it.B) != checkBucket {
                    continue
                }
            } else {
                // Hash isn't repeatable if k != k (NaNs). We need a
                // repeatable and randomish choice of which direction
                // to send NaNs during evacuation. We'll use the low
                // bit of tophash to decide which way NaNs go.
                // NOTE: this case is why we need two evacuate tophash
                // values, evacuatedX and evacuatedY, that differ in
                // their low bit.
                if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
                    continue
                }
            }
        }
        if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
            !(t.reflexivekey() || t.key.equal(k, k)) {
            // This is the golden data, we can return it.
            // OR
            // key!=key, so the entry can't be deleted or updated, so we can just return it.
            // That's lucky for us because when key!=key we can't look it up successfully.
            it.key = k
            if t.indirectelem() {
                e = *((*unsafe.Pointer)(e))
            }
            it.elem = e
        } else {
            // The hash table has grown since the iterator was started.
            // The golden data for this key is now somewhere else.
            // Check the current hash table for the data.
            // This code handles the case where the key
            // has been deleted, updated, or deleted and reinserted.
            // NOTE: we need to regrab the key as it has potentially been
            // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
            rk, re := mapaccessK(t, h, k)
            if rk == nil {
                continue // key has been deleted
            }
            it.key = rk
            it.elem = re
        }
        it.bucket = bucket
        if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
            it.bptr = b
        }
        it.i = i + 1
        it.checkBucket = checkBucket
        return
    }
    b = b.overflow(t)
    i = 0
    goto next
}
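Together, the random start, the snapshot of B, and the grow-aware checkBucket logic produce the language-level guarantees: deleting an entry during iteration is safe (the entry is simply not produced if it has not been reached yet), and an entry inserted during iteration may or may not be seen:

package main

import "fmt"

func main() {
    m := map[int]int{1: 1, 2: 2, 3: 3, 4: 4}
    for k := range m {
        if k == 2 {
            delete(m, 3) // safe: if 3 has not been visited yet, it just won't appear
        }
    }
    fmt.Println(len(m)) // 3
}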
For comparison, here is the same evacuation loop as it appeared in an older version of the runtime (which used t.key.alg.hash rather than t.hasher), simplified to the doubling path without the NaN special case:

for ; b != nil; b = b.overflow(t) {
    k := add(unsafe.Pointer(b), dataOffset)
    v := add(k, bucketCnt*uintptr(t.keysize))
    for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
        top := b.tophash[i]
        k2 := k
        var useY uint8
        hash := t.key.alg.hash(k2, uintptr(h.hash0))
        if hash&newbit != 0 {
            useY = 1
        }
        b.tophash[i] = evacuatedX + useY
        dst := &xy[useY]