From b5a7f2eef7cb17255cb396cd4ff7df04957dd21e Mon Sep 17 00:00:00 2001 From: cuiweixie Date: Mon, 27 Feb 2023 07:15:50 +0800 Subject: maps,runtime: improve maps.Clone MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit name old time/op new time/op delta MapClone-10 65.8ms ± 7% 10.3ms ± 2% -84.30% (p=0.000 n=10+9) name old alloc/op new alloc/op delta MapClone-10 40.2MB ± 0% 40.5MB ± 0% +0.57% (p=0.000 n=10+9) name old allocs/op new allocs/op delta MapClone-10 20.0 ± 0% 23.0 ± 0% +15.00% (p=0.000 n=10+10) Updates #58740. Change-Id: I148501e723cb2124f02045400e7ceb36af0871c8 Reviewed-on: https://go-review.googlesource.com/c/go/+/471400 Reviewed-by: Heschi Kreinick TryBot-Result: Gopher Robot Reviewed-by: Keith Randall Run-TryBot: xie cui <523516579@qq.com> Reviewed-by: Keith Randall --- src/runtime/map.go | 148 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) (limited to 'src/runtime') diff --git a/src/runtime/map.go b/src/runtime/map.go index 9c3a7e2b8c..e98860fe7a 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -1445,3 +1445,151 @@ var zeroVal [maxZero]byte // map init function to this symbol. Defined in assembly so as to avoid // complications with instrumentation (coverage, etc). func mapinitnoop() + +// mapclone for implementing maps.Clone +// +//go:linkname mapclone maps.clone +func mapclone(m any) any { + e := efaceOf(&m) + e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data))) + return m +} + +// moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows +// and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket. 
+func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) { + for i := 0; i < bucketCnt; i++ { + if isEmpty(src.tophash[i]) { + continue + } + + for ; pos < bucketCnt; pos++ { + if isEmpty(dst.tophash[pos]) { + break + } + } + + if pos == bucketCnt { + dst = h.newoverflow(t, dst) + pos = 0 + } + + srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.keysize)) + srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(i)*uintptr(t.elemsize)) + dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.keysize)) + dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(pos)*uintptr(t.elemsize)) + + dst.tophash[pos] = src.tophash[i] + if t.indirectkey() { + *(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK) + } else { + typedmemmove(t.key, dstK, srcK) + } + if t.indirectelem() { + *(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle) + } else { + typedmemmove(t.elem, dstEle, srcEle) + } + pos++ + h.count++ + } + return dst, pos +} + +func mapclone2(t *maptype, src *hmap) *hmap { + dst := makemap(t, src.count, nil) + dst.hash0 = src.hash0 + dst.nevacuate = 0 + //flags do not need to be copied here, just like a new map has no flags. 
+ + if src.count == 0 { + return dst + } + + if src.flags&hashWriting != 0 { + fatal("concurrent map clone and map write") + } + + if src.B == 0 { + dst.buckets = newobject(t.bucket) + dst.count = src.count + typedmemmove(t.bucket, dst.buckets, src.buckets) + return dst + } + + //src.B != 0 + if dst.B == 0 { + dst.buckets = newobject(t.bucket) + } + dstArraySize := int(bucketShift(dst.B)) + srcArraySize := int(bucketShift(src.B)) + for i := 0; i < dstArraySize; i++ { + dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.bucketsize)))) + pos := 0 + for j := 0; j < srcArraySize; j += dstArraySize { + srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.bucketsize)))) + for srcBmap != nil { + dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap) + srcBmap = srcBmap.overflow(t) + } + } + } + + if src.oldbuckets == nil { + return dst + } + + oldB := src.B + srcOldbuckets := src.oldbuckets + if !src.sameSizeGrow() { + oldB-- + } + oldSrcArraySize := int(bucketShift(oldB)) + + for i := 0; i < oldSrcArraySize; i++ { + srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.bucketsize)))) + if evacuated(srcBmap) { + continue + } + + if oldB >= dst.B { // main bucket bits in dst are fewer than oldB bits in src + dstBmap := (*bmap)(add(dst.buckets, uintptr(i)&bucketMask(dst.B))) + for dstBmap.overflow(t) != nil { + dstBmap = dstBmap.overflow(t) + } + pos := 0 + for srcBmap != nil { + dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap) + srcBmap = srcBmap.overflow(t) + } + continue + } + + for srcBmap != nil { + // move from old bucket to new bucket + for i := uintptr(0); i < bucketCnt; i++ { + if isEmpty(srcBmap.tophash[i]) { + continue + } + + if src.flags&hashWriting != 0 { + fatal("concurrent map clone and map write") + } + + srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.keysize)) + if t.indirectkey() { + srcK = *((*unsafe.Pointer)(srcK)) + } + + srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize)) + if 
t.indirectelem() { + srcEle = *((*unsafe.Pointer)(srcEle)) + } + dstEle := mapassign(t, dst, srcK) + typedmemmove(t.elem, dstEle, srcEle) + } + srcBmap = srcBmap.overflow(t) + } + } + return dst +} -- cgit v1.3