diff options
| author | Rémy Oudompheng <oudomphe@phare.normalesup.org> | 2013-09-16 20:31:21 -0400 |
|---|---|---|
| committer | Russ Cox <rsc@golang.org> | 2013-09-16 20:31:21 -0400 |
| commit | 045dbeaf053f0c78941a11140e5a877237ccc489 (patch) | |
| tree | 61a21dc187ba93e8feb0df2d85fa3127ed665590 /src/pkg/runtime/slice.c | |
| parent | 00061219f03d666e93947a3cb326256062a7a92c (diff) | |
| download | go-045dbeaf053f0c78941a11140e5a877237ccc489.tar.xz | |
cmd/gc, runtime: inline append in frontend.
A new transformation during walk turns append calls
into a combination of growslice and memmove.
benchmark old ns/op new ns/op delta
BenchmarkAppend 141 141 +0.00%
BenchmarkAppend1Byte 18 11 -39.56%
BenchmarkAppend4Bytes 19 10 -42.63%
BenchmarkAppend7Bytes 18 10 -42.16%
BenchmarkAppend8Bytes 18 10 -40.44%
BenchmarkAppend15Bytes 19 11 -41.67%
BenchmarkAppend16Bytes 19 11 -41.97%
BenchmarkAppend32Bytes 23 14 -38.82%
BenchmarkAppendStr1Byte 14 10 -23.78%
BenchmarkAppendStr4Bytes 14 11 -21.13%
BenchmarkAppendStr8Bytes 14 10 -25.17%
BenchmarkAppendStr16Bytes 19 11 -41.45%
BenchmarkAppendStr32Bytes 18 14 -19.44%
BenchmarkAppendSpecialCase 62 63 +1.77%
R=golang-dev, khr, cshapiro, rsc, dave
CC=golang-dev
https://golang.org/cl/12815046
Diffstat (limited to 'src/pkg/runtime/slice.c')
| -rw-r--r-- | src/pkg/runtime/slice.c | 103 |
1 file changed, 0 insertions, 103 deletions
diff --git a/src/pkg/runtime/slice.c b/src/pkg/runtime/slice.c index abe4cfb5f9..ef8ab7fe0a 100644 --- a/src/pkg/runtime/slice.c +++ b/src/pkg/runtime/slice.c @@ -57,109 +57,6 @@ makeslice1(SliceType *t, intgo len, intgo cap, Slice *ret) ret->array = runtime·cnewarray(t->elem, cap); } -// appendslice(type *Type, x, y, []T) []T -#pragma textflag NOSPLIT -void -runtime·appendslice(SliceType *t, Slice x, Slice y, Slice ret) -{ - intgo m; - uintptr w; - void *pc; - uint8 *p, *q; - - m = x.len+y.len; - w = t->elem->size; - - if(m < x.len) - runtime·throw("append: slice overflow"); - - if(m > x.cap) - growslice1(t, x, m, &ret); - else - ret = x; - - if(raceenabled) { - // Don't mark read/writes on the newly allocated slice. - pc = runtime·getcallerpc(&t); - // read x[:len] - if(m > x.cap) - runtime·racereadrangepc(x.array, x.len*w, pc, runtime·appendslice); - // read y - runtime·racereadrangepc(y.array, y.len*w, pc, runtime·appendslice); - // write x[len(x):len(x)+len(y)] - if(m <= x.cap) - runtime·racewriterangepc(ret.array+ret.len*w, y.len*w, pc, runtime·appendslice); - } - - // A very common case is appending bytes. Small appends can avoid the overhead of memmove. - // We can generalize a bit here, and just pick small-sized appends. - p = ret.array+ret.len*w; - q = y.array; - w *= y.len; - if(appendCrossover > 0 && w <= appendCrossover) { - if(p <= q || w <= p-q) // No overlap. - while(w-- > 0) - *p++ = *q++; - else { - p += w; - q += w; - while(w-- > 0) - *--p = *--q; - } - } else { - runtime·memmove(p, q, w); - } - ret.len += y.len; - FLUSH(&ret); -} - - -// appendstr([]byte, string) []byte -#pragma textflag NOSPLIT -void -runtime·appendstr(SliceType *t, Slice x, String y, Slice ret) -{ - intgo m; - void *pc; - uintptr w; - uint8 *p, *q; - - m = x.len+y.len; - - if(m < x.len) - runtime·throw("append: string overflow"); - - if(m > x.cap) - growslice1(t, x, m, &ret); - else - ret = x; - - if(raceenabled) { - // Don't mark read/writes on the newly allocated slice. 
- pc = runtime·getcallerpc(&t); - // read x[:len] - if(m > x.cap) - runtime·racereadrangepc(x.array, x.len, pc, runtime·appendstr); - // write x[len(x):len(x)+len(y)] - if(m <= x.cap) - runtime·racewriterangepc(ret.array+ret.len, y.len, pc, runtime·appendstr); - } - - // Small appends can avoid the overhead of memmove. - w = y.len; - p = ret.array+ret.len; - q = y.str; - if(appendCrossover > 0 && w <= appendCrossover) { - while(w-- > 0) - *p++ = *q++; - } else { - runtime·memmove(p, q, w); - } - ret.len += y.len; - FLUSH(&ret); -} - - // growslice(type *Type, x, []T, n int64) []T void runtime·growslice(SliceType *t, Slice old, int64 n, Slice ret) |
