diff options
| author | Michael Anthony Knyszek <mknyszek@google.com> | 2022-03-15 00:23:26 +0000 |
|---|---|---|
| committer | Michael Knyszek <mknyszek@google.com> | 2022-03-31 20:02:55 +0000 |
| commit | f990b0f1e80cf6152219b4d3f9a397899e8d6d40 (patch) | |
| tree | 022a41c828dbfeef63248342c6a3d08b94fa7dc0 /src/runtime/mem_linux.go | |
| parent | 4a56ba1c453927256f231a8bcef316bb4b3aa68a (diff) | |
| download | go-f990b0f1e80cf6152219b4d3f9a397899e8d6d40.tar.xz | |
runtime: add wrappers for sys* functions and consolidate docs
This change lifts all non-platform-specific code out of sys* functions
for each platform up into wrappers, and moves documentation about the OS
virtual memory abstraction layer from malloc.go to mem.go, which
contains those wrappers.
Change-Id: Ie803e4447403eaafc508b34b53a1a47d6cee9388
Reviewed-on: https://go-review.googlesource.com/c/go/+/393398
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Trust: Michael Knyszek <mknyszek@google.com>
Diffstat (limited to 'src/runtime/mem_linux.go')
| -rw-r--r-- | src/runtime/mem_linux.go | 22 |
1 file changed, 9 insertions, 13 deletions
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go index f8333014c2..980f7bb53d 100644 --- a/src/runtime/mem_linux.go +++ b/src/runtime/mem_linux.go @@ -17,7 +17,7 @@ const ( // Don't split the stack as this method may be invoked without a valid G, which // prevents us from allocating more stack. //go:nosplit -func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { +func sysAllocOS(n uintptr) unsafe.Pointer { p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { if err == _EACCES { @@ -30,13 +30,12 @@ func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer { } return nil } - sysStat.add(int64(n)) return p } var adviseUnused = uint32(_MADV_FREE) -func sysUnused(v unsafe.Pointer, n uintptr) { +func sysUnusedOS(v unsafe.Pointer, n uintptr) { // By default, Linux's "transparent huge page" support will // merge pages into a huge page if there's even a single // present regular page, undoing the effects of madvise(adviseUnused) @@ -123,7 +122,7 @@ func sysUnused(v unsafe.Pointer, n uintptr) { } } -func sysUsed(v unsafe.Pointer, n uintptr) { +func sysUsedOS(v unsafe.Pointer, n uintptr) { if debug.harddecommit > 0 { p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) if err == _ENOMEM { @@ -145,10 +144,10 @@ func sysUsed(v unsafe.Pointer, n uintptr) { // the end points as well, but it's probably not worth // the cost because when neighboring allocations are // freed sysUnused will just set NOHUGEPAGE again. - sysHugePage(v, n) + sysHugePageOS(v, n) } -func sysHugePage(v unsafe.Pointer, n uintptr) { +func sysHugePageOS(v unsafe.Pointer, n uintptr) { if physHugePageSize != 0 { // Round v up to a huge page boundary. beg := alignUp(uintptr(v), physHugePageSize) @@ -164,16 +163,15 @@ func sysHugePage(v unsafe.Pointer, n uintptr) { // Don't split the stack as this function may be invoked without a valid G, // which prevents us from allocating more stack. 
//go:nosplit -func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { - sysStat.add(-int64(n)) +func sysFreeOS(v unsafe.Pointer, n uintptr) { munmap(v, n) } -func sysFault(v unsafe.Pointer, n uintptr) { +func sysFaultOS(v unsafe.Pointer, n uintptr) { mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0) } -func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { +func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer { p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0) if err != 0 { return nil @@ -181,9 +179,7 @@ func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { return p } -func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) { - sysStat.add(int64(n)) - +func sysMapOS(v unsafe.Pointer, n uintptr) { p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) if err == _ENOMEM { throw("runtime: out of memory") |
