about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
author    Martin Möhrmann <moehrmann@google.com>  2018-08-24 11:02:00 +0200
committer Martin Möhrmann <moehrmann@google.com>  2018-08-24 18:40:16 +0000
commit    05c02444eb2d8b8d3ecd949c4308d8e2323ae087 (patch)
tree      e578be0855d35afba810156ae16f37dc656da453 /src/runtime
parent    961eb13b6781907b5bfe4a7b22f68206020c4468 (diff)
download  go-05c02444eb2d8b8d3ecd949c4308d8e2323ae087.tar.xz
all: align cpu feature variable offset naming
Add an "offset" prefix to all cpu feature variable offset constants
to signify that they are not boolean cpu feature variables.

Remove _ from offset constant names.

Change-Id: I6e22a79ebcbe6e2ae54c4ac8764f9260bb3223ff
Reviewed-on: https://go-review.googlesource.com/131215
Run-TryBot: Martin Möhrmann <moehrmann@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/asm_386.s      | 2
-rw-r--r--  src/runtime/cpuflags.go    | 8
-rw-r--r--  src/runtime/memclr_386.s   | 2
-rw-r--r--  src/runtime/memclr_amd64.s | 2
-rw-r--r--  src/runtime/memmove_386.s  | 4
-rw-r--r--  src/runtime/memmove_amd64.s| 2
-rw-r--r--  src/runtime/vlop_arm.s     | 2
7 files changed, 11 insertions, 11 deletions
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 725271eec4..7761415ecd 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -881,7 +881,7 @@ TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
// func cputicks() int64
TEXT runtime·cputicks(SB),NOSPLIT,$0-8
- CMPB internal∕cpu·X86+const_offset_x86_HasSSE2(SB), $1
+ CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1
JNE done
CMPB runtime·lfenceBeforeRdtsc(SB), $1
JNE mfence
diff --git a/src/runtime/cpuflags.go b/src/runtime/cpuflags.go
index 050168c2d7..b65523766a 100644
--- a/src/runtime/cpuflags.go
+++ b/src/runtime/cpuflags.go
@@ -11,9 +11,9 @@ import (
// Offsets into internal/cpu records for use in assembly.
const (
- offset_x86_HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
- offset_x86_HasERMS = unsafe.Offsetof(cpu.X86.HasERMS)
- offset_x86_HasSSE2 = unsafe.Offsetof(cpu.X86.HasSSE2)
+ offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
+ offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS)
+ offsetX86HasSSE2 = unsafe.Offsetof(cpu.X86.HasSSE2)
- offset_arm_HasIDIVA = unsafe.Offsetof(cpu.ARM.HasIDIVA)
+ offsetARMHasIDIVA = unsafe.Offsetof(cpu.ARM.HasIDIVA)
)
diff --git a/src/runtime/memclr_386.s b/src/runtime/memclr_386.s
index 318f883964..65f7196312 100644
--- a/src/runtime/memclr_386.s
+++ b/src/runtime/memclr_386.s
@@ -29,7 +29,7 @@ tail:
JBE _5through8
CMPL BX, $16
JBE _9through16
- CMPB internal∕cpu·X86+const_offset_x86_HasSSE2(SB), $1
+ CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1
JNE nosse2
PXOR X0, X0
CMPL BX, $32
diff --git a/src/runtime/memclr_amd64.s b/src/runtime/memclr_amd64.s
index b64b1477f9..d79078fd00 100644
--- a/src/runtime/memclr_amd64.s
+++ b/src/runtime/memclr_amd64.s
@@ -38,7 +38,7 @@ tail:
JBE _65through128
CMPQ BX, $256
JBE _129through256
- CMPB internal∕cpu·X86+const_offset_x86_HasAVX2(SB), $1
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
JE loop_preheader_avx2
// TODO: for really big clears, use MOVNTDQ, even without AVX2.
diff --git a/src/runtime/memmove_386.s b/src/runtime/memmove_386.s
index 85c622b6b6..7b54070f59 100644
--- a/src/runtime/memmove_386.s
+++ b/src/runtime/memmove_386.s
@@ -52,7 +52,7 @@ tail:
JBE move_5through8
CMPL BX, $16
JBE move_9through16
- CMPB internal∕cpu·X86+const_offset_x86_HasSSE2(SB), $1
+ CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1
JNE nosse2
CMPL BX, $32
JBE move_17through32
@@ -73,7 +73,7 @@ nosse2:
*/
forward:
// If REP MOVSB isn't fast, don't use it
- CMPB internal∕cpu·X86+const_offset_x86_HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
+ CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
JNE fwdBy4
// Check alignment
diff --git a/src/runtime/memmove_amd64.s b/src/runtime/memmove_amd64.s
index c5385a3d43..b4243a833b 100644
--- a/src/runtime/memmove_amd64.s
+++ b/src/runtime/memmove_amd64.s
@@ -84,7 +84,7 @@ forward:
JLS move_256through2048
// If REP MOVSB isn't fast, don't use it
- CMPB internal∕cpu·X86+const_offset_x86_HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
+ CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
JNE fwdBy8
// Check alignment
diff --git a/src/runtime/vlop_arm.s b/src/runtime/vlop_arm.s
index 8df13abd98..729653488f 100644
--- a/src/runtime/vlop_arm.s
+++ b/src/runtime/vlop_arm.s
@@ -44,7 +44,7 @@
// the RET instruction will clobber R12 on nacl, and the compiler's register
// allocator needs to know.
TEXT runtime·udiv(SB),NOSPLIT|NOFRAME,$0
- MOVBU internal∕cpu·ARM+const_offset_arm_HasIDIVA(SB), Ra
+ MOVBU internal∕cpu·ARM+const_offsetARMHasIDIVA(SB), Ra
CMP $0, Ra
BNE udiv_hardware