From 4c2b1e0feb3d3112da94fa4cd11ebe995003fa89 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Thu, 1 Feb 2024 10:21:14 +0800 Subject: runtime: migrate internal/atomic to internal/runtime For #65355 Change-Id: I65dd090fb99de9b231af2112c5ccb0eb635db2be Reviewed-on: https://go-review.googlesource.com/c/go/+/560155 Reviewed-by: David Chase Reviewed-by: Michael Pratt LUCI-TryBot-Result: Go LUCI Reviewed-by: Ibrahim Bazoka Auto-Submit: Emmanuel Odeke --- src/cmd/compile/internal/ssa/_gen/genericOps.go | 4 +- src/cmd/compile/internal/ssagen/ssa.go | 216 ++++---- src/cmd/compile/internal/test/inl_test.go | 2 +- src/cmd/compile/internal/types/size.go | 2 +- src/cmd/compile/internal/types2/sizes.go | 2 +- src/cmd/cover/cover.go | 2 +- src/cmd/go/internal/load/pkg.go | 4 +- src/cmd/internal/objabi/pkgspecial.go | 2 +- src/go/build/deps_test.go | 2 +- src/go/types/sizes.go | 2 +- src/internal/coverage/pkid.go | 4 +- src/internal/runtime/atomic/atomic_386.go | 121 +++++ src/internal/runtime/atomic/atomic_386.s | 366 +++++++++++++ src/internal/runtime/atomic/atomic_amd64.go | 135 +++++ src/internal/runtime/atomic/atomic_amd64.s | 289 ++++++++++ .../runtime/atomic/atomic_andor_generic.go | 67 +++ src/internal/runtime/atomic/atomic_andor_test.go | 246 +++++++++ src/internal/runtime/atomic/atomic_arm.go | 248 +++++++++ src/internal/runtime/atomic/atomic_arm.s | 312 +++++++++++ src/internal/runtime/atomic/atomic_arm64.go | 112 ++++ src/internal/runtime/atomic/atomic_arm64.s | 467 ++++++++++++++++ src/internal/runtime/atomic/atomic_loong64.go | 107 ++++ src/internal/runtime/atomic/atomic_loong64.s | 366 +++++++++++++ src/internal/runtime/atomic/atomic_mips64x.go | 89 ++++ src/internal/runtime/atomic/atomic_mips64x.s | 359 +++++++++++++ src/internal/runtime/atomic/atomic_mipsx.go | 162 ++++++ src/internal/runtime/atomic/atomic_mipsx.s | 262 +++++++++ src/internal/runtime/atomic/atomic_ppc64x.go | 107 ++++ src/internal/runtime/atomic/atomic_ppc64x.s | 424 +++++++++++++++ 
src/internal/runtime/atomic/atomic_riscv64.go | 103 ++++ src/internal/runtime/atomic/atomic_riscv64.s | 324 ++++++++++++ src/internal/runtime/atomic/atomic_s390x.go | 141 +++++ src/internal/runtime/atomic/atomic_s390x.s | 304 +++++++++++ src/internal/runtime/atomic/atomic_test.go | 386 ++++++++++++++ src/internal/runtime/atomic/atomic_wasm.go | 341 ++++++++++++ src/internal/runtime/atomic/atomic_wasm.s | 10 + src/internal/runtime/atomic/bench_test.go | 195 +++++++ src/internal/runtime/atomic/doc.go | 18 + src/internal/runtime/atomic/stubs.go | 59 +++ src/internal/runtime/atomic/sys_linux_arm.s | 134 +++++ src/internal/runtime/atomic/sys_nonlinux_arm.s | 79 +++ src/internal/runtime/atomic/types.go | 587 +++++++++++++++++++++ src/internal/runtime/atomic/types_64bit.go | 33 ++ src/internal/runtime/atomic/unaligned.go | 9 + src/runtime/HACKING.md | 2 +- src/runtime/abi_test.go | 2 +- src/runtime/align_runtime_test.go | 2 +- src/runtime/arena.go | 2 +- src/runtime/arena_test.go | 2 +- src/runtime/atomic_pointer.go | 6 +- src/runtime/chan.go | 2 +- src/runtime/coverage/testsupport.go | 2 +- src/runtime/debug.go | 2 +- src/runtime/debuglog.go | 2 +- src/runtime/export_test.go | 2 +- src/runtime/histogram.go | 2 +- src/runtime/iface.go | 2 +- src/runtime/internal/atomic/atomic_386.go | 121 ----- src/runtime/internal/atomic/atomic_386.s | 366 ------------- src/runtime/internal/atomic/atomic_amd64.go | 135 ----- src/runtime/internal/atomic/atomic_amd64.s | 289 ---------- .../internal/atomic/atomic_andor_generic.go | 67 --- src/runtime/internal/atomic/atomic_andor_test.go | 246 --------- src/runtime/internal/atomic/atomic_arm.go | 248 --------- src/runtime/internal/atomic/atomic_arm.s | 312 ----------- src/runtime/internal/atomic/atomic_arm64.go | 112 ---- src/runtime/internal/atomic/atomic_arm64.s | 467 ---------------- src/runtime/internal/atomic/atomic_loong64.go | 107 ---- src/runtime/internal/atomic/atomic_loong64.s | 366 ------------- 
src/runtime/internal/atomic/atomic_mips64x.go | 89 ---- src/runtime/internal/atomic/atomic_mips64x.s | 359 ------------- src/runtime/internal/atomic/atomic_mipsx.go | 162 ------ src/runtime/internal/atomic/atomic_mipsx.s | 262 --------- src/runtime/internal/atomic/atomic_ppc64x.go | 107 ---- src/runtime/internal/atomic/atomic_ppc64x.s | 424 --------------- src/runtime/internal/atomic/atomic_riscv64.go | 103 ---- src/runtime/internal/atomic/atomic_riscv64.s | 324 ------------ src/runtime/internal/atomic/atomic_s390x.go | 141 ----- src/runtime/internal/atomic/atomic_s390x.s | 304 ----------- src/runtime/internal/atomic/atomic_test.go | 386 -------------- src/runtime/internal/atomic/atomic_wasm.go | 341 ------------ src/runtime/internal/atomic/atomic_wasm.s | 10 - src/runtime/internal/atomic/bench_test.go | 195 ------- src/runtime/internal/atomic/doc.go | 18 - src/runtime/internal/atomic/stubs.go | 59 --- src/runtime/internal/atomic/sys_linux_arm.s | 134 ----- src/runtime/internal/atomic/sys_nonlinux_arm.s | 79 --- src/runtime/internal/atomic/types.go | 587 --------------------- src/runtime/internal/atomic/types_64bit.go | 33 -- src/runtime/internal/atomic/unaligned.go | 9 - src/runtime/lfstack.go | 2 +- src/runtime/lock_futex.go | 2 +- src/runtime/lock_sema.go | 2 +- src/runtime/lockrank_on.go | 2 +- src/runtime/malloc.go | 2 +- src/runtime/map.go | 4 +- src/runtime/mbitmap.go | 2 +- src/runtime/mcache.go | 2 +- src/runtime/mcentral.go | 2 +- src/runtime/mcheckmark.go | 2 +- src/runtime/mem_linux.go | 2 +- src/runtime/mfinal.go | 2 +- src/runtime/mgc.go | 2 +- src/runtime/mgclimit.go | 2 +- src/runtime/mgcmark.go | 2 +- src/runtime/mgcpacer.go | 2 +- src/runtime/mgcscavenge.go | 2 +- src/runtime/mgcscavenge_test.go | 2 +- src/runtime/mgcsweep.go | 2 +- src/runtime/mgcwork.go | 2 +- src/runtime/mheap.go | 2 +- src/runtime/mpagealloc.go | 2 +- src/runtime/mprof.go | 2 +- src/runtime/mranges.go | 2 +- src/runtime/mspanset.go | 2 +- src/runtime/mstats.go | 2 +- 
src/runtime/mwbbuf.go | 2 +- src/runtime/netpoll.go | 2 +- src/runtime/netpoll_aix.go | 2 +- src/runtime/netpoll_epoll.go | 2 +- src/runtime/netpoll_kqueue.go | 2 +- src/runtime/netpoll_solaris.go | 2 +- src/runtime/netpoll_stub.go | 2 +- src/runtime/netpoll_windows.go | 2 +- src/runtime/os3_solaris.go | 2 +- src/runtime/os_aix.go | 2 +- src/runtime/os_linux.go | 8 +- src/runtime/os_netbsd.go | 2 +- src/runtime/os_openbsd.go | 2 +- src/runtime/os_openbsd_syscall2.go | 2 +- src/runtime/os_plan9.go | 2 +- src/runtime/os_wasm.go | 2 +- src/runtime/os_windows.go | 2 +- src/runtime/panic.go | 2 +- src/runtime/pinner.go | 2 +- src/runtime/proc.go | 10 +- src/runtime/profbuf.go | 2 +- src/runtime/runtime.go | 2 +- src/runtime/runtime1.go | 2 +- src/runtime/runtime2.go | 2 +- src/runtime/rwmutex.go | 2 +- src/runtime/sema.go | 2 +- src/runtime/signal_unix.go | 2 +- src/runtime/sigqueue.go | 2 +- src/runtime/stack.go | 2 +- src/runtime/stubs2.go | 2 +- src/runtime/symtab.go | 2 +- src/runtime/sys_darwin.go | 2 +- src/runtime/sys_openbsd2.go | 2 +- src/runtime/testdata/testprognet/waiters.go | 2 +- src/runtime/time.go | 2 +- src/runtime/trace.go | 2 +- src/runtime/trace2.go | 2 +- src/runtime/trace2map.go | 2 +- src/runtime/trace2runtime.go | 2 +- src/runtime/trace2status.go | 2 +- src/runtime/traceback.go | 4 +- src/runtime/vdso_freebsd.go | 2 +- src/runtime/vdso_freebsd_x86.go | 2 +- src/sync/atomic/asm.s | 52 +- src/sync/pool.go | 6 +- 161 files changed, 7205 insertions(+), 7205 deletions(-) create mode 100644 src/internal/runtime/atomic/atomic_386.go create mode 100644 src/internal/runtime/atomic/atomic_386.s create mode 100644 src/internal/runtime/atomic/atomic_amd64.go create mode 100644 src/internal/runtime/atomic/atomic_amd64.s create mode 100644 src/internal/runtime/atomic/atomic_andor_generic.go create mode 100644 src/internal/runtime/atomic/atomic_andor_test.go create mode 100644 src/internal/runtime/atomic/atomic_arm.go create mode 100644 
src/internal/runtime/atomic/atomic_arm.s create mode 100644 src/internal/runtime/atomic/atomic_arm64.go create mode 100644 src/internal/runtime/atomic/atomic_arm64.s create mode 100644 src/internal/runtime/atomic/atomic_loong64.go create mode 100644 src/internal/runtime/atomic/atomic_loong64.s create mode 100644 src/internal/runtime/atomic/atomic_mips64x.go create mode 100644 src/internal/runtime/atomic/atomic_mips64x.s create mode 100644 src/internal/runtime/atomic/atomic_mipsx.go create mode 100644 src/internal/runtime/atomic/atomic_mipsx.s create mode 100644 src/internal/runtime/atomic/atomic_ppc64x.go create mode 100644 src/internal/runtime/atomic/atomic_ppc64x.s create mode 100644 src/internal/runtime/atomic/atomic_riscv64.go create mode 100644 src/internal/runtime/atomic/atomic_riscv64.s create mode 100644 src/internal/runtime/atomic/atomic_s390x.go create mode 100644 src/internal/runtime/atomic/atomic_s390x.s create mode 100644 src/internal/runtime/atomic/atomic_test.go create mode 100644 src/internal/runtime/atomic/atomic_wasm.go create mode 100644 src/internal/runtime/atomic/atomic_wasm.s create mode 100644 src/internal/runtime/atomic/bench_test.go create mode 100644 src/internal/runtime/atomic/doc.go create mode 100644 src/internal/runtime/atomic/stubs.go create mode 100644 src/internal/runtime/atomic/sys_linux_arm.s create mode 100644 src/internal/runtime/atomic/sys_nonlinux_arm.s create mode 100644 src/internal/runtime/atomic/types.go create mode 100644 src/internal/runtime/atomic/types_64bit.go create mode 100644 src/internal/runtime/atomic/unaligned.go delete mode 100644 src/runtime/internal/atomic/atomic_386.go delete mode 100644 src/runtime/internal/atomic/atomic_386.s delete mode 100644 src/runtime/internal/atomic/atomic_amd64.go delete mode 100644 src/runtime/internal/atomic/atomic_amd64.s delete mode 100644 src/runtime/internal/atomic/atomic_andor_generic.go delete mode 100644 src/runtime/internal/atomic/atomic_andor_test.go delete mode 100644 
src/runtime/internal/atomic/atomic_arm.go delete mode 100644 src/runtime/internal/atomic/atomic_arm.s delete mode 100644 src/runtime/internal/atomic/atomic_arm64.go delete mode 100644 src/runtime/internal/atomic/atomic_arm64.s delete mode 100644 src/runtime/internal/atomic/atomic_loong64.go delete mode 100644 src/runtime/internal/atomic/atomic_loong64.s delete mode 100644 src/runtime/internal/atomic/atomic_mips64x.go delete mode 100644 src/runtime/internal/atomic/atomic_mips64x.s delete mode 100644 src/runtime/internal/atomic/atomic_mipsx.go delete mode 100644 src/runtime/internal/atomic/atomic_mipsx.s delete mode 100644 src/runtime/internal/atomic/atomic_ppc64x.go delete mode 100644 src/runtime/internal/atomic/atomic_ppc64x.s delete mode 100644 src/runtime/internal/atomic/atomic_riscv64.go delete mode 100644 src/runtime/internal/atomic/atomic_riscv64.s delete mode 100644 src/runtime/internal/atomic/atomic_s390x.go delete mode 100644 src/runtime/internal/atomic/atomic_s390x.s delete mode 100644 src/runtime/internal/atomic/atomic_test.go delete mode 100644 src/runtime/internal/atomic/atomic_wasm.go delete mode 100644 src/runtime/internal/atomic/atomic_wasm.s delete mode 100644 src/runtime/internal/atomic/bench_test.go delete mode 100644 src/runtime/internal/atomic/doc.go delete mode 100644 src/runtime/internal/atomic/stubs.go delete mode 100644 src/runtime/internal/atomic/sys_linux_arm.s delete mode 100644 src/runtime/internal/atomic/sys_nonlinux_arm.s delete mode 100644 src/runtime/internal/atomic/types.go delete mode 100644 src/runtime/internal/atomic/types_64bit.go delete mode 100644 src/runtime/internal/atomic/unaligned.go (limited to 'src') diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go index 69eb48ce44..637e7b617c 100644 --- a/src/cmd/compile/internal/ssa/_gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -587,7 +587,7 @@ var genericOps = []opData{ {name: "MakeResult", 
argLength: -1}, // arg0 .. are components of a "Result" (like the result from a Call). The last arg should be memory (like the result from a call). // Atomic operations used for semantically inlining sync/atomic and - // runtime/internal/atomic. Atomic loads return a new memory so that + // internal/runtime/atomic. Atomic loads return a new memory so that // the loads are properly ordered with respect to other loads and // stores. {name: "AtomicLoad8", argLength: 2, typ: "(UInt8,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory. @@ -650,7 +650,7 @@ var genericOps = []opData{ // If [boolean Value] [then, else] // First [] [always, never] // Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc) -//JumpTable [integer Value] [succ1,succ2,..] +// JumpTable [integer Value] [succ1,succ2,..] var genericBlocks = []blockData{ {name: "Plain"}, // a single successor diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 5174cf123c..37d6165e42 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -2008,7 +2008,7 @@ func (s *state) stmt(n ir.Node) { if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) { // Note: we can only use the cache if we have the right atomic load instruction. // Double-check that here. 
- if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok { + if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "internal/runtime/atomic", "Loadp"}]; !ok { s.Fatalf("atomic load not available") } merge = s.f.NewBlock(ssa.BlockPlain) @@ -4286,43 +4286,43 @@ func InitTables() { addF("runtime/internal/sys", "PrefetchStreamed", makePrefetchFunc(ssa.OpPrefetchCacheStreamed), sys.AMD64, sys.ARM64, sys.PPC64) - /******** runtime/internal/atomic ********/ - addF("runtime/internal/atomic", "Load", + /******** internal/runtime/atomic ********/ + addF("internal/runtime/atomic", "Load", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Load8", + addF("internal/runtime/atomic", "Load8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v) }, sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Load64", + addF("internal/runtime/atomic", "Load64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - 
addF("runtime/internal/atomic", "LoadAcq", + addF("internal/runtime/atomic", "LoadAcq", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.PPC64, sys.S390X) - addF("runtime/internal/atomic", "LoadAcq64", + addF("internal/runtime/atomic", "LoadAcq64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v) }, sys.PPC64) - addF("runtime/internal/atomic", "Loadp", + addF("internal/runtime/atomic", "Loadp", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) @@ -4330,51 +4330,51 @@ func InitTables() { }, sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Store", + addF("internal/runtime/atomic", "Store", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Store8", + addF("internal/runtime/atomic", "Store8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - 
addF("runtime/internal/atomic", "Store64", + addF("internal/runtime/atomic", "Store64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "StorepNoWB", + addF("internal/runtime/atomic", "StorepNoWB", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "StoreRel", + addF("internal/runtime/atomic", "StoreRel", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64, sys.S390X) - addF("runtime/internal/atomic", "StoreRel64", + addF("internal/runtime/atomic", "StoreRel64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.PPC64) - addF("runtime/internal/atomic", "Xchg", + addF("internal/runtime/atomic", "Xchg", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Xchg64", + addF("internal/runtime/atomic", "Xchg64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], 
args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) @@ -4429,21 +4429,21 @@ func InitTables() { s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) } - addF("runtime/internal/atomic", "Xchg", + addF("internal/runtime/atomic", "Xchg", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "Xchg64", + addF("internal/runtime/atomic", "Xchg64", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "Xadd", + addF("internal/runtime/atomic", "Xadd", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v) }, sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Xadd64", + addF("internal/runtime/atomic", "Xadd64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) @@ -4451,28 +4451,28 @@ func InitTables() { }, sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Xadd", + addF("internal/runtime/atomic", "Xadd", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "Xadd64", + addF("internal/runtime/atomic", "Xadd64", 
makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "Cas", + addF("internal/runtime/atomic", "Cas", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Cas64", + addF("internal/runtime/atomic", "Cas64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v) }, sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "CasRel", + addF("internal/runtime/atomic", "CasRel", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) @@ -4486,32 +4486,32 @@ func InitTables() { s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v) } - addF("runtime/internal/atomic", "Cas", + addF("internal/runtime/atomic", "Cas", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "Cas64", + addF("internal/runtime/atomic", "Cas64", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, 
ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "And8", + addF("internal/runtime/atomic", "And8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "And", + addF("internal/runtime/atomic", "And", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Or8", + addF("internal/runtime/atomic", "Or8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem()) return nil }, sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) - addF("runtime/internal/atomic", "Or", + addF("internal/runtime/atomic", "Or", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem()) return nil @@ -4522,65 +4522,65 @@ func InitTables() { s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem()) } - addF("runtime/internal/atomic", "And8", + addF("internal/runtime/atomic", "And8", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "And", + addF("internal/runtime/atomic", "And", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "Or8", + addF("internal/runtime/atomic", "Or8", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, 
ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) - addF("runtime/internal/atomic", "Or", + addF("internal/runtime/atomic", "Or", makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64), sys.ARM64) // Aliases for atomic load operations - alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...) - alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...) - alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...) - alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...) - alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...) - alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...) - alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...) - alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...) - alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) - alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed - alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) - alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed + alias("internal/runtime/atomic", "Loadint32", "internal/runtime/atomic", "Load", all...) + alias("internal/runtime/atomic", "Loadint64", "internal/runtime/atomic", "Load64", all...) + alias("internal/runtime/atomic", "Loaduintptr", "internal/runtime/atomic", "Load", p4...) + alias("internal/runtime/atomic", "Loaduintptr", "internal/runtime/atomic", "Load64", p8...) + alias("internal/runtime/atomic", "Loaduint", "internal/runtime/atomic", "Load", p4...) 
+ alias("internal/runtime/atomic", "Loaduint", "internal/runtime/atomic", "Load64", p8...) + alias("internal/runtime/atomic", "LoadAcq", "internal/runtime/atomic", "Load", lwatomics...) + alias("internal/runtime/atomic", "LoadAcq64", "internal/runtime/atomic", "Load64", lwatomics...) + alias("internal/runtime/atomic", "LoadAcquintptr", "internal/runtime/atomic", "LoadAcq", p4...) + alias("sync", "runtime_LoadAcquintptr", "internal/runtime/atomic", "LoadAcq", p4...) // linknamed + alias("internal/runtime/atomic", "LoadAcquintptr", "internal/runtime/atomic", "LoadAcq64", p8...) + alias("sync", "runtime_LoadAcquintptr", "internal/runtime/atomic", "LoadAcq64", p8...) // linknamed // Aliases for atomic store operations - alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...) - alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...) - alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...) - alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...) - alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...) - alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...) - alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) - alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed - alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) - alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed + alias("internal/runtime/atomic", "Storeint32", "internal/runtime/atomic", "Store", all...) + alias("internal/runtime/atomic", "Storeint64", "internal/runtime/atomic", "Store64", all...) + alias("internal/runtime/atomic", "Storeuintptr", "internal/runtime/atomic", "Store", p4...) 
+ alias("internal/runtime/atomic", "Storeuintptr", "internal/runtime/atomic", "Store64", p8...) + alias("internal/runtime/atomic", "StoreRel", "internal/runtime/atomic", "Store", lwatomics...) + alias("internal/runtime/atomic", "StoreRel64", "internal/runtime/atomic", "Store64", lwatomics...) + alias("internal/runtime/atomic", "StoreReluintptr", "internal/runtime/atomic", "StoreRel", p4...) + alias("sync", "runtime_StoreReluintptr", "internal/runtime/atomic", "StoreRel", p4...) // linknamed + alias("internal/runtime/atomic", "StoreReluintptr", "internal/runtime/atomic", "StoreRel64", p8...) + alias("sync", "runtime_StoreReluintptr", "internal/runtime/atomic", "StoreRel64", p8...) // linknamed // Aliases for atomic swap operations - alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...) - alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...) - alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...) - alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...) + alias("internal/runtime/atomic", "Xchgint32", "internal/runtime/atomic", "Xchg", all...) + alias("internal/runtime/atomic", "Xchgint64", "internal/runtime/atomic", "Xchg64", all...) + alias("internal/runtime/atomic", "Xchguintptr", "internal/runtime/atomic", "Xchg", p4...) + alias("internal/runtime/atomic", "Xchguintptr", "internal/runtime/atomic", "Xchg64", p8...) // Aliases for atomic add operations - alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...) - alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...) - alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...) - alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...) + alias("internal/runtime/atomic", "Xaddint32", "internal/runtime/atomic", "Xadd", all...) 
+ alias("internal/runtime/atomic", "Xaddint64", "internal/runtime/atomic", "Xadd64", all...) + alias("internal/runtime/atomic", "Xadduintptr", "internal/runtime/atomic", "Xadd", p4...) + alias("internal/runtime/atomic", "Xadduintptr", "internal/runtime/atomic", "Xadd64", p8...) // Aliases for atomic CAS operations - alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...) - alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...) - alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...) - alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...) - alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...) - alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...) - alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...) + alias("internal/runtime/atomic", "Casint32", "internal/runtime/atomic", "Cas", all...) + alias("internal/runtime/atomic", "Casint64", "internal/runtime/atomic", "Cas64", all...) + alias("internal/runtime/atomic", "Casuintptr", "internal/runtime/atomic", "Cas", p4...) + alias("internal/runtime/atomic", "Casuintptr", "internal/runtime/atomic", "Cas64", p8...) + alias("internal/runtime/atomic", "Casp1", "internal/runtime/atomic", "Cas", p4...) + alias("internal/runtime/atomic", "Casp1", "internal/runtime/atomic", "Cas64", p8...) + alias("internal/runtime/atomic", "CasRel", "internal/runtime/atomic", "Cas", lwatomics...) /******** math ********/ addF("math", "sqrt", @@ -5021,42 +5021,42 @@ func InitTables() { /******** sync/atomic ********/ // Note: these are disabled by flag_race in findIntrinsic below. - alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...) - alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...) - alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...) 
- alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...) - alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...) - alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...) - alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...) - - alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...) - alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...) + alias("sync/atomic", "LoadInt32", "internal/runtime/atomic", "Load", all...) + alias("sync/atomic", "LoadInt64", "internal/runtime/atomic", "Load64", all...) + alias("sync/atomic", "LoadPointer", "internal/runtime/atomic", "Loadp", all...) + alias("sync/atomic", "LoadUint32", "internal/runtime/atomic", "Load", all...) + alias("sync/atomic", "LoadUint64", "internal/runtime/atomic", "Load64", all...) + alias("sync/atomic", "LoadUintptr", "internal/runtime/atomic", "Load", p4...) + alias("sync/atomic", "LoadUintptr", "internal/runtime/atomic", "Load64", p8...) + + alias("sync/atomic", "StoreInt32", "internal/runtime/atomic", "Store", all...) + alias("sync/atomic", "StoreInt64", "internal/runtime/atomic", "Store64", all...) // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap. - alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...) - alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...) - alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...) - alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...) - - alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...) - alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...) - alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...) - alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...) 
- alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...) - alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...) - - alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...) - alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...) - alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...) - alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...) - alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...) - alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...) - - alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...) - alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...) - alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...) - alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...) - alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...) - alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...) + alias("sync/atomic", "StoreUint32", "internal/runtime/atomic", "Store", all...) + alias("sync/atomic", "StoreUint64", "internal/runtime/atomic", "Store64", all...) + alias("sync/atomic", "StoreUintptr", "internal/runtime/atomic", "Store", p4...) + alias("sync/atomic", "StoreUintptr", "internal/runtime/atomic", "Store64", p8...) + + alias("sync/atomic", "SwapInt32", "internal/runtime/atomic", "Xchg", all...) + alias("sync/atomic", "SwapInt64", "internal/runtime/atomic", "Xchg64", all...) + alias("sync/atomic", "SwapUint32", "internal/runtime/atomic", "Xchg", all...) + alias("sync/atomic", "SwapUint64", "internal/runtime/atomic", "Xchg64", all...) + alias("sync/atomic", "SwapUintptr", "internal/runtime/atomic", "Xchg", p4...) + alias("sync/atomic", "SwapUintptr", "internal/runtime/atomic", "Xchg64", p8...) 
+ + alias("sync/atomic", "CompareAndSwapInt32", "internal/runtime/atomic", "Cas", all...) + alias("sync/atomic", "CompareAndSwapInt64", "internal/runtime/atomic", "Cas64", all...) + alias("sync/atomic", "CompareAndSwapUint32", "internal/runtime/atomic", "Cas", all...) + alias("sync/atomic", "CompareAndSwapUint64", "internal/runtime/atomic", "Cas64", all...) + alias("sync/atomic", "CompareAndSwapUintptr", "internal/runtime/atomic", "Cas", p4...) + alias("sync/atomic", "CompareAndSwapUintptr", "internal/runtime/atomic", "Cas64", p8...) + + alias("sync/atomic", "AddInt32", "internal/runtime/atomic", "Xadd", all...) + alias("sync/atomic", "AddInt64", "internal/runtime/atomic", "Xadd64", all...) + alias("sync/atomic", "AddUint32", "internal/runtime/atomic", "Xadd", all...) + alias("sync/atomic", "AddUint64", "internal/runtime/atomic", "Xadd64", all...) + alias("sync/atomic", "AddUintptr", "internal/runtime/atomic", "Xadd", p4...) + alias("sync/atomic", "AddUintptr", "internal/runtime/atomic", "Xadd64", p8...) /******** math/big ********/ alias("math/big", "mulWW", "math/bits", "Mul64", p8...) @@ -6663,7 +6663,7 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, targ if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) { // Note: we can only use the cache if we have the right atomic load instruction. // Double-check that here. - if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok { + if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "internal/runtime/atomic", "Loadp"}]; !ok { s.Fatalf("atomic load not available") } // Pick right size ops. 
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go index 0ccc7b3761..271834a595 100644 --- a/src/cmd/compile/internal/test/inl_test.go +++ b/src/cmd/compile/internal/test/inl_test.go @@ -254,7 +254,7 @@ func TestIntendedInlining(t *testing.T) { want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32") } if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" || runtime.GOARCH == "loong64" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "s390x" { - // runtime/internal/atomic.Loaduintptr is only intrinsified on these platforms. + // internal/runtime/atomic.Loaduintptr is only intrinsified on these platforms. want["runtime"] = append(want["runtime"], "traceAcquire") } if bits.UintSize == 64 { diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go index 1ae8b6988a..00707fc86e 100644 --- a/src/cmd/compile/internal/types/size.go +++ b/src/cmd/compile/internal/types/size.go @@ -199,7 +199,7 @@ func isAtomicStdPkg(p *Pkg) bool { if p.Prefix == `""` { panic("bad package prefix") } - return p.Prefix == "sync/atomic" || p.Prefix == "runtime/internal/atomic" + return p.Prefix == "sync/atomic" || p.Prefix == "internal/runtime/atomic" } // CalcSize calculates and stores the size, alignment, eq/hash algorithm, diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go index 7d20c97010..7b1c00b40a 100644 --- a/src/cmd/compile/internal/types2/sizes.go +++ b/src/cmd/compile/internal/types2/sizes.go @@ -120,7 +120,7 @@ func IsSyncAtomicAlign64(T Type) bool { return obj.Name() == "align64" && obj.Pkg() != nil && (obj.Pkg().Path() == "sync/atomic" || - obj.Pkg().Path() == "runtime/internal/atomic") + obj.Pkg().Path() == "internal/runtime/atomic") } func (s *StdSizes) Offsetsof(fields []*Var) []int64 { diff --git a/src/cmd/cover/cover.go 
b/src/cmd/cover/cover.go index ba7694b3af..d4e529bcde 100644 --- a/src/cmd/cover/cover.go +++ b/src/cmd/cover/cover.go @@ -405,7 +405,7 @@ func (f *File) Visit(node ast.Node) ast.Visitor { // // Note that in the current implementation (Go 1.20) both // routines are assembly stubs that forward calls to the - // runtime/internal/atomic equivalents, hence the infinite + // internal/runtime/atomic equivalents, hence the infinite // loop scenario is purely theoretical (maybe if in some // future implementation one of these functions might be // written in Go). See #57445 for more details. diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index fd599b5189..f241e93ee8 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -3480,11 +3480,11 @@ func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op strin } // Silently ignore attempts to run coverage on sync/atomic - // and/or runtime/internal/atomic when using atomic coverage + // and/or internal/runtime/atomic when using atomic coverage // mode. Atomic coverage mode uses sync/atomic, so we can't // also do coverage on it. 
if cfg.BuildCoverMode == "atomic" && p.Standard && - (p.ImportPath == "sync/atomic" || p.ImportPath == "runtime/internal/atomic") { + (p.ImportPath == "sync/atomic" || p.ImportPath == "internal/runtime/atomic") { continue } diff --git a/src/cmd/internal/objabi/pkgspecial.go b/src/cmd/internal/objabi/pkgspecial.go index 6c1f460f07..44f26024f7 100644 --- a/src/cmd/internal/objabi/pkgspecial.go +++ b/src/cmd/internal/objabi/pkgspecial.go @@ -43,7 +43,7 @@ type PkgSpecial struct { var runtimePkgs = []string{ "runtime", - "runtime/internal/atomic", + "internal/runtime/atomic", "runtime/internal/math", "runtime/internal/sys", "internal/runtime/syscall", diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 427f5a96b5..59c0fdde55 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -70,7 +70,7 @@ var depsRules = ` < internal/unsafeheader < runtime/internal/sys < internal/runtime/syscall - < runtime/internal/atomic + < internal/runtime/atomic < runtime/internal/math < runtime < sync/atomic diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go index b7a2bea0e8..9e377fe2df 100644 --- a/src/go/types/sizes.go +++ b/src/go/types/sizes.go @@ -122,7 +122,7 @@ func _IsSyncAtomicAlign64(T Type) bool { return obj.Name() == "align64" && obj.Pkg() != nil && (obj.Pkg().Path() == "sync/atomic" || - obj.Pkg().Path() == "runtime/internal/atomic") + obj.Pkg().Path() == "internal/runtime/atomic") } func (s *StdSizes) Offsetsof(fields []*Var) []int64 { diff --git a/src/internal/coverage/pkid.go b/src/internal/coverage/pkid.go index a7a89c03ee..86ff302507 100644 --- a/src/internal/coverage/pkid.go +++ b/src/internal/coverage/pkid.go @@ -24,7 +24,7 @@ package coverage // registered list: // slot: 0 path='internal/cpu' hard-coded id: 1 // slot: 1 path='internal/goarch' hard-coded id: 2 -// slot: 2 path='runtime/internal/atomic' hard-coded id: 3 +// slot: 2 path='internal/runtime/atomic' hard-coded id: 3 // slot: 3 path='internal/goos' // slot: 4 
path='runtime/internal/sys' hard-coded id: 5 // slot: 5 path='internal/abi' hard-coded id: 4 @@ -47,7 +47,7 @@ package coverage var rtPkgs = [...]string{ "internal/cpu", "internal/goarch", - "runtime/internal/atomic", + "internal/runtime/atomic", "internal/goos", "internal/chacha8rand", "runtime/internal/sys", diff --git a/src/internal/runtime/atomic/atomic_386.go b/src/internal/runtime/atomic/atomic_386.go new file mode 100644 index 0000000000..e74dcaa92d --- /dev/null +++ b/src/internal/runtime/atomic/atomic_386.go @@ -0,0 +1,121 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 + +package atomic + +import "unsafe" + +// Export some functions via linkname to assembly in sync/atomic. +// +//go:linkname Load +//go:linkname Loadp + +//go:nosplit +//go:noinline +func Load(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func Loadp(ptr unsafe.Pointer) unsafe.Pointer { + return *(*unsafe.Pointer)(ptr) +} + +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcquintptr(ptr *uintptr) uintptr { + return *ptr +} + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load64(ptr *uint64) uint64 + +//go:nosplit +//go:noinline +func Load8(ptr *uint8) uint8 { + return *ptr +} + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func 
And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/src/internal/runtime/atomic/atomic_386.s b/src/internal/runtime/atomic/atomic_386.s new file mode 100644 index 0000000000..08812c37ec --- /dev/null +++ b/src/internal/runtime/atomic/atomic_386.s @@ -0,0 +1,366 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" +#include "funcdata.h" + +// bool Cas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// }else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-13 + MOVL ptr+0(FP), BX + MOVL old+4(FP), AX + MOVL new+8(FP), CX + LOCK + CMPXCHGL CX, 0(BX) + SETEQ ret+12(FP) + RET + +TEXT ·Casint32(SB), NOSPLIT, $0-13 + JMP ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-21 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-13 + JMP ·Cas(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-13 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT, $0-8 + JMP ·Load(SB) + +TEXT ·Loaduint(SB), NOSPLIT, $0-8 + JMP ·Load(SB) + +TEXT ·Storeint32(SB), NOSPLIT, $0-8 + JMP ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-12 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-8 + JMP ·Store(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-12 + JMP ·Xadd(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-8 + JMP ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-12 + JMP ·Load64(SB) + +TEXT ·Xaddint32(SB), NOSPLIT, $0-12 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-20 + JMP ·Xadd64(SB) + +// bool ·Cas64(uint64 *val, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-21 + NO_LOCAL_POINTERS + MOVL ptr+0(FP), BP + TESTL $7, BP + JZ 2(PC) + CALL ·panicUnaligned(SB) + MOVL old_lo+4(FP), AX + MOVL old_hi+8(FP), DX + MOVL new_lo+12(FP), BX + MOVL new_hi+16(FP), CX + LOCK + CMPXCHG8B 0(BP) + SETEQ ret+20(FP) + RET + +// bool Casp1(void **p, void *old, void *new) +// Atomically: +// if(*p == old){ +// *p = new; +// return 1; +// }else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-13 + MOVL ptr+0(FP), BX + MOVL old+4(FP), AX + MOVL new+8(FP), CX + LOCK + CMPXCHGL CX, 0(BX) + SETEQ ret+12(FP) + RET + +// uint32 Xadd(uint32 volatile *val, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-12 + MOVL ptr+0(FP), BX + MOVL 
delta+4(FP), AX + MOVL AX, CX + LOCK + XADDL AX, 0(BX) + ADDL CX, AX + MOVL AX, ret+8(FP) + RET + +TEXT ·Xadd64(SB), NOSPLIT, $0-20 + NO_LOCAL_POINTERS + // no XADDQ so use CMPXCHG8B loop + MOVL ptr+0(FP), BP + TESTL $7, BP + JZ 2(PC) + CALL ·panicUnaligned(SB) + // DI:SI = delta + MOVL delta_lo+4(FP), SI + MOVL delta_hi+8(FP), DI + // DX:AX = *addr + MOVL 0(BP), AX + MOVL 4(BP), DX +addloop: + // CX:BX = DX:AX (*addr) + DI:SI (delta) + MOVL AX, BX + MOVL DX, CX + ADDL SI, BX + ADCL DI, CX + + // if *addr == DX:AX { + // *addr = CX:BX + // } else { + // DX:AX = *addr + // } + // all in one instruction + LOCK + CMPXCHG8B 0(BP) + + JNZ addloop + + // success + // return CX:BX + MOVL BX, ret_lo+12(FP) + MOVL CX, ret_hi+16(FP) + RET + +TEXT ·Xchg(SB), NOSPLIT, $0-12 + MOVL ptr+0(FP), BX + MOVL new+4(FP), AX + XCHGL AX, 0(BX) + MOVL AX, ret+8(FP) + RET + +TEXT ·Xchgint32(SB), NOSPLIT, $0-12 + JMP ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-20 + JMP ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-12 + JMP ·Xchg(SB) + +TEXT ·Xchg64(SB),NOSPLIT,$0-20 + NO_LOCAL_POINTERS + // no XCHGQ so use CMPXCHG8B loop + MOVL ptr+0(FP), BP + TESTL $7, BP + JZ 2(PC) + CALL ·panicUnaligned(SB) + // CX:BX = new + MOVL new_lo+4(FP), BX + MOVL new_hi+8(FP), CX + // DX:AX = *addr + MOVL 0(BP), AX + MOVL 4(BP), DX +swaploop: + // if *addr == DX:AX + // *addr = CX:BX + // else + // DX:AX = *addr + // all in one instruction + LOCK + CMPXCHG8B 0(BP) + JNZ swaploop + + // success + // return DX:AX + MOVL AX, ret_lo+12(FP) + MOVL DX, ret_hi+16(FP) + RET + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), BX + MOVL val+4(FP), AX + XCHGL AX, 0(BX) + RET + +TEXT ·Store(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), BX + MOVL val+4(FP), AX + XCHGL AX, 0(BX) + RET + +TEXT ·StoreRel(SB), NOSPLIT, $0-8 + JMP ·Store(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-8 + JMP ·Store(SB) + +// uint64 atomicload64(uint64 volatile* addr); +TEXT ·Load64(SB), NOSPLIT, $0-12 + NO_LOCAL_POINTERS + MOVL ptr+0(FP), AX + 
TESTL $7, AX + JZ 2(PC) + CALL ·panicUnaligned(SB) + MOVQ (AX), M0 + MOVQ M0, ret+4(FP) + EMMS + RET + +// void ·Store64(uint64 volatile* addr, uint64 v); +TEXT ·Store64(SB), NOSPLIT, $0-12 + NO_LOCAL_POINTERS + MOVL ptr+0(FP), AX + TESTL $7, AX + JZ 2(PC) + CALL ·panicUnaligned(SB) + // MOVQ and EMMS were introduced on the Pentium MMX. + MOVQ val+4(FP), M0 + MOVQ M0, (AX) + EMMS + // This is essentially a no-op, but it provides required memory fencing. + // It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2). + XORL AX, AX + LOCK + XADDL AX, (SP) + RET + +// void ·Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-5 + MOVL ptr+0(FP), AX + MOVB val+4(FP), BX + LOCK + ORB BX, (AX) + RET + +// void ·And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-5 + MOVL ptr+0(FP), AX + MOVB val+4(FP), BX + LOCK + ANDB BX, (AX) + RET + +TEXT ·Store8(SB), NOSPLIT, $0-5 + MOVL ptr+0(FP), BX + MOVB val+4(FP), AX + XCHGB AX, 0(BX) + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), AX + MOVL val+4(FP), BX + LOCK + ORL BX, (AX) + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), AX + MOVL val+4(FP), BX + LOCK + ANDL BX, (AX) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-12 + MOVL ptr+0(FP), BX + MOVL val+4(FP), CX +casloop: + MOVL CX, DX + MOVL (BX), AX + ANDL AX, DX + LOCK + CMPXCHGL DX, (BX) + JNZ casloop + MOVL AX, ret+8(FP) + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-12 + MOVL ptr+0(FP), BX + MOVL val+4(FP), CX +casloop: + MOVL CX, DX + MOVL (BX), AX + ORL AX, DX + LOCK + CMPXCHGL DX, (BX) + JNZ casloop + MOVL AX, ret+8(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-20 + MOVL ptr+0(FP), BP + // DI:SI = v + MOVL val_lo+4(FP), SI + MOVL val_hi+8(FP), DI + // DX:AX = *addr + MOVL 0(BP), AX + MOVL 4(BP), DX +casloop: + // CX:BX = DX:AX 
(*addr) & DI:SI (mask) + MOVL AX, BX + MOVL DX, CX + ANDL SI, BX + ANDL DI, CX + LOCK + CMPXCHG8B 0(BP) + JNZ casloop + MOVL AX, ret_lo+12(FP) + MOVL DX, ret_hi+16(FP) + RET + + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-20 + MOVL ptr+0(FP), BP + // DI:SI = v + MOVL val_lo+4(FP), SI + MOVL val_hi+8(FP), DI + // DX:AX = *addr + MOVL 0(BP), AX + MOVL 4(BP), DX +casloop: + // CX:BX = DX:AX (*addr) | DI:SI (mask) + MOVL AX, BX + MOVL DX, CX + ORL SI, BX + ORL DI, CX + LOCK + CMPXCHG8B 0(BP) + JNZ casloop + MOVL AX, ret_lo+12(FP) + MOVL DX, ret_hi+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-12 + JMP ·And32(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-12 + JMP ·Or32(SB) diff --git a/src/internal/runtime/atomic/atomic_amd64.go b/src/internal/runtime/atomic/atomic_amd64.go new file mode 100644 index 0000000000..b439954093 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_amd64.go @@ -0,0 +1,135 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +import "unsafe" + +// Export some functions via linkname to assembly in sync/atomic. 
+// +//go:linkname Load +//go:linkname Loadp +//go:linkname Load64 + +//go:nosplit +//go:noinline +func Load(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func Loadp(ptr unsafe.Pointer) unsafe.Pointer { + return *(*unsafe.Pointer)(ptr) +} + +//go:nosplit +//go:noinline +func Load64(ptr *uint64) uint64 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcq64(ptr *uint64) uint64 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcquintptr(ptr *uintptr) uintptr { + return *ptr +} + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:nosplit +//go:noinline +func Load8(ptr *uint8) uint8 { + return *ptr +} + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). 
+ +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) + +// StorepNoWB performs *ptr = val atomically and without a write +// barrier. +// +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/src/internal/runtime/atomic/atomic_amd64.s b/src/internal/runtime/atomic/atomic_amd64.s new file mode 100644 index 0000000000..ec75bf9332 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_amd64.s @@ -0,0 +1,289 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Note: some of these functions are semantically inlined +// by the compiler (in src/cmd/compile/internal/gc/ssa.go). 
+ +#include "textflag.h" + +TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +TEXT ·Loaduint(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + JMP ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +// bool Cas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB),NOSPLIT,$0-17 + MOVQ ptr+0(FP), BX + MOVL old+8(FP), AX + MOVL new+12(FP), CX + LOCK + CMPXCHGL CX, 0(BX) + SETEQ ret+16(FP) + RET + +// bool ·Cas64(uint64 *val, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVQ ptr+0(FP), BX + MOVQ old+8(FP), AX + MOVQ new+16(FP), CX + LOCK + CMPXCHGQ CX, 0(BX) + SETEQ ret+24(FP) + RET + +// bool Casp1(void **val, void *old, void *new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-25 + MOVQ ptr+0(FP), BX + MOVQ old+8(FP), AX + MOVQ new+16(FP), CX + LOCK + CMPXCHGQ CX, 0(BX) + SETEQ ret+24(FP) + RET + +TEXT ·Casint32(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +// uint32 Xadd(uint32 volatile *val, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVQ ptr+0(FP), BX + MOVL delta+8(FP), AX + MOVL AX, CX + LOCK + XADDL AX, 0(BX) + ADDL CX, AX + MOVL AX, ret+16(FP) + RET + +// uint64 Xadd64(uint64 volatile *val, int64 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVQ ptr+0(FP), BX + MOVQ delta+8(FP), AX + MOVQ AX, CX + LOCK + XADDQ AX, 0(BX) + ADDQ CX, AX + MOVQ AX, ret+16(FP) + RET + +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + 
+TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVQ ptr+0(FP), BX + MOVL new+8(FP), AX + XCHGL AX, 0(BX) + MOVL AX, ret+16(FP) + RET + +// uint64 Xchg64(ptr *uint64, new uint64) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVQ ptr+0(FP), BX + MOVQ new+8(FP), AX + XCHGQ AX, 0(BX) + MOVQ AX, ret+16(FP) + RET + +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + JMP ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + MOVQ ptr+0(FP), BX + MOVQ val+8(FP), AX + XCHGQ AX, 0(BX) + RET + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVQ ptr+0(FP), BX + MOVL val+8(FP), AX + XCHGL AX, 0(BX) + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVQ ptr+0(FP), BX + MOVB val+8(FP), AX + XCHGB AX, 0(BX) + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVQ ptr+0(FP), BX + MOVQ val+8(FP), AX + XCHGQ AX, 0(BX) + RET + +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +// void ·Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVQ ptr+0(FP), AX + MOVB val+8(FP), BX + LOCK + ORB BX, (AX) + RET + +// void ·And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVQ ptr+0(FP), AX + MOVB val+8(FP), BX + LOCK + ANDB BX, (AX) + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVQ ptr+0(FP), AX + MOVL val+8(FP), BX + LOCK + ORL BX, (AX) + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVQ ptr+0(FP), AX + MOVL val+8(FP), BX + LOCK + ANDL 
BX, (AX) + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVQ ptr+0(FP), BX + MOVL val+8(FP), CX +casloop: + MOVL CX, DX + MOVL (BX), AX + ORL AX, DX + LOCK + CMPXCHGL DX, (BX) + JNZ casloop + MOVL AX, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVQ ptr+0(FP), BX + MOVL val+8(FP), CX +casloop: + MOVL CX, DX + MOVL (BX), AX + ANDL AX, DX + LOCK + CMPXCHGL DX, (BX) + JNZ casloop + MOVL AX, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVQ ptr+0(FP), BX + MOVQ val+8(FP), CX +casloop: + MOVQ CX, DX + MOVQ (BX), AX + ORQ AX, DX + LOCK + CMPXCHGQ DX, (BX) + JNZ casloop + MOVQ AX, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVQ ptr+0(FP), BX + MOVQ val+8(FP), CX +casloop: + MOVQ CX, DX + MOVQ (BX), AX + ANDQ AX, DX + LOCK + CMPXCHGQ DX, (BX) + JNZ casloop + MOVQ AX, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) diff --git a/src/internal/runtime/atomic/atomic_andor_generic.go b/src/internal/runtime/atomic/atomic_andor_generic.go new file mode 100644 index 0000000000..f8b148dda5 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_andor_generic.go @@ -0,0 +1,67 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build arm || mips || mipsle || mips64 || mips64le || wasm + +package atomic + +//go:nosplit +func And32(ptr *uint32, val uint32) uint32 { + for { + old := *ptr + if Cas(ptr, old, old&val) { + return old + } + } +} + +//go:nosplit +func Or32(ptr *uint32, val uint32) uint32 { + for { + old := *ptr + if Cas(ptr, old, old|val) { + return old + } + } +} + +//go:nosplit +func And64(ptr *uint64, val uint64) uint64 { + for { + old := *ptr + if Cas64(ptr, old, old&val) { + return old + } + } +} + +//go:nosplit +func Or64(ptr *uint64, val uint64) uint64 { + for { + old := *ptr + if Cas64(ptr, old, old|val) { + return old + } + } +} + +//go:nosplit +func Anduintptr(ptr *uintptr, val uintptr) uintptr { + for { + old := *ptr + if Casuintptr(ptr, old, old&val) { + return old + } + } +} + +//go:nosplit +func Oruintptr(ptr *uintptr, val uintptr) uintptr { + for { + old := *ptr + if Casuintptr(ptr, old, old|val) { + return old + } + } +} diff --git a/src/internal/runtime/atomic/atomic_andor_test.go b/src/internal/runtime/atomic/atomic_andor_test.go new file mode 100644 index 0000000000..631a6e637d --- /dev/null +++ b/src/internal/runtime/atomic/atomic_andor_test.go @@ -0,0 +1,246 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(61395): move these tests to atomic_test.go once And/Or have +// implementations for all architectures. +package atomic_test + +import ( + "internal/runtime/atomic" + "testing" +) + +func TestAnd32(t *testing.T) { + // Basic sanity check. + x := uint32(0xffffffff) + for i := uint32(0); i < 32; i++ { + old := x + v := atomic.And32(&x, ^(1 << i)) + if r := uint32(0xffffffff) << (i + 1); x != r || v != old { + t.Fatalf("clearing bit %#x: want %#x, got new %#x and old %#v", uint32(1<>3)%uintptr(len(locktab))].l +} + +// Atomic add and return new value. 
// Xadd atomically adds delta to *val and returns the new value,
// implemented as a Cas retry loop.
//
//go:nosplit
func Xadd(val *uint32, delta int32) uint32 {
	for {
		oval := *val
		nval := oval + uint32(delta)
		if Cas(val, oval, nval) {
			return nval
		}
	}
}

//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

// Xchg atomically stores v into *addr and returns the previous value,
// implemented as a Cas retry loop.
//
//go:nosplit
func Xchg(addr *uint32, v uint32) uint32 {
	for {
		old := *addr
		if Cas(addr, old, v) {
			return old
		}
	}
}

// Xchguintptr forwards to the 32-bit Xchg; the unsafe cast relies on
// uintptr being 32 bits wide on this architecture.
//
//go:nosplit
func Xchguintptr(addr *uintptr, v uintptr) uintptr {
	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
}

// Not noescape -- it installs a pointer to addr.
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)

//go:noescape
func Store(addr *uint32, v uint32)

//go:noescape
func StoreRel(addr *uint32, v uint32)

//go:noescape
func StoreReluintptr(addr *uintptr, v uintptr)

// goCas64 is the lock-based fallback for Cas64 (dispatched to from
// atomic_arm.s when ARMv7 atomics are unavailable). It crashes on an
// unaligned address and deliberately dereferences addr before locking so
// a nil pointer faults outside the critical section.
//
//go:nosplit
func goCas64(addr *uint64, old, new uint64) bool {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var ok bool
	addrLock(addr).lock()
	if *addr == old {
		*addr = new
		ok = true
	}
	addrLock(addr).unlock()
	return ok
}

// goXadd64 is the lock-based fallback for Xadd64; returns the new value.
//
//go:nosplit
func goXadd64(addr *uint64, delta int64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr + uint64(delta)
	*addr = r
	addrLock(addr).unlock()
	return r
}

// goXchg64 is the lock-based fallback for Xchg64; returns the old value.
//
//go:nosplit
func goXchg64(addr *uint64, v uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	*addr = v
	addrLock(addr).unlock()
	return r
}

// goLoad64 is the lock-based fallback for Load64.
//
//go:nosplit
func goLoad64(addr *uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	addrLock(addr).unlock()
	return r
}

// goStore64 is the lock-based fallback for Store64.
//
//go:nosplit
func goStore64(addr *uint64, v uint64) {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	addrLock(addr).lock()
	*addr = v
	addrLock(addr).unlock()
}

// Or8 atomically ORs v into the byte at addr by widening to the
// enclosing aligned 32-bit word and using the 32-bit Cas.
//
//go:nosplit
func Or8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8) // little endian
	for {
		old := *addr32
		if Cas(addr32, old, old|word) {
			return
		}
	}
}

// And8 atomically ANDs v into the byte at addr, via the enclosing
// aligned 32-bit word and the 32-bit Cas.
//
//go:nosplit
func And8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8)    // little endian
	mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
	word |= ^mask // set the other three bytes' bits so AND leaves them unchanged
	for {
		old := *addr32
		if Cas(addr32, old, old&word) {
			return
		}
	}
}

// Or atomically ORs v into *addr via a Cas retry loop.
//
//go:nosplit
func Or(addr *uint32, v uint32) {
	for {
		old := *addr
		if Cas(addr, old, old|v) {
			return
		}
	}
}

// And atomically ANDs v into *addr via a Cas retry loop.
//
//go:nosplit
func And(addr *uint32, v uint32) {
	for {
		old := *addr
		if Cas(addr, old, old&v) {
			return
		}
	}
}

// armcas is implemented in atomic_arm.s.
//
//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool

//go:noescape
func Load(addr *uint32) uint32

// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer

//go:noescape
func Load8(addr *uint8) uint8

//go:noescape
func LoadAcq(addr *uint32) uint32

//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

//go:noescape
+func Load64(addr *uint64) uint64 + +//go:noescape +func Store8(addr *uint8, v uint8) + +//go:noescape +func Store64(addr *uint64, v uint64) diff --git a/src/internal/runtime/atomic/atomic_arm.s b/src/internal/runtime/atomic/atomic_arm.s new file mode 100644 index 0000000000..1cf7d8f6ef --- /dev/null +++ b/src/internal/runtime/atomic/atomic_arm.s @@ -0,0 +1,312 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" +#include "funcdata.h" + +// bool armcas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// }else +// return 0; +// +// To implement ·cas in sys_$GOOS_arm.s +// using the native instructions, use: +// +// TEXT ·cas(SB),NOSPLIT,$0 +// B ·armcas(SB) +// +TEXT ·armcas(SB),NOSPLIT,$0-13 + MOVW ptr+0(FP), R1 + MOVW old+4(FP), R2 + MOVW new+8(FP), R3 +casl: + LDREX (R1), R0 + CMP R0, R2 + BNE casfail + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $0, R11 + BEQ 2(PC) +#endif + DMB MB_ISHST + + STREX R3, (R1), R0 + CMP $0, R0 + BNE casl + MOVW $1, R0 + +#ifndef GOARM_7 + CMP $0, R11 + BEQ 2(PC) +#endif + DMB MB_ISH + + MOVB R0, ret+12(FP) + RET +casfail: + MOVW $0, R0 + MOVB R0, ret+12(FP) + RET + +// stubs + +TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-8 + B ·Load(SB) + +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8 + B ·Load(SB) + +TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-8 + B ·Load(SB) + +TEXT ·Casint32(SB),NOSPLIT,$0-13 + B ·Cas(SB) + +TEXT ·Casint64(SB),NOSPLIT,$-4-21 + B ·Cas64(SB) + +TEXT ·Casuintptr(SB),NOSPLIT,$0-13 + B ·Cas(SB) + +TEXT ·Casp1(SB),NOSPLIT,$0-13 + B ·Cas(SB) + +TEXT ·CasRel(SB),NOSPLIT,$0-13 + B ·Cas(SB) + +TEXT ·Loadint32(SB),NOSPLIT,$0-8 + B ·Load(SB) + +TEXT ·Loadint64(SB),NOSPLIT,$-4-12 + B ·Load64(SB) + +TEXT ·Loaduintptr(SB),NOSPLIT,$0-8 + B ·Load(SB) + +TEXT ·Loaduint(SB),NOSPLIT,$0-8 + B ·Load(SB) 
+ +TEXT ·Storeint32(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·Storeint64(SB),NOSPLIT,$0-12 + B ·Store64(SB) + +TEXT ·Storeuintptr(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·StorepNoWB(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·StoreRel(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·Xaddint32(SB),NOSPLIT,$0-12 + B ·Xadd(SB) + +TEXT ·Xaddint64(SB),NOSPLIT,$-4-20 + B ·Xadd64(SB) + +TEXT ·Xadduintptr(SB),NOSPLIT,$0-12 + B ·Xadd(SB) + +TEXT ·Xchgint32(SB),NOSPLIT,$0-12 + B ·Xchg(SB) + +TEXT ·Xchgint64(SB),NOSPLIT,$-4-20 + B ·Xchg64(SB) + +// 64-bit atomics +// The native ARM implementations use LDREXD/STREXD, which are +// available on ARMv6k or later. We use them only on ARMv7. +// On older ARM, we use Go implementations which simulate 64-bit +// atomics with locks. +TEXT armCas64<>(SB),NOSPLIT,$0-21 + // addr is already in R1 + MOVW old_lo+4(FP), R2 + MOVW old_hi+8(FP), R3 + MOVW new_lo+12(FP), R4 + MOVW new_hi+16(FP), R5 +cas64loop: + LDREXD (R1), R6 // loads R6 and R7 + CMP R2, R6 + BNE cas64fail + CMP R3, R7 + BNE cas64fail + + DMB MB_ISHST + + STREXD R4, (R1), R0 // stores R4 and R5 + CMP $0, R0 + BNE cas64loop + MOVW $1, R0 + + DMB MB_ISH + + MOVBU R0, swapped+20(FP) + RET +cas64fail: + MOVW $0, R0 + MOVBU R0, swapped+20(FP) + RET + +TEXT armXadd64<>(SB),NOSPLIT,$0-20 + // addr is already in R1 + MOVW delta_lo+4(FP), R2 + MOVW delta_hi+8(FP), R3 + +add64loop: + LDREXD (R1), R4 // loads R4 and R5 + ADD.S R2, R4 + ADC R3, R5 + + DMB MB_ISHST + + STREXD R4, (R1), R0 // stores R4 and R5 + CMP $0, R0 + BNE add64loop + + DMB MB_ISH + + MOVW R4, new_lo+12(FP) + MOVW R5, new_hi+16(FP) + RET + +TEXT armXchg64<>(SB),NOSPLIT,$0-20 + // addr is already in R1 + MOVW new_lo+4(FP), R2 + MOVW new_hi+8(FP), R3 + +swap64loop: + LDREXD (R1), R4 // loads R4 and R5 + + DMB MB_ISHST + + STREXD R2, (R1), R0 // stores R2 and R3 + CMP $0, R0 + BNE swap64loop + + DMB MB_ISH + + MOVW R4, old_lo+12(FP) + MOVW R5, old_hi+16(FP) + RET + +TEXT 
armLoad64<>(SB),NOSPLIT,$0-12 + // addr is already in R1 + + LDREXD (R1), R2 // loads R2 and R3 + DMB MB_ISH + + MOVW R2, val_lo+4(FP) + MOVW R3, val_hi+8(FP) + RET + +TEXT armStore64<>(SB),NOSPLIT,$0-12 + // addr is already in R1 + MOVW val_lo+4(FP), R2 + MOVW val_hi+8(FP), R3 + +store64loop: + LDREXD (R1), R4 // loads R4 and R5 + + DMB MB_ISHST + + STREXD R2, (R1), R0 // stores R2 and R3 + CMP $0, R0 + BNE store64loop + + DMB MB_ISH + RET + +// The following functions all panic if their address argument isn't +// 8-byte aligned. Since we're calling back into Go code to do this, +// we have to cooperate with stack unwinding. In the normal case, the +// functions tail-call into the appropriate implementation, which +// means they must not open a frame. Hence, when they go down the +// panic path, at that point they push the LR to create a real frame +// (they don't need to pop it because panic won't return; however, we +// do need to set the SP delta back). + +// Check if R1 is 8-byte aligned, panic if not. +// Clobbers R2. 
+#define CHECK_ALIGN \ + AND.S $7, R1, R2 \ + BEQ 4(PC) \ + MOVW.W R14, -4(R13) /* prepare a real frame */ \ + BL ·panicUnaligned(SB) \ + ADD $4, R13 /* compensate SP delta */ + +TEXT ·Cas64(SB),NOSPLIT,$-4-21 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goCas64(SB) +#endif + JMP armCas64<>(SB) + +TEXT ·Xadd64(SB),NOSPLIT,$-4-20 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goXadd64(SB) +#endif + JMP armXadd64<>(SB) + +TEXT ·Xchg64(SB),NOSPLIT,$-4-20 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goXchg64(SB) +#endif + JMP armXchg64<>(SB) + +TEXT ·Load64(SB),NOSPLIT,$-4-12 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goLoad64(SB) +#endif + JMP armLoad64<>(SB) + +TEXT ·Store64(SB),NOSPLIT,$-4-12 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goStore64(SB) +#endif + JMP armStore64<>(SB) diff --git a/src/internal/runtime/atomic/atomic_arm64.go b/src/internal/runtime/atomic/atomic_arm64.go new file mode 100644 index 0000000000..c4c56ae895 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_arm64.go @@ -0,0 +1,112 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 

//go:build arm64

package atomic

import (
	"internal/cpu"
	"unsafe"
)

const (
	// offsetARM64HasATOMICS is exported to assembly as
	// const_offsetARM64HasATOMICS, where it is used to test at run
	// time whether the CPU provides the LSE atomic instructions.
	offsetARM64HasATOMICS = unsafe.Offsetof(cpu.ARM64.HasATOMICS)
)

// The functions below are implemented in atomic_arm64.s.

//go:noescape
func Xadd(ptr *uint32, delta int32) uint32

//go:noescape
func Xadd64(ptr *uint64, delta int64) uint64

//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

//go:noescape
func Xchg64(ptr *uint64, new uint64) uint64

//go:noescape
func Xchguintptr(ptr *uintptr, new uintptr) uintptr

//go:noescape
func Load(ptr *uint32) uint32

//go:noescape
func Load8(ptr *uint8) uint8

//go:noescape
func Load64(ptr *uint64) uint64

// NO go:noescape annotation; *ptr escapes if result escapes (#31525)
func Loadp(ptr unsafe.Pointer) unsafe.Pointer

//go:noescape
func LoadAcq(addr *uint32) uint32

//go:noescape
func LoadAcq64(ptr *uint64) uint64

//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

//go:noescape
func Or8(ptr *uint8, val uint8)

//go:noescape
func And8(ptr *uint8, val uint8)

//go:noescape
func And(ptr *uint32, val uint32)

//go:noescape
func Or(ptr *uint32, val uint32)

//go:noescape
func And32(ptr *uint32, val uint32) uint32

//go:noescape
func Or32(ptr *uint32, val uint32) uint32

//go:noescape
func And64(ptr *uint64, val uint64) uint64

//go:noescape
func Or64(ptr *uint64, val uint64) uint64

//go:noescape
func Anduintptr(ptr *uintptr, val uintptr) uintptr

//go:noescape
func Oruintptr(ptr *uintptr, val uintptr) uintptr

//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool

//go:noescape
func CasRel(ptr *uint32, old, new uint32) bool

//go:noescape
func Store(ptr *uint32, val uint32)

//go:noescape
func Store8(ptr *uint8, val uint8)

//go:noescape
func Store64(ptr *uint64, val uint64)

// NO go:noescape annotation; see atomic_pointer.go.
+func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/src/internal/runtime/atomic/atomic_arm64.s b/src/internal/runtime/atomic/atomic_arm64.s new file mode 100644 index 0000000000..ede56538b8 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_arm64.s @@ -0,0 +1,467 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" + +TEXT ·Casint32(SB), NOSPLIT, $0-17 + B ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-25 + B ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + B ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + B ·Cas(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + B ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + B ·Load64(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 + B ·Load64(SB) + +TEXT ·Loaduint(SB), NOSPLIT, $0-16 + B ·Load64(SB) + +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + B ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + B ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + B ·Store64(SB) + +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + B ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + B ·Xadd64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + B ·Xadd64(SB) + +TEXT ·Casp1(SB), NOSPLIT, $0-25 + B ·Cas64(SB) + +// uint32 ·Load(uint32 volatile* addr) +TEXT ·Load(SB),NOSPLIT,$0-12 + MOVD ptr+0(FP), R0 + LDARW (R0), R0 + MOVW R0, ret+8(FP) + RET + +// uint8 ·Load8(uint8 volatile* addr) +TEXT ·Load8(SB),NOSPLIT,$0-9 + MOVD ptr+0(FP), R0 + LDARB (R0), R0 + MOVB R0, ret+8(FP) + RET + +// uint64 ·Load64(uint64 volatile* addr) +TEXT ·Load64(SB),NOSPLIT,$0-16 + MOVD ptr+0(FP), R0 + LDAR (R0), R0 + MOVD R0, ret+8(FP) + RET + +// void *·Loadp(void *volatile *addr) +TEXT ·Loadp(SB),NOSPLIT,$0-16 + MOVD ptr+0(FP), R0 + 
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// uint32 ·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	B	·Load(SB)

// uint64 ·LoadAcq64(uint64 volatile* addr)
TEXT ·LoadAcq64(SB),NOSPLIT,$0-16
	B	·Load64(SB)

// uintptr ·LoadAcquintptr(uintptr volatile* addr)
TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16
	B	·Load64(SB)

TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·StoreRel(SB), NOSPLIT, $0-12
	B	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	B	·Store64(SB)

TEXT ·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)
	RET

TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	STLRB	R1, (R0)
	RET

TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)
	RET

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
//
// Without GOARM64_LSE, fall back to an LL/SC loop unless the CPU
// advertises the LSE atomics at run time.
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALW	R1, (R0), R2
	MOVW	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXRW	(R0), R2
	STLXRW	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVW	R2, ret+16(FP)
	RET
#endif

// uint64 Xchg64(ptr *uint64, new uint64)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
#ifndef GOARM64_LSE
	MOVBU	internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	SWPALD	R1, (R0), R2
	MOVD	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	LDAXR	(R0), R2
	STLXR	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVD	R2, ret+16(FP)
	RET
#endif

// bool Cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 
0; +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R0 + MOVW old+8(FP), R1 + MOVW new+12(FP), R2 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MOVD R1, R3 + CASALW R3, (R0), R2 + CMP R1, R3 + CSET EQ, R0 + MOVB R0, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R3 + CMPW R1, R3 + BNE ok + STLXRW R2, (R0), R3 + CBNZ R3, load_store_loop +ok: + CSET EQ, R0 + MOVB R0, ret+16(FP) + RET +#endif + +// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVD ptr+0(FP), R0 + MOVD old+8(FP), R1 + MOVD new+16(FP), R2 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MOVD R1, R3 + CASALD R3, (R0), R2 + CMP R1, R3 + CSET EQ, R0 + MOVB R0, ret+24(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R3 + CMP R1, R3 + BNE ok + STLXR R2, (R0), R3 + CBNZ R3, load_store_loop +ok: + CSET EQ, R0 + MOVB R0, ret+24(FP) + RET +#endif + +// uint32 xadd(uint32 volatile *ptr, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R0 + MOVW delta+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDADDALW R1, (R0), R2 + ADD R1, R2 + MOVW R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + ADDW R2, R1, R2 + STLXRW R2, (R0), R3 + CBNZ R3, load_store_loop + MOVW R2, ret+16(FP) + RET +#endif + +// uint64 Xadd64(uint64 volatile *ptr, int64 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R0 + MOVD delta+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDADDALD R1, (R0), R2 + ADD R1, R2 + 
MOVD R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R2 + ADD R2, R1, R2 + STLXR R2, (R0), R3 + CBNZ R3, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + B ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + B ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + B ·Xchg64(SB) + +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R0 + MOVB val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MVN R1, R2 + LDCLRALB R2, (R0), R3 + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRB (R0), R2 + AND R1, R2 + STLXRB R2, (R0), R3 + CBNZ R3, load_store_loop + RET +#endif + +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R0 + MOVB val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDORALB R1, (R0), R2 + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRB (R0), R2 + ORR R1, R2 + STLXRB R2, (R0), R3 + CBNZ R3, load_store_loop + RET +#endif + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MVN R1, R2 + LDCLRALW R2, (R0), R3 + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + AND R1, R2 + STLXRW R2, (R0), R3 + CBNZ R3, load_store_loop + RET +#endif + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDORALW R1, (R0), R2 + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + ORR R1, R2 + STLXRW R2, (R0), R3 + CBNZ R3, load_store_loop + RET +#endif + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 
+#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDORALW R1, (R0), R2 + MOVD R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + ORR R1, R2, R3 + STLXRW R3, (R0), R4 + CBNZ R4, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MVN R1, R2 + LDCLRALW R2, (R0), R3 + MOVD R3, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + AND R1, R2, R3 + STLXRW R3, (R0), R4 + CBNZ R4, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R0 + MOVD val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDORALD R1, (R0), R2 + MOVD R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R2 + ORR R1, R2, R3 + STLXR R3, (R0), R4 + CBNZ R4, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R0 + MOVD val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MVN R1, R2 + LDCLRALD R2, (R0), R3 + MOVD R3, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R2 + AND R1, R2, R3 + STLXR R3, (R0), R4 + CBNZ R4, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + B ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + B ·Or64(SB) diff --git a/src/internal/runtime/atomic/atomic_loong64.go 
b/src/internal/runtime/atomic/atomic_loong64.go
new file mode 100644
index 0000000000..de6d4b4ba6
--- /dev/null
+++ b/src/internal/runtime/atomic/atomic_loong64.go
@@ -0,0 +1,107 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build loong64

package atomic

import "unsafe"

// The functions below are implemented in atomic_loong64.s.

//go:noescape
func Xadd(ptr *uint32, delta int32) uint32

//go:noescape
func Xadd64(ptr *uint64, delta int64) uint64

//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

//go:noescape
func Xchg64(ptr *uint64, new uint64) uint64

//go:noescape
func Xchguintptr(ptr *uintptr, new uintptr) uintptr

//go:noescape
func Load(ptr *uint32) uint32

//go:noescape
func Load8(ptr *uint8) uint8

//go:noescape
func Load64(ptr *uint64) uint64

// NO go:noescape annotation; *ptr escapes if result escapes (#31525)
func Loadp(ptr unsafe.Pointer) unsafe.Pointer

//go:noescape
func LoadAcq(ptr *uint32) uint32

//go:noescape
func LoadAcq64(ptr *uint64) uint64

//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

//go:noescape
func And8(ptr *uint8, val uint8)

//go:noescape
func And(ptr *uint32, val uint32)

//go:noescape
func Or8(ptr *uint8, val uint8)

//go:noescape
func Or(ptr *uint32, val uint32)

//go:noescape
func And32(ptr *uint32, val uint32) uint32

//go:noescape
func Or32(ptr *uint32, val uint32) uint32

//go:noescape
func And64(ptr *uint64, val uint64) uint64

//go:noescape
func Or64(ptr *uint64, val uint64) uint64

//go:noescape
func Anduintptr(ptr *uintptr, val uintptr) uintptr

//go:noescape
func Oruintptr(ptr *uintptr, val uintptr) uintptr

// NOTE: Do not add atomicxor8 (XOR is not idempotent).
+ +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/src/internal/runtime/atomic/atomic_loong64.s b/src/internal/runtime/atomic/atomic_loong64.s new file mode 100644 index 0000000000..1812cb95fd --- /dev/null +++ b/src/internal/runtime/atomic/atomic_loong64.s @@ -0,0 +1,366 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// bool cas(uint32 *ptr, uint32 old, uint32 new) +// Atomically: +// if(*ptr == old){ +// *ptr = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVV ptr+0(FP), R4 + MOVW old+8(FP), R5 + MOVW new+12(FP), R6 + DBAR +cas_again: + MOVV R6, R7 + LL (R4), R8 + BNE R5, R8, cas_fail + SC R7, (R4) + BEQ R7, cas_again + MOVV $1, R4 + MOVB R4, ret+16(FP) + DBAR + RET +cas_fail: + MOVV $0, R4 + JMP -4(PC) + +// bool cas64(uint64 *ptr, uint64 old, uint64 new) +// Atomically: +// if(*ptr == old){ +// *ptr = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVV ptr+0(FP), R4 + MOVV old+8(FP), R5 + MOVV new+16(FP), R6 + DBAR +cas64_again: + MOVV R6, R7 + LLV (R4), R8 + BNE R5, R8, cas64_fail + SCV R7, (R4) + BEQ R7, cas64_again + MOVV $1, R4 + MOVB R4, ret+24(FP) + DBAR + RET +cas64_fail: + MOVV $0, R4 + JMP -4(PC) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP 
·Cas(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 + JMP ·Load64(SB) + +TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 + JMP ·Load64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +// bool casp(void **val, void *old, void *new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +// uint32 xadd(uint32 volatile *ptr, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW delta+8(FP), R5 + DBAR + LL (R4), R6 + ADDU R6, R5, R7 + MOVV R7, R6 + SC R7, (R4) + BEQ R7, -4(PC) + MOVW R6, ret+16(FP) + DBAR + RET + +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV delta+8(FP), R5 + DBAR + LLV (R4), R6 + ADDVU R6, R5, R7 + MOVV R7, R6 + SCV R7, (R4) + BEQ R7, -4(PC) + MOVV R6, ret+16(FP) + DBAR + RET + +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW new+8(FP), R5 + + DBAR + MOVV R5, R6 + LL (R4), R7 + SC R6, (R4) + BEQ R6, -3(PC) + MOVW R7, ret+16(FP) + DBAR + RET + +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV new+8(FP), R5 + + DBAR + MOVV R5, R6 + LLV (R4), R7 + SCV R6, (R4) + BEQ R6, -3(PC) + MOVV R7, ret+16(FP) + DBAR + RET + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + DBAR + MOVW R5, 0(R4) + DBAR + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R4 + MOVB val+8(FP), R5 + DBAR + MOVB R5, 0(R4) + DBAR + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVV 
ptr+0(FP), R4 + MOVV val+8(FP), R5 + DBAR + MOVV R5, 0(R4) + DBAR + RET + +// void Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R4 + MOVBU val+8(FP), R5 + // Align ptr down to 4 bytes so we can use 32-bit load/store. + MOVV $~3, R6 + AND R4, R6 + // R7 = ((ptr & 3) * 8) + AND $3, R4, R7 + SLLV $3, R7 + // Shift val for aligned ptr. R5 = val << R4 + SLLV R7, R5 + + DBAR + LL (R6), R7 + OR R5, R7 + SC R7, (R6) + BEQ R7, -4(PC) + DBAR + RET + +// void And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R4 + MOVBU val+8(FP), R5 + // Align ptr down to 4 bytes so we can use 32-bit load/store. + MOVV $~3, R6 + AND R4, R6 + // R7 = ((ptr & 3) * 8) + AND $3, R4, R7 + SLLV $3, R7 + // Shift val for aligned ptr. R5 = val << R7 | ^(0xFF << R7) + MOVV $0xFF, R8 + SLLV R7, R5 + SLLV R7, R8 + NOR R0, R8 + OR R8, R5 + + DBAR + LL (R6), R7 + AND R5, R7 + SC R7, (R6) + BEQ R7, -4(PC) + DBAR + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + DBAR + LL (R4), R6 + OR R5, R6 + SC R6, (R4) + BEQ R6, -4(PC) + DBAR + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + DBAR + LL (R4), R6 + AND R5, R6 + SC R6, (R4) + BEQ R6, -4(PC) + DBAR + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + DBAR + LL (R4), R6 + OR R5, R6, R7 + SC R7, (R4) + BEQ R7, -4(PC) + DBAR + MOVW R6, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + DBAR + LL (R4), R6 + AND R5, R6, R7 + SC R7, (R4) + BEQ R7, -4(PC) + DBAR + MOVW R6, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV val+8(FP), R5 + DBAR + LLV (R4), R6 + OR R5, R6, R7 + SCV R7, (R4) + BEQ R7, -4(PC) + DBAR + MOVV R6, 
ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV val+8(FP), R5 + DBAR + LLV (R4), R6 + AND R5, R6, R7 + SCV R7, (R4) + BEQ R7, -4(PC) + DBAR + MOVV R6, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) + +// uint32 internal∕runtime∕atomic·Load(uint32 volatile* ptr) +TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 + MOVV ptr+0(FP), R19 + DBAR + MOVWU 0(R19), R19 + DBAR + MOVW R19, ret+8(FP) + RET + +// uint8 internal∕runtime∕atomic·Load8(uint8 volatile* ptr) +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 + MOVV ptr+0(FP), R19 + DBAR + MOVBU 0(R19), R19 + DBAR + MOVB R19, ret+8(FP) + RET + +// uint64 internal∕runtime∕atomic·Load64(uint64 volatile* ptr) +TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 + MOVV ptr+0(FP), R19 + DBAR + MOVV 0(R19), R19 + DBAR + MOVV R19, ret+8(FP) + RET + +// void *internal∕runtime∕atomic·Loadp(void *volatile *ptr) +TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 + MOVV ptr+0(FP), R19 + DBAR + MOVV 0(R19), R19 + DBAR + MOVV R19, ret+8(FP) + RET + +// uint32 internal∕runtime∕atomic·LoadAcq(uint32 volatile* ptr) +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 + JMP ·Load(SB) + +// uint64 ·LoadAcq64(uint64 volatile* ptr) +TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) + +// uintptr ·LoadAcquintptr(uintptr volatile* ptr) +TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) + diff --git a/src/internal/runtime/atomic/atomic_mips64x.go b/src/internal/runtime/atomic/atomic_mips64x.go new file mode 100644 index 0000000000..1e12b83801 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_mips64x.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 

//go:build mips64 || mips64le

package atomic

import "unsafe"

// The functions below are implemented in atomic_mips64x.s.

//go:noescape
func Xadd(ptr *uint32, delta int32) uint32

//go:noescape
func Xadd64(ptr *uint64, delta int64) uint64

//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

//go:noescape
func Xchg64(ptr *uint64, new uint64) uint64

//go:noescape
func Xchguintptr(ptr *uintptr, new uintptr) uintptr

//go:noescape
func Load(ptr *uint32) uint32

//go:noescape
func Load8(ptr *uint8) uint8

//go:noescape
func Load64(ptr *uint64) uint64

// NO go:noescape annotation; *ptr escapes if result escapes (#31525)
func Loadp(ptr unsafe.Pointer) unsafe.Pointer

//go:noescape
func LoadAcq(ptr *uint32) uint32

//go:noescape
func LoadAcq64(ptr *uint64) uint64

//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

//go:noescape
func And8(ptr *uint8, val uint8)

//go:noescape
func Or8(ptr *uint8, val uint8)

// NOTE: Do not add atomicxor8 (XOR is not idempotent).

//go:noescape
func And(ptr *uint32, val uint32)

//go:noescape
func Or(ptr *uint32, val uint32)

//go:noescape
func Cas64(ptr *uint64, old, new uint64) bool

//go:noescape
func CasRel(ptr *uint32, old, new uint32) bool

//go:noescape
func Store(ptr *uint32, val uint32)

//go:noescape
func Store8(ptr *uint8, val uint8)

//go:noescape
func Store64(ptr *uint64, val uint64)

// NO go:noescape annotation; see atomic_pointer.go.
+func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/src/internal/runtime/atomic/atomic_mips64x.s b/src/internal/runtime/atomic/atomic_mips64x.s new file mode 100644 index 0000000000..b4411d87da --- /dev/null +++ b/src/internal/runtime/atomic/atomic_mips64x.s @@ -0,0 +1,359 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +#include "textflag.h" + +#define SYNC WORD $0xf + +// bool cas(uint32 *ptr, uint32 old, uint32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVV ptr+0(FP), R1 + MOVW old+8(FP), R2 + MOVW new+12(FP), R5 + SYNC +cas_again: + MOVV R5, R3 + LL (R1), R4 + BNE R2, R4, cas_fail + SC R3, (R1) + BEQ R3, cas_again + MOVV $1, R1 + MOVB R1, ret+16(FP) + SYNC + RET +cas_fail: + MOVV $0, R1 + JMP -4(PC) + +// bool cas64(uint64 *ptr, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVV ptr+0(FP), R1 + MOVV old+8(FP), R2 + MOVV new+16(FP), R5 + SYNC +cas64_again: + MOVV R5, R3 + LLV (R1), R4 + BNE R2, R4, cas64_fail + SCV R3, (R1) + BEQ R3, cas64_again + MOVV $1, R1 + MOVB R1, ret+24(FP) + SYNC + RET +cas64_fail: + MOVV $0, R1 + JMP -4(PC) + +TEXT ·Casint32(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 + JMP ·Load64(SB) + +TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 + JMP ·Load64(SB) + +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + JMP 
·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + JMP ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +// bool casp(void **val, void *old, void *new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +// uint32 xadd(uint32 volatile *ptr, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R2 + MOVW delta+8(FP), R3 + SYNC + LL (R2), R1 + ADDU R1, R3, R4 + MOVV R4, R1 + SC R4, (R2) + BEQ R4, -4(PC) + MOVW R1, ret+16(FP) + SYNC + RET + +// uint64 Xadd64(uint64 volatile *ptr, int64 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R2 + MOVV delta+8(FP), R3 + SYNC + LLV (R2), R1 + ADDVU R1, R3, R4 + MOVV R4, R1 + SCV R4, (R2) + BEQ R4, -4(PC) + MOVV R1, ret+16(FP) + SYNC + RET + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R2 + MOVW new+8(FP), R5 + + SYNC + MOVV R5, R3 + LL (R2), R1 + SC R3, (R2) + BEQ R3, -3(PC) + MOVW R1, ret+16(FP) + SYNC + RET + +// uint64 Xchg64(ptr *uint64, new uint64) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R2 + MOVV new+8(FP), R5 + + SYNC + MOVV R5, R3 + LLV (R2), R1 + SCV R3, (R2) + BEQ R3, -3(PC) + MOVV R1, ret+16(FP) + SYNC + RET + +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + JMP ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 
+ JMP ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R1 + MOVW val+8(FP), R2 + SYNC + MOVW R2, 0(R1) + SYNC + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R1 + MOVB val+8(FP), R2 + SYNC + MOVB R2, 0(R1) + SYNC + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVV ptr+0(FP), R1 + MOVV val+8(FP), R2 + SYNC + MOVV R2, 0(R1) + SYNC + RET + +// void Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R1 + MOVBU val+8(FP), R2 + // Align ptr down to 4 bytes so we can use 32-bit load/store. + MOVV $~3, R3 + AND R1, R3 + // Compute val shift. +#ifdef GOARCH_mips64 + // Big endian. ptr = ptr ^ 3 + XOR $3, R1 +#endif + // R4 = ((ptr & 3) * 8) + AND $3, R1, R4 + SLLV $3, R4 + // Shift val for aligned ptr. R2 = val << R4 + SLLV R4, R2 + + SYNC + LL (R3), R4 + OR R2, R4 + SC R4, (R3) + BEQ R4, -4(PC) + SYNC + RET + +// void And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R1 + MOVBU val+8(FP), R2 + // Align ptr down to 4 bytes so we can use 32-bit load/store. + MOVV $~3, R3 + AND R1, R3 + // Compute val shift. +#ifdef GOARCH_mips64 + // Big endian. ptr = ptr ^ 3 + XOR $3, R1 +#endif + // R4 = ((ptr & 3) * 8) + AND $3, R1, R4 + SLLV $3, R4 + // Shift val for aligned ptr. 
R2 = val << R4 | ^(0xFF << R4) + MOVV $0xFF, R5 + SLLV R4, R2 + SLLV R4, R5 + NOR R0, R5 + OR R5, R2 + + SYNC + LL (R3), R4 + AND R2, R4 + SC R4, (R3) + BEQ R4, -4(PC) + SYNC + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R1 + MOVW val+8(FP), R2 + + SYNC + LL (R1), R3 + OR R2, R3 + SC R3, (R1) + BEQ R3, -4(PC) + SYNC + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R1 + MOVW val+8(FP), R2 + + SYNC + LL (R1), R3 + AND R2, R3 + SC R3, (R1) + BEQ R3, -4(PC) + SYNC + RET + +// uint32 ·Load(uint32 volatile* ptr) +TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 + MOVV ptr+0(FP), R1 + SYNC + MOVWU 0(R1), R1 + SYNC + MOVW R1, ret+8(FP) + RET + +// uint8 ·Load8(uint8 volatile* ptr) +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 + MOVV ptr+0(FP), R1 + SYNC + MOVBU 0(R1), R1 + SYNC + MOVB R1, ret+8(FP) + RET + +// uint64 ·Load64(uint64 volatile* ptr) +TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 + MOVV ptr+0(FP), R1 + SYNC + MOVV 0(R1), R1 + SYNC + MOVV R1, ret+8(FP) + RET + +// void *·Loadp(void *volatile *ptr) +TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 + MOVV ptr+0(FP), R1 + SYNC + MOVV 0(R1), R1 + SYNC + MOVV R1, ret+8(FP) + RET + +// uint32 ·LoadAcq(uint32 volatile* ptr) +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 + JMP atomic·Load(SB) + +// uint64 ·LoadAcq64(uint64 volatile* ptr) +TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 + JMP atomic·Load64(SB) + +// uintptr ·LoadAcquintptr(uintptr volatile* ptr) +TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 + JMP atomic·Load64(SB) diff --git a/src/internal/runtime/atomic/atomic_mipsx.go b/src/internal/runtime/atomic/atomic_mipsx.go new file mode 100644 index 0000000000..e3dcde1bde --- /dev/null +++ b/src/internal/runtime/atomic/atomic_mipsx.go @@ -0,0 +1,162 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build mips || mipsle + +// Export some functions via linkname to assembly in sync/atomic. +// +//go:linkname Xadd64 +//go:linkname Xchg64 +//go:linkname Cas64 +//go:linkname Load64 +//go:linkname Store64 + +package atomic + +import ( + "internal/cpu" + "unsafe" +) + +// TODO implement lock striping +var lock struct { + state uint32 + pad [cpu.CacheLinePadSize - 4]byte +} + +//go:noescape +func spinLock(state *uint32) + +//go:noescape +func spinUnlock(state *uint32) + +//go:nosplit +func lockAndCheck(addr *uint64) { + // ensure 8-byte alignment + if uintptr(unsafe.Pointer(addr))&7 != 0 { + panicUnaligned() + } + // force dereference before taking lock + _ = *addr + + spinLock(&lock.state) +} + +//go:nosplit +func unlock() { + spinUnlock(&lock.state) +} + +//go:nosplit +func Xadd64(addr *uint64, delta int64) (new uint64) { + lockAndCheck(addr) + + new = *addr + uint64(delta) + *addr = new + + unlock() + return +} + +//go:nosplit +func Xchg64(addr *uint64, new uint64) (old uint64) { + lockAndCheck(addr) + + old = *addr + *addr = new + + unlock() + return +} + +//go:nosplit +func Cas64(addr *uint64, old, new uint64) (swapped bool) { + lockAndCheck(addr) + + if (*addr) == old { + *addr = new + unlock() + return true + } + + unlock() + return false +} + +//go:nosplit +func Load64(addr *uint64) (val uint64) { + lockAndCheck(addr) + + val = *addr + + unlock() + return +} + +//go:nosplit +func Store64(addr *uint64, val uint64) { + lockAndCheck(addr) + + *addr = val + + unlock() + return +} + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + 
+//go:noescape +func LoadAcq(ptr *uint32) uint32 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) + +//go:noescape +func CasRel(addr *uint32, old, new uint32) bool diff --git a/src/internal/runtime/atomic/atomic_mipsx.s b/src/internal/runtime/atomic/atomic_mipsx.s new file mode 100644 index 0000000000..8f5fc53cb7 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_mipsx.s @@ -0,0 +1,262 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build mips || mipsle + +#include "textflag.h" + +// bool Cas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB),NOSPLIT,$0-13 + MOVW ptr+0(FP), R1 + MOVW old+4(FP), R2 + MOVW new+8(FP), R5 + SYNC +try_cas: + MOVW R5, R3 + LL (R1), R4 // R4 = *R1 + BNE R2, R4, cas_fail + SC R3, (R1) // *R1 = R3 + BEQ R3, try_cas + SYNC + MOVB R3, ret+12(FP) + RET +cas_fail: + SYNC + MOVB R0, ret+12(FP) + RET + +TEXT ·Store(SB),NOSPLIT,$0-8 + MOVW ptr+0(FP), R1 + MOVW val+4(FP), R2 + SYNC + MOVW R2, 0(R1) + SYNC + RET + +TEXT ·Store8(SB),NOSPLIT,$0-5 + MOVW ptr+0(FP), R1 + MOVB val+4(FP), R2 + SYNC + MOVB R2, 0(R1) + SYNC + RET + +TEXT ·Load(SB),NOSPLIT,$0-8 + MOVW ptr+0(FP), R1 + SYNC + MOVW 0(R1), R1 + SYNC + MOVW R1, ret+4(FP) + RET + +TEXT ·Load8(SB),NOSPLIT,$0-5 + MOVW ptr+0(FP), R1 + SYNC + MOVB 0(R1), R1 + SYNC + MOVB R1, ret+4(FP) + RET + +// uint32 Xadd(uint32 volatile *val, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB),NOSPLIT,$0-12 + MOVW ptr+0(FP), R2 + MOVW delta+4(FP), R3 + SYNC +try_xadd: + LL (R2), R1 // R1 = *R2 + ADDU R1, R3, R4 + MOVW R4, R1 + SC R4, (R2) // *R2 = R4 + BEQ R4, try_xadd + SYNC + MOVW R1, ret+8(FP) + RET + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB),NOSPLIT,$0-12 + MOVW ptr+0(FP), R2 + MOVW new+4(FP), R5 + SYNC +try_xchg: + MOVW R5, R3 + LL (R2), R1 // R1 = *R2 + SC R3, (R2) // *R2 = R3 + BEQ R3, try_xchg + SYNC + MOVW R1, ret+8(FP) + RET + +TEXT ·Casint32(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + +TEXT ·Casint64(SB),NOSPLIT,$0-21 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + +TEXT ·CasRel(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB),NOSPLIT,$0-8 + JMP ·Load(SB) + +TEXT ·Loaduint(SB),NOSPLIT,$0-8 + JMP ·Load(SB) + +TEXT ·Loadp(SB),NOSPLIT,$-0-8 + JMP ·Load(SB) + +TEXT ·Storeint32(SB),NOSPLIT,$0-8 + JMP 
·Store(SB) + +TEXT ·Storeint64(SB),NOSPLIT,$0-12 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +TEXT ·Xadduintptr(SB),NOSPLIT,$0-12 + JMP ·Xadd(SB) + +TEXT ·Loadint32(SB),NOSPLIT,$0-8 + JMP ·Load(SB) + +TEXT ·Loadint64(SB),NOSPLIT,$0-12 + JMP ·Load64(SB) + +TEXT ·Xaddint32(SB),NOSPLIT,$0-12 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB),NOSPLIT,$0-20 + JMP ·Xadd64(SB) + +TEXT ·Casp1(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + +TEXT ·Xchgint32(SB),NOSPLIT,$0-12 + JMP ·Xchg(SB) + +TEXT ·Xchgint64(SB),NOSPLIT,$0-20 + JMP ·Xchg64(SB) + +TEXT ·Xchguintptr(SB),NOSPLIT,$0-12 + JMP ·Xchg(SB) + +TEXT ·StorepNoWB(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +TEXT ·StoreRel(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +// void Or8(byte volatile*, byte); +TEXT ·Or8(SB),NOSPLIT,$0-5 + MOVW ptr+0(FP), R1 + MOVBU val+4(FP), R2 + MOVW $~3, R3 // Align ptr down to 4 bytes so we can use 32-bit load/store. + AND R1, R3 +#ifdef GOARCH_mips + // Big endian. ptr = ptr ^ 3 + XOR $3, R1 +#endif + AND $3, R1, R4 // R4 = ((ptr & 3) * 8) + SLL $3, R4 + SLL R4, R2, R2 // Shift val for aligned ptr. R2 = val << R4 + SYNC +try_or8: + LL (R3), R4 // R4 = *R3 + OR R2, R4 + SC R4, (R3) // *R3 = R4 + BEQ R4, try_or8 + SYNC + RET + +// void And8(byte volatile*, byte); +TEXT ·And8(SB),NOSPLIT,$0-5 + MOVW ptr+0(FP), R1 + MOVBU val+4(FP), R2 + MOVW $~3, R3 + AND R1, R3 +#ifdef GOARCH_mips + // Big endian. ptr = ptr ^ 3 + XOR $3, R1 +#endif + AND $3, R1, R4 // R4 = ((ptr & 3) * 8) + SLL $3, R4 + MOVW $0xFF, R5 + SLL R4, R2 + SLL R4, R5 + NOR R0, R5 + OR R5, R2 // Shift val for aligned ptr. 
R2 = val << R4 | ^(0xFF << R4) + SYNC +try_and8: + LL (R3), R4 // R4 = *R3 + AND R2, R4 + SC R4, (R3) // *R3 = R4 + BEQ R4, try_and8 + SYNC + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-8 + MOVW ptr+0(FP), R1 + MOVW val+4(FP), R2 + + SYNC + LL (R1), R3 + OR R2, R3 + SC R3, (R1) + BEQ R3, -4(PC) + SYNC + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-8 + MOVW ptr+0(FP), R1 + MOVW val+4(FP), R2 + + SYNC + LL (R1), R3 + AND R2, R3 + SC R3, (R1) + BEQ R3, -4(PC) + SYNC + RET + +TEXT ·spinLock(SB),NOSPLIT,$0-4 + MOVW state+0(FP), R1 + MOVW $1, R2 + SYNC +try_lock: + MOVW R2, R3 +check_again: + LL (R1), R4 + BNE R4, check_again + SC R3, (R1) + BEQ R3, try_lock + SYNC + RET + +TEXT ·spinUnlock(SB),NOSPLIT,$0-4 + MOVW state+0(FP), R1 + SYNC + MOVW R0, (R1) + SYNC + RET diff --git a/src/internal/runtime/atomic/atomic_ppc64x.go b/src/internal/runtime/atomic/atomic_ppc64x.go new file mode 100644 index 0000000000..33a92b53f4 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_ppc64x.go @@ -0,0 +1,107 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build ppc64 || ppc64le + +package atomic + +import "unsafe" + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +//go:noescape +func Load64(ptr *uint64) uint64 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func LoadAcq(ptr *uint32) uint32 + +//go:noescape +func LoadAcq64(ptr *uint64) uint64 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). 
+ +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/src/internal/runtime/atomic/atomic_ppc64x.s b/src/internal/runtime/atomic/atomic_ppc64x.s new file mode 100644 index 0000000000..75635b933d --- /dev/null +++ b/src/internal/runtime/atomic/atomic_ppc64x.s @@ -0,0 +1,424 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +#include "textflag.h" + +// For more details about how various memory models are +// enforced on POWER, the following paper provides more +// details about how they enforce C/C++ like models. This +// gives context about why the strange looking code +// sequences below work. 
+// +// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html + +// uint32 ·Load(uint32 volatile* ptr) +TEXT ·Load(SB),NOSPLIT|NOFRAME,$-8-12 + MOVD ptr+0(FP), R3 + SYNC + MOVWZ 0(R3), R3 + CMPW R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7,0x4 + ISYNC + MOVW R3, ret+8(FP) + RET + +// uint8 ·Load8(uint8 volatile* ptr) +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$-8-9 + MOVD ptr+0(FP), R3 + SYNC + MOVBZ 0(R3), R3 + CMP R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7,0x4 + ISYNC + MOVB R3, ret+8(FP) + RET + +// uint64 ·Load64(uint64 volatile* ptr) +TEXT ·Load64(SB),NOSPLIT|NOFRAME,$-8-16 + MOVD ptr+0(FP), R3 + SYNC + MOVD 0(R3), R3 + CMP R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7,0x4 + ISYNC + MOVD R3, ret+8(FP) + RET + +// void *·Loadp(void *volatile *ptr) +TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$-8-16 + MOVD ptr+0(FP), R3 + SYNC + MOVD 0(R3), R3 + CMP R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7,0x4 + ISYNC + MOVD R3, ret+8(FP) + RET + +// uint32 ·LoadAcq(uint32 volatile* ptr) +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$-8-12 + MOVD ptr+0(FP), R3 + MOVWZ 0(R3), R3 + CMPW R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7, 0x4 + ISYNC + MOVW R3, ret+8(FP) + RET + +// uint64 ·LoadAcq64(uint64 volatile* ptr) +TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$-8-16 + MOVD ptr+0(FP), R3 + MOVD 0(R3), R3 + CMP R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7, 0x4 + ISYNC + MOVD R3, ret+8(FP) + RET + +// bool cas(uint32 *ptr, uint32 old, uint32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R3 + MOVWZ old+8(FP), R4 + MOVWZ new+12(FP), R5 + LWSYNC +cas_again: + LWAR (R3), R6 + CMPW R6, R4 + BNE cas_fail + STWCCC R5, (R3) + BNE cas_again + MOVD $1, R3 + LWSYNC + MOVB R3, ret+16(FP) + RET +cas_fail: + LWSYNC + MOVB R0, ret+16(FP) + RET + +// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + 
MOVD ptr+0(FP), R3 + MOVD old+8(FP), R4 + MOVD new+16(FP), R5 + LWSYNC +cas64_again: + LDAR (R3), R6 + CMP R6, R4 + BNE cas64_fail + STDCCC R5, (R3) + BNE cas64_again + MOVD $1, R3 + LWSYNC + MOVB R3, ret+24(FP) + RET +cas64_fail: + LWSYNC + MOVB R0, ret+24(FP) + RET + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R3 + MOVWZ old+8(FP), R4 + MOVWZ new+12(FP), R5 + LWSYNC +cas_again: + LWAR (R3), $0, R6 // 0 = Mutex release hint + CMPW R6, R4 + BNE cas_fail + STWCCC R5, (R3) + BNE cas_again + MOVD $1, R3 + MOVB R3, ret+16(FP) + RET +cas_fail: + MOVB R0, ret+16(FP) + RET + +TEXT ·Casint32(SB), NOSPLIT, $0-17 + BR ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 + BR ·Load64(SB) + +TEXT ·LoadAcquintptr(SB), NOSPLIT|NOFRAME, $0-16 + BR ·LoadAcq64(SB) + +TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 + BR ·Load64(SB) + +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + BR ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + BR ·StoreRel64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + BR ·Xadd64(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + BR ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + BR ·Load64(SB) + +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + BR ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + BR ·Xadd64(SB) + +// bool casp(void **val, void *old, void *new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +// uint32 xadd(uint32 volatile *ptr, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW delta+8(FP), R5 + LWSYNC + LWAR (R4), R3 + ADD R5, R3 + STWCCC R3, (R4) + BNE -3(PC) + MOVW R3, ret+16(FP) + RET + +// uint64 Xadd64(uint64 volatile *val, int64 delta) +// 
Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD delta+8(FP), R5 + LWSYNC + LDAR (R4), R3 + ADD R5, R3 + STDCCC R3, (R4) + BNE -3(PC) + MOVD R3, ret+16(FP) + RET + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW new+8(FP), R5 + LWSYNC + LWAR (R4), R3 + STWCCC R5, (R4) + BNE -2(PC) + ISYNC + MOVW R3, ret+16(FP) + RET + +// uint64 Xchg64(ptr *uint64, new uint64) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD new+8(FP), R5 + LWSYNC + LDAR (R4), R3 + STDCCC R5, (R4) + BNE -2(PC) + ISYNC + MOVD R3, ret+16(FP) + RET + +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + BR ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + BR ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + BR ·Xchg64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + SYNC + MOVW R4, 0(R3) + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVB val+8(FP), R4 + SYNC + MOVB R4, 0(R3) + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVD ptr+0(FP), R3 + MOVD val+8(FP), R4 + SYNC + MOVD R4, 0(R3) + RET + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC + MOVW R4, 0(R3) + RET + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + MOVD ptr+0(FP), R3 + MOVD val+8(FP), R4 + LWSYNC + MOVD R4, 0(R3) + RET + +// void ·Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVBZ val+8(FP), R4 + LWSYNC +again: + LBAR (R3), R6 + OR R4, R6 + STBCCC R6, (R3) + BNE again + RET + +// void ·And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVBZ val+8(FP), R4 + LWSYNC +again: + LBAR (R3), R6 + AND R4, R6 + STBCCC R6, (R3) + BNE again + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), 
NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC +again: + LWAR (R3), R6 + OR R4, R6 + STWCCC R6, (R3) + BNE again + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC +again: + LWAR (R3),R6 + AND R4, R6 + STWCCC R6, (R3) + BNE again + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC +again: + LWAR (R3), R6 + OR R4, R6, R7 + STWCCC R7, (R3) + BNE again + MOVW R6, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC +again: + LWAR (R3),R6 + AND R4, R6, R7 + STWCCC R7, (R3) + BNE again + MOVW R6, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R3 + MOVD val+8(FP), R4 + LWSYNC +again: + LDAR (R3), R6 + OR R4, R6, R7 + STDCCC R7, (R3) + BNE again + MOVD R6, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R3 + MOVD val+8(FP), R4 + LWSYNC +again: + LDAR (R3),R6 + AND R4, R6, R7 + STDCCC R7, (R3) + BNE again + MOVD R6, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) diff --git a/src/internal/runtime/atomic/atomic_riscv64.go b/src/internal/runtime/atomic/atomic_riscv64.go new file mode 100644 index 0000000000..9fc38376ae --- /dev/null +++ b/src/internal/runtime/atomic/atomic_riscv64.go @@ -0,0 +1,103 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package atomic + +import "unsafe" + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +//go:noescape +func Load64(ptr *uint64) uint64 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func LoadAcq(ptr *uint32) uint32 + +//go:noescape +func LoadAcq64(ptr *uint64) uint64 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +// NO go:noescape annotation; see atomic_pointer.go. 
+func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/src/internal/runtime/atomic/atomic_riscv64.s b/src/internal/runtime/atomic/atomic_riscv64.s new file mode 100644 index 0000000000..bf6bd35ed7 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_riscv64.s @@ -0,0 +1,324 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RISC-V's atomic operations have two bits, aq ("acquire") and rl ("release"), +// which may be toggled on and off. Their precise semantics are defined in +// section 6.3 of the specification, but the basic idea is as follows: +// +// - If neither aq nor rl is set, the CPU may reorder the atomic arbitrarily. +// It guarantees only that it will execute atomically. +// +// - If aq is set, the CPU may move the instruction backward, but not forward. +// +// - If rl is set, the CPU may move the instruction forward, but not backward. +// +// - If both are set, the CPU may not reorder the instruction at all. +// +// These four modes correspond to other well-known memory models on other CPUs. +// On ARM, aq corresponds to a dmb ishst, aq+rl corresponds to a dmb ish. On +// Intel, aq corresponds to an lfence, rl to an sfence, and aq+rl to an mfence +// (or a lock prefix). +// +// Go's memory model requires that +// - if a read happens after a write, the read must observe the write, and +// that +// - if a read happens concurrently with a write, the read may observe the +// write. +// aq is sufficient to guarantee this, so that's what we use here. (This jibes +// with ARM, which uses dmb ishst.) 
+ +#include "textflag.h" + +// func Cas(ptr *uint64, old, new uint64) bool +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOV ptr+0(FP), A0 + MOVW old+8(FP), A1 + MOVW new+12(FP), A2 +cas_again: + LRW (A0), A3 + BNE A3, A1, cas_fail + SCW A2, (A0), A4 + BNE A4, ZERO, cas_again + MOV $1, A0 + MOVB A0, ret+16(FP) + RET +cas_fail: + MOV $0, A0 + MOV A0, ret+16(FP) + RET + +// func Cas64(ptr *uint64, old, new uint64) bool +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOV ptr+0(FP), A0 + MOV old+8(FP), A1 + MOV new+16(FP), A2 +cas_again: + LRD (A0), A3 + BNE A3, A1, cas_fail + SCD A2, (A0), A4 + BNE A4, ZERO, cas_again + MOV $1, A0 + MOVB A0, ret+24(FP) + RET +cas_fail: + MOVB ZERO, ret+24(FP) + RET + +// func Load(ptr *uint32) uint32 +TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 + MOV ptr+0(FP), A0 + LRW (A0), A0 + MOVW A0, ret+8(FP) + RET + +// func Load8(ptr *uint8) uint8 +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 + MOV ptr+0(FP), A0 + FENCE + MOVBU (A0), A1 + FENCE + MOVB A1, ret+8(FP) + RET + +// func Load64(ptr *uint64) uint64 +TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 + MOV ptr+0(FP), A0 + LRD (A0), A0 + MOV A0, ret+8(FP) + RET + +// func Store(ptr *uint32, val uint32) +TEXT ·Store(SB), NOSPLIT, $0-12 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOSWAPW A1, (A0), ZERO + RET + +// func Store8(ptr *uint8, val uint8) +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOV ptr+0(FP), A0 + MOVBU val+8(FP), A1 + FENCE + MOVB A1, (A0) + FENCE + RET + +// func Store64(ptr *uint64, val uint64) +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOV ptr+0(FP), A0 + MOV val+8(FP), A1 + AMOSWAPD A1, (A0), ZERO + RET + +TEXT ·Casp1(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·Casint32(SB),NOSPLIT,$0-17 + JMP ·Cas(SB) + +TEXT ·Casint64(SB),NOSPLIT,$0-25 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB),NOSPLIT,$0-25 + JMP ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB),NOSPLIT,$0-16 + JMP ·Load64(SB) + +TEXT 
·Storeint32(SB),NOSPLIT,$0-12 + JMP ·Store(SB) + +TEXT ·Storeint64(SB),NOSPLIT,$0-16 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB),NOSPLIT,$0-16 + JMP ·Store64(SB) + +TEXT ·Loaduint(SB),NOSPLIT,$0-16 + JMP ·Loaduintptr(SB) + +TEXT ·Loadint32(SB),NOSPLIT,$0-12 + JMP ·Load(SB) + +TEXT ·Loadint64(SB),NOSPLIT,$0-16 + JMP ·Load64(SB) + +TEXT ·Xaddint32(SB),NOSPLIT,$0-20 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB),NOSPLIT,$0-24 + MOV ptr+0(FP), A0 + MOV delta+8(FP), A1 + AMOADDD A1, (A0), A0 + ADD A0, A1, A0 + MOVW A0, ret+16(FP) + RET + +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 + JMP ·Load(SB) + +TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) + +TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) + +// func Loadp(ptr unsafe.Pointer) unsafe.Pointer +TEXT ·Loadp(SB),NOSPLIT,$0-16 + JMP ·Load64(SB) + +// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +// func Xchg(ptr *uint32, new uint32) uint32 +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOV ptr+0(FP), A0 + MOVW new+8(FP), A1 + AMOSWAPW A1, (A0), A1 + MOVW A1, ret+16(FP) + RET + +// func Xchg64(ptr *uint64, new uint64) uint64 +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOV ptr+0(FP), A0 + MOV new+8(FP), A1 + AMOSWAPD A1, (A0), A1 + MOV A1, ret+16(FP) + RET + +// Atomically: +// *val += delta; +// return *val; + +// func Xadd(ptr *uint32, delta int32) uint32 +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOV ptr+0(FP), A0 + MOVW delta+8(FP), A1 + AMOADDW A1, (A0), A2 + ADD A2,A1,A0 + MOVW A0, ret+16(FP) + RET + +// func Xadd64(ptr *uint64, delta int64) uint64 +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOV ptr+0(FP), A0 + MOV delta+8(FP), A1 + AMOADDD A1, (A0), A2 + ADD A2, A1, A0 + MOV A0, ret+16(FP) + RET + +// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr +TEXT ·Xadduintptr(SB), NOSPLIT, 
$0-24 + JMP ·Xadd64(SB) + +// func Xchgint32(ptr *int32, new int32) int32 +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + JMP ·Xchg(SB) + +// func Xchgint64(ptr *int64, new int64) int64 +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +// func Xchguintptr(ptr *uintptr, new uintptr) uintptr +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +// func And8(ptr *uint8, val uint8) +TEXT ·And8(SB), NOSPLIT, $0-9 + MOV ptr+0(FP), A0 + MOVBU val+8(FP), A1 + AND $3, A0, A2 + AND $-4, A0 + SLL $3, A2 + XOR $255, A1 + SLL A2, A1 + XOR $-1, A1 + AMOANDW A1, (A0), ZERO + RET + +// func Or8(ptr *uint8, val uint8) +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOV ptr+0(FP), A0 + MOVBU val+8(FP), A1 + AND $3, A0, A2 + AND $-4, A0 + SLL $3, A2 + SLL A2, A1 + AMOORW A1, (A0), ZERO + RET + +// func And(ptr *uint32, val uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOANDW A1, (A0), ZERO + RET + +// func Or(ptr *uint32, val uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOORW A1, (A0), ZERO + RET + +// func Or32(ptr *uint32, val uint32) uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOORW A1, (A0), A2 + MOVW A2, ret+16(FP) + RET + +// func And32(ptr *uint32, val uint32) uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOANDW A1, (A0), A2 + MOVW A2, ret+16(FP) + RET + +// func Or64(ptr *uint64, val uint64) uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOV ptr+0(FP), A0 + MOV val+8(FP), A1 + AMOORD A1, (A0), A2 + MOV A2, ret+16(FP) + RET + +// func And64(ptr *uint64, val uint64) uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOV ptr+0(FP), A0 + MOV val+8(FP), A1 + AMOANDD A1, (A0), A2 + MOV A2, ret+16(FP) + RET + +// func Anduintptr(ptr *uintptr, val uintptr) uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(ptr *uintptr, val uintptr) uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) diff --git 
a/src/internal/runtime/atomic/atomic_s390x.go b/src/internal/runtime/atomic/atomic_s390x.go new file mode 100644 index 0000000000..68b4e160f9 --- /dev/null +++ b/src/internal/runtime/atomic/atomic_s390x.go @@ -0,0 +1,141 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +import "unsafe" + +// Export some functions via linkname to assembly in sync/atomic. +// +//go:linkname Load +//go:linkname Loadp +//go:linkname Load64 + +//go:nosplit +//go:noinline +func Load(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func Loadp(ptr unsafe.Pointer) unsafe.Pointer { + return *(*unsafe.Pointer)(ptr) +} + +//go:nosplit +//go:noinline +func Load8(ptr *uint8) uint8 { + return *ptr +} + +//go:nosplit +//go:noinline +func Load64(ptr *uint64) uint64 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcq64(ptr *uint64) uint64 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcquintptr(ptr *uintptr) uintptr { + return *ptr +} + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:nosplit +//go:noinline +func StoreRel(ptr *uint32, val uint32) { + *ptr = val +} + +//go:nosplit +//go:noinline +func StoreRel64(ptr *uint64, val uint64) { + *ptr = val +} + +//go:nosplit +//go:noinline +func StoreReluintptr(ptr *uintptr, val uintptr) { + *ptr = val +} + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). 
+ +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool diff --git a/src/internal/runtime/atomic/atomic_s390x.s b/src/internal/runtime/atomic/atomic_s390x.s new file mode 100644 index 0000000000..6e4ea0e32a --- /dev/null +++ b/src/internal/runtime/atomic/atomic_s390x.s @@ -0,0 +1,304 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +// func Store(ptr *uint32, val uint32) +TEXT ·Store(SB), NOSPLIT, $0 + MOVD ptr+0(FP), R2 + MOVWZ val+8(FP), R3 + MOVW R3, 0(R2) + SYNC + RET + +// func Store8(ptr *uint8, val uint8) +TEXT ·Store8(SB), NOSPLIT, $0 + MOVD ptr+0(FP), R2 + MOVB val+8(FP), R3 + MOVB R3, 0(R2) + SYNC + RET + +// func Store64(ptr *uint64, val uint64) +TEXT ·Store64(SB), NOSPLIT, $0 + MOVD ptr+0(FP), R2 + MOVD val+8(FP), R3 + MOVD R3, 0(R2) + SYNC + RET + +// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) +TEXT ·StorepNoWB(SB), NOSPLIT, $0 + MOVD ptr+0(FP), R2 + MOVD val+8(FP), R3 + MOVD R3, 0(R2) + SYNC + RET + +// func Cas(ptr *uint32, old, new uint32) bool +// Atomically: +// if *ptr == old { +// *val = new +// return 1 +// } else { +// return 0 +// } +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R3 + MOVWZ old+8(FP), R4 + MOVWZ new+12(FP), R5 + CS R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5 + BNE cas_fail + MOVB $1, ret+16(FP) + RET +cas_fail: + MOVB $0, ret+16(FP) + RET + +// func Cas64(ptr *uint64, old, new uint64) bool +// Atomically: +// if *ptr == old { +// *ptr = new +// return 1 +// } else { +// return 0 +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVD ptr+0(FP), R3 + MOVD old+8(FP), R4 + MOVD new+16(FP), R5 + CSG R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5 + BNE cas64_fail + MOVB $1, ret+24(FP) + RET +cas64_fail: + MOVB $0, ret+24(FP) + RET + +// func Casint32(ptr *int32, old, new int32) bool +TEXT ·Casint32(SB), NOSPLIT, $0-17 + BR ·Cas(SB) + +// func Casint64(ptr *int64, old, new int64) bool +TEXT ·Casint64(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +// func Casuintptr(ptr *uintptr, old, new uintptr) bool +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +// func CasRel(ptr *uint32, old, new uint32) bool +TEXT ·CasRel(SB), NOSPLIT, $0-17 + BR ·Cas(SB) + +// func Loaduintptr(ptr *uintptr) uintptr +TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 + BR ·Load64(SB) + +// func Loaduint(ptr *uint) uint +TEXT ·Loaduint(SB), NOSPLIT, $0-16 + BR 
·Load64(SB) + +// func Storeint32(ptr *int32, new int32) +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + BR ·Store(SB) + +// func Storeint64(ptr *int64, new int64) +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +// func Storeuintptr(ptr *uintptr, new uintptr) +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +// func Loadint32(ptr *int32) int32 +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + BR ·Load(SB) + +// func Loadint64(ptr *int64) int64 +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + BR ·Load64(SB) + +// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + BR ·Xadd64(SB) + +// func Xaddint32(ptr *int32, delta int32) int32 +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + BR ·Xadd(SB) + +// func Xaddint64(ptr *int64, delta int64) int64 +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + BR ·Xadd64(SB) + +// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool +// Atomically: +// if *ptr == old { +// *ptr = new +// return 1 +// } else { +// return 0 +// } +TEXT ·Casp1(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +// func Xadd(ptr *uint32, delta int32) uint32 +// Atomically: +// *ptr += delta +// return *ptr +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW delta+8(FP), R5 + MOVW (R4), R3 +repeat: + ADD R5, R3, R6 + CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVW R6, ret+16(FP) + RET + +// func Xadd64(ptr *uint64, delta int64) uint64 +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD delta+8(FP), R5 + MOVD (R4), R3 +repeat: + ADD R5, R3, R6 + CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVD R6, ret+16(FP) + RET + +// func Xchg(ptr *uint32, new uint32) uint32 +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW new+8(FP), R3 + MOVW (R4), R6 +repeat: + CS R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4) + BNE repeat + MOVW R6, ret+16(FP) + RET + +// func Xchg64(ptr *uint64, new uint64) uint64 +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD 
new+8(FP), R3 + MOVD (R4), R6 +repeat: + CSG R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4) + BNE repeat + MOVD R6, ret+16(FP) + RET + +// func Xchgint32(ptr *int32, new int32) int32 +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + BR ·Xchg(SB) + +// func Xchgint64(ptr *int64, new int64) int64 +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + BR ·Xchg64(SB) + +// func Xchguintptr(ptr *uintptr, new uintptr) uintptr +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + BR ·Xchg64(SB) + +// func Or8(addr *uint8, v uint8) +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVBZ val+8(FP), R4 + // We don't have atomic operations that work on individual bytes so we + // need to align addr down to a word boundary and create a mask + // containing v to OR with the entire word atomically. + MOVD $(3<<3), R5 + RXSBG $59, $60, $3, R3, R5 // R5 = 24 - ((addr % 4) * 8) = ((addr & 3) << 3) ^ (3 << 3) + ANDW $~3, R3 // R3 = floor(addr, 4) = addr &^ 3 + SLW R5, R4 // R4 = uint32(v) << R5 + LAO R4, R6, 0(R3) // R6 = *R3; *R3 |= R4; (atomic) + RET + +// func And8(addr *uint8, v uint8) +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVBZ val+8(FP), R4 + // We don't have atomic operations that work on individual bytes so we + // need to align addr down to a word boundary and create a mask + // containing v to AND with the entire word atomically. 
+ ORW $~0xff, R4 // R4 = uint32(v) | 0xffffff00 + MOVD $(3<<3), R5 + RXSBG $59, $60, $3, R3, R5 // R5 = 24 - ((addr % 4) * 8) = ((addr & 3) << 3) ^ (3 << 3) + ANDW $~3, R3 // R3 = floor(addr, 4) = addr &^ 3 + RLL R5, R4, R4 // R4 = rotl(R4, R5) + LAN R4, R6, 0(R3) // R6 = *R3; *R3 &= R4; (atomic) + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LAO R4, R6, 0(R3) // R6 = *R3; *R3 |= R4; (atomic) + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LAN R4, R6, 0(R3) // R6 = *R3; *R3 &= R4; (atomic) + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW val+8(FP), R5 + MOVW (R4), R3 +repeat: + OR R5, R3, R6 + CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVW R3, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW val+8(FP), R5 + MOVW (R4), R3 +repeat: + AND R5, R3, R6 + CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVW R3, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD val+8(FP), R5 + MOVD (R4), R3 +repeat: + OR R5, R3, R6 + CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVD R3, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD val+8(FP), R5 + MOVD (R4), R3 +repeat: + AND R5, R3, R6 + CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVD R3, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + BR ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + BR ·Or64(SB) diff --git a/src/internal/runtime/atomic/atomic_test.go 
b/src/internal/runtime/atomic/atomic_test.go new file mode 100644 index 0000000000..f28213c3ce --- /dev/null +++ b/src/internal/runtime/atomic/atomic_test.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic_test + +import ( + "internal/goarch" + "internal/runtime/atomic" + "runtime" + "testing" + "unsafe" +) + +func runParallel(N, iter int, f func()) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(int(N))) + done := make(chan bool) + for i := 0; i < N; i++ { + go func() { + for j := 0; j < iter; j++ { + f() + } + done <- true + }() + } + for i := 0; i < N; i++ { + <-done + } +} + +func TestXadduintptr(t *testing.T) { + N := 20 + iter := 100000 + if testing.Short() { + N = 10 + iter = 10000 + } + inc := uintptr(100) + total := uintptr(0) + runParallel(N, iter, func() { + atomic.Xadduintptr(&total, inc) + }) + if want := uintptr(N*iter) * inc; want != total { + t.Fatalf("xadduintpr error, want %d, got %d", want, total) + } + total = 0 + runParallel(N, iter, func() { + atomic.Xadduintptr(&total, inc) + atomic.Xadduintptr(&total, uintptr(-int64(inc))) + }) + if total != 0 { + t.Fatalf("xadduintpr total error, want %d, got %d", 0, total) + } +} + +// Tests that xadduintptr correctly updates 64-bit values. The place where +// we actually do so is mstats.go, functions mSysStat{Inc,Dec}. +func TestXadduintptrOnUint64(t *testing.T) { + if goarch.BigEndian { + // On big endian architectures, we never use xadduintptr to update + // 64-bit values and hence we skip the test. (Note that functions + // mSysStat{Inc,Dec} in mstats.go have explicit checks for + // big-endianness.) 
+ t.Skip("skip xadduintptr on big endian architecture") + } + const inc = 100 + val := uint64(0) + atomic.Xadduintptr((*uintptr)(unsafe.Pointer(&val)), inc) + if inc != val { + t.Fatalf("xadduintptr should increase lower-order bits, want %d, got %d", inc, val) + } +} + +func shouldPanic(t *testing.T, name string, f func()) { + defer func() { + // Check that all GC maps are sane. + runtime.GC() + + err := recover() + want := "unaligned 64-bit atomic operation" + if err == nil { + t.Errorf("%s did not panic", name) + } else if s, _ := err.(string); s != want { + t.Errorf("%s: wanted panic %q, got %q", name, want, err) + } + }() + f() +} + +// Variant of sync/atomic's TestUnaligned64: +func TestUnaligned64(t *testing.T) { + // Unaligned 64-bit atomics on 32-bit systems are + // a continual source of pain. Test that on 32-bit systems they crash + // instead of failing silently. + + if unsafe.Sizeof(int(0)) != 4 { + t.Skip("test only runs on 32-bit systems") + } + + x := make([]uint32, 4) + u := unsafe.Pointer(uintptr(unsafe.Pointer(&x[0])) | 4) // force alignment to 4 + + up64 := (*uint64)(u) // misaligned + p64 := (*int64)(u) // misaligned + + shouldPanic(t, "Load64", func() { atomic.Load64(up64) }) + shouldPanic(t, "Loadint64", func() { atomic.Loadint64(p64) }) + shouldPanic(t, "Store64", func() { atomic.Store64(up64, 0) }) + shouldPanic(t, "Xadd64", func() { atomic.Xadd64(up64, 1) }) + shouldPanic(t, "Xchg64", func() { atomic.Xchg64(up64, 1) }) + shouldPanic(t, "Cas64", func() { atomic.Cas64(up64, 1, 2) }) +} + +func TestAnd8(t *testing.T) { + // Basic sanity check. + x := uint8(0xff) + for i := uint8(0); i < 8; i++ { + atomic.And8(&x, ^(1 << i)) + if r := uint8(0xff) << (i + 1); x != r { + t.Fatalf("clearing bit %#x: want %#x, got %#x", uint8(1<(SB),NOSPLIT,$0 + MOVW $0xffff0fc0, R15 // R15 is hardware PC. 
+ +TEXT ·Cas(SB),NOSPLIT|NOFRAME,$0 + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BLT 2(PC) + JMP ·armcas(SB) + JMP kernelcas<>(SB) + +TEXT kernelcas<>(SB),NOSPLIT,$0 + MOVW ptr+0(FP), R2 + // trigger potential paging fault here, + // because we don't know how to traceback through __kuser_cmpxchg + MOVW (R2), R0 + MOVW old+4(FP), R0 + MOVW new+8(FP), R1 + BL cas<>(SB) + BCC ret0 + MOVW $1, R0 + MOVB R0, ret+12(FP) + RET +ret0: + MOVW $0, R0 + MOVB R0, ret+12(FP) + RET + +// As for cas, memory barriers are complicated on ARM, but the kernel +// provides a user helper. ARMv5 does not support SMP and has no +// memory barrier instruction at all. ARMv6 added SMP support and has +// a memory barrier, but it requires writing to a coprocessor +// register. ARMv7 introduced the DMB instruction, but it's expensive +// even on single-core devices. The kernel helper takes care of all of +// this for us. + +// Use kernel helper version of memory_barrier, when compiled with GOARM < 7. +TEXT memory_barrier<>(SB),NOSPLIT|NOFRAME,$0 + MOVW $0xffff0fa0, R15 // R15 is hardware PC. 
+ +TEXT ·Load(SB),NOSPLIT,$0-8 + MOVW addr+0(FP), R0 + MOVW (R0), R1 + + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BGE native_barrier + BL memory_barrier<>(SB) + B end +native_barrier: + DMB MB_ISH +end: + MOVW R1, ret+4(FP) + RET + +TEXT ·Store(SB),NOSPLIT,$0-8 + MOVW addr+0(FP), R1 + MOVW v+4(FP), R2 + + MOVB runtime·goarm(SB), R8 + CMP $7, R8 + BGE native_barrier + BL memory_barrier<>(SB) + B store +native_barrier: + DMB MB_ISH + +store: + MOVW R2, (R1) + + CMP $7, R8 + BGE native_barrier2 + BL memory_barrier<>(SB) + RET +native_barrier2: + DMB MB_ISH + RET + +TEXT ·Load8(SB),NOSPLIT,$0-5 + MOVW addr+0(FP), R0 + MOVB (R0), R1 + + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BGE native_barrier + BL memory_barrier<>(SB) + B end +native_barrier: + DMB MB_ISH +end: + MOVB R1, ret+4(FP) + RET + +TEXT ·Store8(SB),NOSPLIT,$0-5 + MOVW addr+0(FP), R1 + MOVB v+4(FP), R2 + + MOVB runtime·goarm(SB), R8 + CMP $7, R8 + BGE native_barrier + BL memory_barrier<>(SB) + B store +native_barrier: + DMB MB_ISH + +store: + MOVB R2, (R1) + + CMP $7, R8 + BGE native_barrier2 + BL memory_barrier<>(SB) + RET +native_barrier2: + DMB MB_ISH + RET diff --git a/src/internal/runtime/atomic/sys_nonlinux_arm.s b/src/internal/runtime/atomic/sys_nonlinux_arm.s new file mode 100644 index 0000000000..b55bf908a2 --- /dev/null +++ b/src/internal/runtime/atomic/sys_nonlinux_arm.s @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux + +#include "textflag.h" + +// TODO(minux): this is only valid for ARMv6+ +// bool armcas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// }else +// return 0; +TEXT ·Cas(SB),NOSPLIT,$0 + JMP ·armcas(SB) + +// Non-linux OSes support only single processor machines before ARMv7. +// So we don't need memory barriers if goarm < 7. 
And we fail loud at +// startup (runtime.checkgoarm) if it is a multi-processor but goarm < 7. + +TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-8 + MOVW addr+0(FP), R0 + MOVW (R0), R1 + + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BLT 2(PC) + DMB MB_ISH + + MOVW R1, ret+4(FP) + RET + +TEXT ·Store(SB),NOSPLIT,$0-8 + MOVW addr+0(FP), R1 + MOVW v+4(FP), R2 + + MOVB runtime·goarm(SB), R8 + CMP $7, R8 + BLT 2(PC) + DMB MB_ISH + + MOVW R2, (R1) + + CMP $7, R8 + BLT 2(PC) + DMB MB_ISH + RET + +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-5 + MOVW addr+0(FP), R0 + MOVB (R0), R1 + + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BLT 2(PC) + DMB MB_ISH + + MOVB R1, ret+4(FP) + RET + +TEXT ·Store8(SB),NOSPLIT,$0-5 + MOVW addr+0(FP), R1 + MOVB v+4(FP), R2 + + MOVB runtime·goarm(SB), R8 + CMP $7, R8 + BLT 2(PC) + DMB MB_ISH + + MOVB R2, (R1) + + CMP $7, R8 + BLT 2(PC) + DMB MB_ISH + RET + diff --git a/src/internal/runtime/atomic/types.go b/src/internal/runtime/atomic/types.go new file mode 100644 index 0000000000..287742fee5 --- /dev/null +++ b/src/internal/runtime/atomic/types.go @@ -0,0 +1,587 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +import "unsafe" + +// Int32 is an atomically accessed int32 value. +// +// An Int32 must not be copied. +type Int32 struct { + noCopy noCopy + value int32 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (i *Int32) Load() int32 { + return Loadint32(&i.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (i *Int32) Store(value int32) { + Storeint32(&i.value, value) +} + +// CompareAndSwap atomically compares i's value with old, +// and if they're equal, swaps i's value with new. +// It reports whether the swap ran. 
+// +//go:nosplit +func (i *Int32) CompareAndSwap(old, new int32) bool { + return Casint32(&i.value, old, new) +} + +// Swap replaces i's value with new, returning +// i's value before the replacement. +// +//go:nosplit +func (i *Int32) Swap(new int32) int32 { + return Xchgint32(&i.value, new) +} + +// Add adds delta to i atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (i *Int32) Add(delta int32) int32 { + return Xaddint32(&i.value, delta) +} + +// Int64 is an atomically accessed int64 value. +// +// 8-byte aligned on all platforms, unlike a regular int64. +// +// An Int64 must not be copied. +type Int64 struct { + noCopy noCopy + _ align64 + value int64 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (i *Int64) Load() int64 { + return Loadint64(&i.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (i *Int64) Store(value int64) { + Storeint64(&i.value, value) +} + +// CompareAndSwap atomically compares i's value with old, +// and if they're equal, swaps i's value with new. +// It reports whether the swap ran. +// +//go:nosplit +func (i *Int64) CompareAndSwap(old, new int64) bool { + return Casint64(&i.value, old, new) +} + +// Swap replaces i's value with new, returning +// i's value before the replacement. +// +//go:nosplit +func (i *Int64) Swap(new int64) int64 { + return Xchgint64(&i.value, new) +} + +// Add adds delta to i atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (i *Int64) Add(delta int64) int64 { + return Xaddint64(&i.value, delta) +} + +// Uint8 is an atomically accessed uint8 value. +// +// A Uint8 must not be copied. +type Uint8 struct { + noCopy noCopy + value uint8 +} + +// Load accesses and returns the value atomically. 
+// +//go:nosplit +func (u *Uint8) Load() uint8 { + return Load8(&u.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (u *Uint8) Store(value uint8) { + Store8(&u.value, value) +} + +// And takes value and performs a bit-wise +// "and" operation with the value of u, storing +// the result into u. +// +// The full process is performed atomically. +// +//go:nosplit +func (u *Uint8) And(value uint8) { + And8(&u.value, value) +} + +// Or takes value and performs a bit-wise +// "or" operation with the value of u, storing +// the result into u. +// +// The full process is performed atomically. +// +//go:nosplit +func (u *Uint8) Or(value uint8) { + Or8(&u.value, value) +} + +// Bool is an atomically accessed bool value. +// +// A Bool must not be copied. +type Bool struct { + // Inherits noCopy from Uint8. + u Uint8 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (b *Bool) Load() bool { + return b.u.Load() != 0 +} + +// Store updates the value atomically. +// +//go:nosplit +func (b *Bool) Store(value bool) { + s := uint8(0) + if value { + s = 1 + } + b.u.Store(s) +} + +// Uint32 is an atomically accessed uint32 value. +// +// A Uint32 must not be copied. +type Uint32 struct { + noCopy noCopy + value uint32 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *Uint32) Load() uint32 { + return Load(&u.value) +} + +// LoadAcquire is a partially unsynchronized version +// of Load that relaxes ordering constraints. Other threads +// may observe operations that precede this operation to +// occur after it, but no operation that occurs after it +// on this thread can be observed to occur before it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint32) LoadAcquire() uint32 { + return LoadAcq(&u.value) +} + +// Store updates the value atomically. 
+// +//go:nosplit +func (u *Uint32) Store(value uint32) { + Store(&u.value, value) +} + +// StoreRelease is a partially unsynchronized version +// of Store that relaxes ordering constraints. Other threads +// may observe operations that occur after this operation to +// precede it, but no operation that precedes it +// on this thread can be observed to occur after it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint32) StoreRelease(value uint32) { + StoreRel(&u.value, value) +} + +// CompareAndSwap atomically compares u's value with old, +// and if they're equal, swaps u's value with new. +// It reports whether the swap ran. +// +//go:nosplit +func (u *Uint32) CompareAndSwap(old, new uint32) bool { + return Cas(&u.value, old, new) +} + +// CompareAndSwapRelease is a partially unsynchronized version +// of Cas that relaxes ordering constraints. Other threads +// may observe operations that occur after this operation to +// precede it, but no operation that precedes it +// on this thread can be observed to occur after it. +// It reports whether the swap ran. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint32) CompareAndSwapRelease(old, new uint32) bool { + return CasRel(&u.value, old, new) +} + +// Swap replaces u's value with new, returning +// u's value before the replacement. +// +//go:nosplit +func (u *Uint32) Swap(value uint32) uint32 { + return Xchg(&u.value, value) +} + +// And takes value and performs a bit-wise +// "and" operation with the value of u, storing +// the result into u. +// +// The full process is performed atomically. +// +//go:nosplit +func (u *Uint32) And(value uint32) { + And(&u.value, value) +} + +// Or takes value and performs a bit-wise +// "or" operation with the value of u, storing +// the result into u. +// +// The full process is performed atomically. 
+// +//go:nosplit +func (u *Uint32) Or(value uint32) { + Or(&u.value, value) +} + +// Add adds delta to u atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (u *Uint32) Add(delta int32) uint32 { + return Xadd(&u.value, delta) +} + +// Uint64 is an atomically accessed uint64 value. +// +// 8-byte aligned on all platforms, unlike a regular uint64. +// +// A Uint64 must not be copied. +type Uint64 struct { + noCopy noCopy + _ align64 + value uint64 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *Uint64) Load() uint64 { + return Load64(&u.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (u *Uint64) Store(value uint64) { + Store64(&u.value, value) +} + +// CompareAndSwap atomically compares u's value with old, +// and if they're equal, swaps u's value with new. +// It reports whether the swap ran. +// +//go:nosplit +func (u *Uint64) CompareAndSwap(old, new uint64) bool { + return Cas64(&u.value, old, new) +} + +// Swap replaces u's value with new, returning +// u's value before the replacement. +// +//go:nosplit +func (u *Uint64) Swap(value uint64) uint64 { + return Xchg64(&u.value, value) +} + +// Add adds delta to u atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (u *Uint64) Add(delta int64) uint64 { + return Xadd64(&u.value, delta) +} + +// Uintptr is an atomically accessed uintptr value. +// +// A Uintptr must not be copied. +type Uintptr struct { + noCopy noCopy + value uintptr +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *Uintptr) Load() uintptr { + return Loaduintptr(&u.value) +} + +// LoadAcquire is a partially unsynchronized version +// of Load that relaxes ordering constraints. 
Other threads +// may observe operations that precede this operation to +// occur after it, but no operation that occurs after it +// on this thread can be observed to occur before it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uintptr) LoadAcquire() uintptr { + return LoadAcquintptr(&u.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (u *Uintptr) Store(value uintptr) { + Storeuintptr(&u.value, value) +} + +// StoreRelease is a partially unsynchronized version +// of Store that relaxes ordering constraints. Other threads +// may observe operations that occur after this operation to +// precede it, but no operation that precedes it +// on this thread can be observed to occur after it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uintptr) StoreRelease(value uintptr) { + StoreReluintptr(&u.value, value) +} + +// CompareAndSwap atomically compares u's value with old, +// and if they're equal, swaps u's value with new. +// It reports whether the swap ran. +// +//go:nosplit +func (u *Uintptr) CompareAndSwap(old, new uintptr) bool { + return Casuintptr(&u.value, old, new) +} + +// Swap replaces u's value with new, returning +// u's value before the replacement. +// +//go:nosplit +func (u *Uintptr) Swap(value uintptr) uintptr { + return Xchguintptr(&u.value, value) +} + +// Add adds delta to u atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (u *Uintptr) Add(delta uintptr) uintptr { + return Xadduintptr(&u.value, delta) +} + +// Float64 is an atomically accessed float64 value. +// +// 8-byte aligned on all platforms, unlike a regular float64. +// +// A Float64 must not be copied. +type Float64 struct { + // Inherits noCopy and align64 from Uint64. + u Uint64 +} + +// Load accesses and returns the value atomically. 
+// +//go:nosplit +func (f *Float64) Load() float64 { + r := f.u.Load() + return *(*float64)(unsafe.Pointer(&r)) +} + +// Store updates the value atomically. +// +//go:nosplit +func (f *Float64) Store(value float64) { + f.u.Store(*(*uint64)(unsafe.Pointer(&value))) +} + +// UnsafePointer is an atomically accessed unsafe.Pointer value. +// +// Note that because of the atomicity guarantees, stores to values +// of this type never trigger a write barrier, and the relevant +// methods are suffixed with "NoWB" to indicate that explicitly. +// As a result, this type should be used carefully, and sparingly, +// mostly with values that do not live in the Go heap anyway. +// +// An UnsafePointer must not be copied. +type UnsafePointer struct { + noCopy noCopy + value unsafe.Pointer +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *UnsafePointer) Load() unsafe.Pointer { + return Loadp(unsafe.Pointer(&u.value)) +} + +// StoreNoWB updates the value atomically. +// +// WARNING: As the name implies this operation does *not* +// perform a write barrier on value, and so this operation may +// hide pointers from the GC. Use with care and sparingly. +// It is safe to use with values not found in the Go heap. +// Prefer Store instead. +// +//go:nosplit +func (u *UnsafePointer) StoreNoWB(value unsafe.Pointer) { + StorepNoWB(unsafe.Pointer(&u.value), value) +} + +// Store updates the value atomically. +func (u *UnsafePointer) Store(value unsafe.Pointer) { + storePointer(&u.value, value) +} + +// provided by runtime +// +//go:linkname storePointer +func storePointer(ptr *unsafe.Pointer, new unsafe.Pointer) + +// CompareAndSwapNoWB atomically (with respect to other methods) +// compares u's value with old, and if they're equal, +// swaps u's value with new. +// It reports whether the swap ran. +// +// WARNING: As the name implies this operation does *not* +// perform a write barrier on value, and so this operation may +// hide pointers from the GC. 
Use with care and sparingly. +// It is safe to use with values not found in the Go heap. +// Prefer CompareAndSwap instead. +// +//go:nosplit +func (u *UnsafePointer) CompareAndSwapNoWB(old, new unsafe.Pointer) bool { + return Casp1(&u.value, old, new) +} + +// CompareAndSwap atomically compares u's value with old, +// and if they're equal, swaps u's value with new. +// It reports whether the swap ran. +func (u *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) bool { + return casPointer(&u.value, old, new) +} + +func casPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool + +// Pointer is an atomic pointer of type *T. +type Pointer[T any] struct { + u UnsafePointer +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (p *Pointer[T]) Load() *T { + return (*T)(p.u.Load()) +} + +// StoreNoWB updates the value atomically. +// +// WARNING: As the name implies this operation does *not* +// perform a write barrier on value, and so this operation may +// hide pointers from the GC. Use with care and sparingly. +// It is safe to use with values not found in the Go heap. +// Prefer Store instead. +// +//go:nosplit +func (p *Pointer[T]) StoreNoWB(value *T) { + p.u.StoreNoWB(unsafe.Pointer(value)) +} + +// Store updates the value atomically. +// +//go:nosplit +func (p *Pointer[T]) Store(value *T) { + p.u.Store(unsafe.Pointer(value)) +} + +// CompareAndSwapNoWB atomically (with respect to other methods) +// compares u's value with old, and if they're equal, +// swaps u's value with new. +// It reports whether the swap ran. +// +// WARNING: As the name implies this operation does *not* +// perform a write barrier on value, and so this operation may +// hide pointers from the GC. Use with care and sparingly. +// It is safe to use with values not found in the Go heap. +// Prefer CompareAndSwap instead. 
+// +//go:nosplit +func (p *Pointer[T]) CompareAndSwapNoWB(old, new *T) bool { + return p.u.CompareAndSwapNoWB(unsafe.Pointer(old), unsafe.Pointer(new)) +} + +// CompareAndSwap atomically (with respect to other methods) +// compares u's value with old, and if they're equal, +// swaps u's value with new. +// It reports whether the swap ran. +func (p *Pointer[T]) CompareAndSwap(old, new *T) bool { + return p.u.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + +// align64 may be added to structs that must be 64-bit aligned. +// This struct is recognized by a special case in the compiler +// and will not work if copied to any other package. +type align64 struct{} diff --git a/src/internal/runtime/atomic/types_64bit.go b/src/internal/runtime/atomic/types_64bit.go new file mode 100644 index 0000000000..006e83ba87 --- /dev/null +++ b/src/internal/runtime/atomic/types_64bit.go @@ -0,0 +1,33 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm + +package atomic + +// LoadAcquire is a partially unsynchronized version +// of Load that relaxes ordering constraints. Other threads +// may observe operations that precede this operation to +// occur after it, but no operation that occurs after it +// on this thread can be observed to occur before it. +// +// WARNING: Use sparingly and with great care. 
+// +//go:nosplit +func (u *Uint64) LoadAcquire() uint64 { + return LoadAcq64(&u.value) +} + +// StoreRelease is a partially unsynchronized version +// of Store that relaxes ordering constraints. Other threads +// may observe operations that occur after this operation to +// precede it, but no operation that precedes it +// on this thread can be observed to occur after it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint64) StoreRelease(value uint64) { + StoreRel64(&u.value, value) +} diff --git a/src/internal/runtime/atomic/unaligned.go b/src/internal/runtime/atomic/unaligned.go new file mode 100644 index 0000000000..a859de4144 --- /dev/null +++ b/src/internal/runtime/atomic/unaligned.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +func panicUnaligned() { + panic("unaligned 64-bit atomic operation") +} diff --git a/src/runtime/HACKING.md b/src/runtime/HACKING.md index ce0b42a354..e1a43ba88e 100644 --- a/src/runtime/HACKING.md +++ b/src/runtime/HACKING.md @@ -173,7 +173,7 @@ In summary, Atomics ======= -The runtime uses its own atomics package at `runtime/internal/atomic`. +The runtime uses its own atomics package at `internal/runtime/atomic`. This corresponds to `sync/atomic`, but functions have different names for historical reasons and there are a few additional functions needed by the runtime. 
diff --git a/src/runtime/abi_test.go b/src/runtime/abi_test.go index 4caee597c5..d2e79c6dc4 100644 --- a/src/runtime/abi_test.go +++ b/src/runtime/abi_test.go @@ -11,11 +11,11 @@ package runtime_test import ( "internal/abi" + "internal/runtime/atomic" "internal/testenv" "os" "os/exec" "runtime" - "runtime/internal/atomic" "strings" "testing" "time" diff --git a/src/runtime/align_runtime_test.go b/src/runtime/align_runtime_test.go index d78b0b2d39..6d77e0d3d4 100644 --- a/src/runtime/align_runtime_test.go +++ b/src/runtime/align_runtime_test.go @@ -11,7 +11,7 @@ package runtime import "unsafe" // AtomicFields is the set of fields on which we perform 64-bit atomic -// operations (all the *64 operations in runtime/internal/atomic). +// operations (all the *64 operations in internal/runtime/atomic). var AtomicFields = []uintptr{ unsafe.Offsetof(m{}.procid), unsafe.Offsetof(p{}.gcFractionalMarkTime), diff --git a/src/runtime/arena.go b/src/runtime/arena.go index 3fdd4cbdd6..5c3350aabf 100644 --- a/src/runtime/arena.go +++ b/src/runtime/arena.go @@ -85,7 +85,7 @@ package runtime import ( "internal/goarch" "internal/goexperiment" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/math" "unsafe" ) diff --git a/src/runtime/arena_test.go b/src/runtime/arena_test.go index 018c423712..ca5223b59c 100644 --- a/src/runtime/arena_test.go +++ b/src/runtime/arena_test.go @@ -6,10 +6,10 @@ package runtime_test import ( "internal/goarch" + "internal/runtime/atomic" "reflect" . 
"runtime" "runtime/debug" - "runtime/internal/atomic" "testing" "time" "unsafe" diff --git a/src/runtime/atomic_pointer.go b/src/runtime/atomic_pointer.go index b61bf0b8b2..e3d17b5cf8 100644 --- a/src/runtime/atomic_pointer.go +++ b/src/runtime/atomic_pointer.go @@ -6,7 +6,7 @@ package runtime import ( "internal/goexperiment" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) @@ -43,7 +43,7 @@ func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) { // (like StoreNoWB but with the write barrier). // //go:nosplit -//go:linkname atomic_storePointer runtime/internal/atomic.storePointer +//go:linkname atomic_storePointer internal/runtime/atomic.storePointer func atomic_storePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { atomicstorep(unsafe.Pointer(ptr), new) } @@ -52,7 +52,7 @@ func atomic_storePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { // (like CompareAndSwapNoWB but with the write barrier). // //go:nosplit -//go:linkname atomic_casPointer runtime/internal/atomic.casPointer +//go:linkname atomic_casPointer internal/runtime/atomic.casPointer func atomic_casPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { if writeBarrier.enabled { atomicwb(ptr, new) diff --git a/src/runtime/chan.go b/src/runtime/chan.go index 8aca024c4c..6ce824f62c 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -19,7 +19,7 @@ package runtime import ( "internal/abi" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/math" "unsafe" ) diff --git a/src/runtime/coverage/testsupport.go b/src/runtime/coverage/testsupport.go index f169580618..a12b8589f0 100644 --- a/src/runtime/coverage/testsupport.go +++ b/src/runtime/coverage/testsupport.go @@ -14,10 +14,10 @@ import ( "internal/coverage/decodecounter" "internal/coverage/decodemeta" "internal/coverage/pods" + "internal/runtime/atomic" "io" "os" "path/filepath" - "runtime/internal/atomic" "strings" "unsafe" ) diff --git a/src/runtime/debug.go b/src/runtime/debug.go index 
3233ce8ee7..184e4127c3 100644 --- a/src/runtime/debug.go +++ b/src/runtime/debug.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go index 873f1b45bd..545fde2b24 100644 --- a/src/runtime/debuglog.go +++ b/src/runtime/debuglog.go @@ -16,7 +16,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index fe79497e7f..e71f4766c6 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -11,7 +11,7 @@ import ( "internal/goarch" "internal/goexperiment" "internal/goos" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/histogram.go b/src/runtime/histogram.go index f243667b55..95230d1f39 100644 --- a/src/runtime/histogram.go +++ b/src/runtime/histogram.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 99e9a367f5..e280180665 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -7,7 +7,7 @@ package runtime import ( "internal/abi" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/internal/atomic/atomic_386.go b/src/runtime/internal/atomic/atomic_386.go deleted file mode 100644 index e74dcaa92d..0000000000 --- a/src/runtime/internal/atomic/atomic_386.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build 386 - -package atomic - -import "unsafe" - -// Export some functions via linkname to assembly in sync/atomic. 
-// -//go:linkname Load -//go:linkname Loadp - -//go:nosplit -//go:noinline -func Load(ptr *uint32) uint32 { - return *ptr -} - -//go:nosplit -//go:noinline -func Loadp(ptr unsafe.Pointer) unsafe.Pointer { - return *(*unsafe.Pointer)(ptr) -} - -//go:nosplit -//go:noinline -func LoadAcq(ptr *uint32) uint32 { - return *ptr -} - -//go:nosplit -//go:noinline -func LoadAcquintptr(ptr *uintptr) uintptr { - return *ptr -} - -//go:noescape -func Xadd64(ptr *uint64, delta int64) uint64 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xchg64(ptr *uint64, new uint64) uint64 - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:noescape -func Load64(ptr *uint64) uint64 - -//go:nosplit -//go:noinline -func Load8(ptr *uint8) uint8 { - return *ptr -} - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func Or8(ptr *uint8, val uint8) - -//go:noescape -func And(ptr *uint32, val uint32) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func And32(ptr *uint32, val uint32) uint32 - -//go:noescape -func Or32(ptr *uint32, val uint32) uint32 - -//go:noescape -func And64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Or64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Anduintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Oruintptr(ptr *uintptr, val uintptr) uintptr - -// NOTE: Do not add atomicxor8 (XOR is not idempotent). 
- -//go:noescape -func Cas64(ptr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(ptr *uint32, old, new uint32) bool - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -//go:noescape -func Store64(ptr *uint64, val uint64) - -//go:noescape -func StoreRel(ptr *uint32, val uint32) - -//go:noescape -func StoreReluintptr(ptr *uintptr, val uintptr) - -// NO go:noescape annotation; see atomic_pointer.go. -func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/src/runtime/internal/atomic/atomic_386.s b/src/runtime/internal/atomic/atomic_386.s deleted file mode 100644 index 08812c37ec..0000000000 --- a/src/runtime/internal/atomic/atomic_386.s +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" -#include "funcdata.h" - -// bool Cas(int32 *val, int32 old, int32 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// }else -// return 0; -TEXT ·Cas(SB), NOSPLIT, $0-13 - MOVL ptr+0(FP), BX - MOVL old+4(FP), AX - MOVL new+8(FP), CX - LOCK - CMPXCHGL CX, 0(BX) - SETEQ ret+12(FP) - RET - -TEXT ·Casint32(SB), NOSPLIT, $0-13 - JMP ·Cas(SB) - -TEXT ·Casint64(SB), NOSPLIT, $0-21 - JMP ·Cas64(SB) - -TEXT ·Casuintptr(SB), NOSPLIT, $0-13 - JMP ·Cas(SB) - -TEXT ·CasRel(SB), NOSPLIT, $0-13 - JMP ·Cas(SB) - -TEXT ·Loaduintptr(SB), NOSPLIT, $0-8 - JMP ·Load(SB) - -TEXT ·Loaduint(SB), NOSPLIT, $0-8 - JMP ·Load(SB) - -TEXT ·Storeint32(SB), NOSPLIT, $0-8 - JMP ·Store(SB) - -TEXT ·Storeint64(SB), NOSPLIT, $0-12 - JMP ·Store64(SB) - -TEXT ·Storeuintptr(SB), NOSPLIT, $0-8 - JMP ·Store(SB) - -TEXT ·Xadduintptr(SB), NOSPLIT, $0-12 - JMP ·Xadd(SB) - -TEXT ·Loadint32(SB), NOSPLIT, $0-8 - JMP ·Load(SB) - -TEXT ·Loadint64(SB), NOSPLIT, $0-12 - JMP ·Load64(SB) - -TEXT ·Xaddint32(SB), NOSPLIT, $0-12 - JMP ·Xadd(SB) - -TEXT ·Xaddint64(SB), 
NOSPLIT, $0-20 - JMP ·Xadd64(SB) - -// bool ·Cas64(uint64 *val, uint64 old, uint64 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else { -// return 0; -// } -TEXT ·Cas64(SB), NOSPLIT, $0-21 - NO_LOCAL_POINTERS - MOVL ptr+0(FP), BP - TESTL $7, BP - JZ 2(PC) - CALL ·panicUnaligned(SB) - MOVL old_lo+4(FP), AX - MOVL old_hi+8(FP), DX - MOVL new_lo+12(FP), BX - MOVL new_hi+16(FP), CX - LOCK - CMPXCHG8B 0(BP) - SETEQ ret+20(FP) - RET - -// bool Casp1(void **p, void *old, void *new) -// Atomically: -// if(*p == old){ -// *p = new; -// return 1; -// }else -// return 0; -TEXT ·Casp1(SB), NOSPLIT, $0-13 - MOVL ptr+0(FP), BX - MOVL old+4(FP), AX - MOVL new+8(FP), CX - LOCK - CMPXCHGL CX, 0(BX) - SETEQ ret+12(FP) - RET - -// uint32 Xadd(uint32 volatile *val, int32 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd(SB), NOSPLIT, $0-12 - MOVL ptr+0(FP), BX - MOVL delta+4(FP), AX - MOVL AX, CX - LOCK - XADDL AX, 0(BX) - ADDL CX, AX - MOVL AX, ret+8(FP) - RET - -TEXT ·Xadd64(SB), NOSPLIT, $0-20 - NO_LOCAL_POINTERS - // no XADDQ so use CMPXCHG8B loop - MOVL ptr+0(FP), BP - TESTL $7, BP - JZ 2(PC) - CALL ·panicUnaligned(SB) - // DI:SI = delta - MOVL delta_lo+4(FP), SI - MOVL delta_hi+8(FP), DI - // DX:AX = *addr - MOVL 0(BP), AX - MOVL 4(BP), DX -addloop: - // CX:BX = DX:AX (*addr) + DI:SI (delta) - MOVL AX, BX - MOVL DX, CX - ADDL SI, BX - ADCL DI, CX - - // if *addr == DX:AX { - // *addr = CX:BX - // } else { - // DX:AX = *addr - // } - // all in one instruction - LOCK - CMPXCHG8B 0(BP) - - JNZ addloop - - // success - // return CX:BX - MOVL BX, ret_lo+12(FP) - MOVL CX, ret_hi+16(FP) - RET - -TEXT ·Xchg(SB), NOSPLIT, $0-12 - MOVL ptr+0(FP), BX - MOVL new+4(FP), AX - XCHGL AX, 0(BX) - MOVL AX, ret+8(FP) - RET - -TEXT ·Xchgint32(SB), NOSPLIT, $0-12 - JMP ·Xchg(SB) - -TEXT ·Xchgint64(SB), NOSPLIT, $0-20 - JMP ·Xchg64(SB) - -TEXT ·Xchguintptr(SB), NOSPLIT, $0-12 - JMP ·Xchg(SB) - -TEXT ·Xchg64(SB),NOSPLIT,$0-20 - NO_LOCAL_POINTERS - 
// no XCHGQ so use CMPXCHG8B loop - MOVL ptr+0(FP), BP - TESTL $7, BP - JZ 2(PC) - CALL ·panicUnaligned(SB) - // CX:BX = new - MOVL new_lo+4(FP), BX - MOVL new_hi+8(FP), CX - // DX:AX = *addr - MOVL 0(BP), AX - MOVL 4(BP), DX -swaploop: - // if *addr == DX:AX - // *addr = CX:BX - // else - // DX:AX = *addr - // all in one instruction - LOCK - CMPXCHG8B 0(BP) - JNZ swaploop - - // success - // return DX:AX - MOVL AX, ret_lo+12(FP) - MOVL DX, ret_hi+16(FP) - RET - -TEXT ·StorepNoWB(SB), NOSPLIT, $0-8 - MOVL ptr+0(FP), BX - MOVL val+4(FP), AX - XCHGL AX, 0(BX) - RET - -TEXT ·Store(SB), NOSPLIT, $0-8 - MOVL ptr+0(FP), BX - MOVL val+4(FP), AX - XCHGL AX, 0(BX) - RET - -TEXT ·StoreRel(SB), NOSPLIT, $0-8 - JMP ·Store(SB) - -TEXT ·StoreReluintptr(SB), NOSPLIT, $0-8 - JMP ·Store(SB) - -// uint64 atomicload64(uint64 volatile* addr); -TEXT ·Load64(SB), NOSPLIT, $0-12 - NO_LOCAL_POINTERS - MOVL ptr+0(FP), AX - TESTL $7, AX - JZ 2(PC) - CALL ·panicUnaligned(SB) - MOVQ (AX), M0 - MOVQ M0, ret+4(FP) - EMMS - RET - -// void ·Store64(uint64 volatile* addr, uint64 v); -TEXT ·Store64(SB), NOSPLIT, $0-12 - NO_LOCAL_POINTERS - MOVL ptr+0(FP), AX - TESTL $7, AX - JZ 2(PC) - CALL ·panicUnaligned(SB) - // MOVQ and EMMS were introduced on the Pentium MMX. - MOVQ val+4(FP), M0 - MOVQ M0, (AX) - EMMS - // This is essentially a no-op, but it provides required memory fencing. - // It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2). 
- XORL AX, AX - LOCK - XADDL AX, (SP) - RET - -// void ·Or8(byte volatile*, byte); -TEXT ·Or8(SB), NOSPLIT, $0-5 - MOVL ptr+0(FP), AX - MOVB val+4(FP), BX - LOCK - ORB BX, (AX) - RET - -// void ·And8(byte volatile*, byte); -TEXT ·And8(SB), NOSPLIT, $0-5 - MOVL ptr+0(FP), AX - MOVB val+4(FP), BX - LOCK - ANDB BX, (AX) - RET - -TEXT ·Store8(SB), NOSPLIT, $0-5 - MOVL ptr+0(FP), BX - MOVB val+4(FP), AX - XCHGB AX, 0(BX) - RET - -// func Or(addr *uint32, v uint32) -TEXT ·Or(SB), NOSPLIT, $0-8 - MOVL ptr+0(FP), AX - MOVL val+4(FP), BX - LOCK - ORL BX, (AX) - RET - -// func And(addr *uint32, v uint32) -TEXT ·And(SB), NOSPLIT, $0-8 - MOVL ptr+0(FP), AX - MOVL val+4(FP), BX - LOCK - ANDL BX, (AX) - RET - -// func And32(addr *uint32, v uint32) old uint32 -TEXT ·And32(SB), NOSPLIT, $0-12 - MOVL ptr+0(FP), BX - MOVL val+4(FP), CX -casloop: - MOVL CX, DX - MOVL (BX), AX - ANDL AX, DX - LOCK - CMPXCHGL DX, (BX) - JNZ casloop - MOVL AX, ret+8(FP) - RET - -// func Or32(addr *uint32, v uint32) old uint32 -TEXT ·Or32(SB), NOSPLIT, $0-12 - MOVL ptr+0(FP), BX - MOVL val+4(FP), CX -casloop: - MOVL CX, DX - MOVL (BX), AX - ORL AX, DX - LOCK - CMPXCHGL DX, (BX) - JNZ casloop - MOVL AX, ret+8(FP) - RET - -// func And64(addr *uint64, v uint64) old uint64 -TEXT ·And64(SB), NOSPLIT, $0-20 - MOVL ptr+0(FP), BP - // DI:SI = v - MOVL val_lo+4(FP), SI - MOVL val_hi+8(FP), DI - // DX:AX = *addr - MOVL 0(BP), AX - MOVL 4(BP), DX -casloop: - // CX:BX = DX:AX (*addr) & DI:SI (mask) - MOVL AX, BX - MOVL DX, CX - ANDL SI, BX - ANDL DI, CX - LOCK - CMPXCHG8B 0(BP) - JNZ casloop - MOVL AX, ret_lo+12(FP) - MOVL DX, ret_hi+16(FP) - RET - - -// func Or64(addr *uint64, v uint64) old uint64 -TEXT ·Or64(SB), NOSPLIT, $0-20 - MOVL ptr+0(FP), BP - // DI:SI = v - MOVL val_lo+4(FP), SI - MOVL val_hi+8(FP), DI - // DX:AX = *addr - MOVL 0(BP), AX - MOVL 4(BP), DX -casloop: - // CX:BX = DX:AX (*addr) | DI:SI (mask) - MOVL AX, BX - MOVL DX, CX - ORL SI, BX - ORL DI, CX - LOCK - CMPXCHG8B 0(BP) - JNZ casloop - MOVL 
AX, ret_lo+12(FP) - MOVL DX, ret_hi+16(FP) - RET - -// func Anduintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Anduintptr(SB), NOSPLIT, $0-12 - JMP ·And32(SB) - -// func Oruintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Oruintptr(SB), NOSPLIT, $0-12 - JMP ·Or32(SB) diff --git a/src/runtime/internal/atomic/atomic_amd64.go b/src/runtime/internal/atomic/atomic_amd64.go deleted file mode 100644 index b439954093..0000000000 --- a/src/runtime/internal/atomic/atomic_amd64.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package atomic - -import "unsafe" - -// Export some functions via linkname to assembly in sync/atomic. -// -//go:linkname Load -//go:linkname Loadp -//go:linkname Load64 - -//go:nosplit -//go:noinline -func Load(ptr *uint32) uint32 { - return *ptr -} - -//go:nosplit -//go:noinline -func Loadp(ptr unsafe.Pointer) unsafe.Pointer { - return *(*unsafe.Pointer)(ptr) -} - -//go:nosplit -//go:noinline -func Load64(ptr *uint64) uint64 { - return *ptr -} - -//go:nosplit -//go:noinline -func LoadAcq(ptr *uint32) uint32 { - return *ptr -} - -//go:nosplit -//go:noinline -func LoadAcq64(ptr *uint64) uint64 { - return *ptr -} - -//go:nosplit -//go:noinline -func LoadAcquintptr(ptr *uintptr) uintptr { - return *ptr -} - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xadd64(ptr *uint64, delta int64) uint64 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchg64(ptr *uint64, new uint64) uint64 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:nosplit -//go:noinline -func Load8(ptr *uint8) uint8 { - return *ptr -} - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func Or8(ptr *uint8, val uint8) - -//go:noescape -func And(ptr 
*uint32, val uint32) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func And32(ptr *uint32, val uint32) uint32 - -//go:noescape -func Or32(ptr *uint32, val uint32) uint32 - -//go:noescape -func And64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Or64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Anduintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Oruintptr(ptr *uintptr, val uintptr) uintptr - -// NOTE: Do not add atomicxor8 (XOR is not idempotent). - -//go:noescape -func Cas64(ptr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(ptr *uint32, old, new uint32) bool - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -//go:noescape -func Store64(ptr *uint64, val uint64) - -//go:noescape -func StoreRel(ptr *uint32, val uint32) - -//go:noescape -func StoreRel64(ptr *uint64, val uint64) - -//go:noescape -func StoreReluintptr(ptr *uintptr, val uintptr) - -// StorepNoWB performs *ptr = val atomically and without a write -// barrier. -// -// NO go:noescape annotation; see atomic_pointer.go. -func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/src/runtime/internal/atomic/atomic_amd64.s b/src/runtime/internal/atomic/atomic_amd64.s deleted file mode 100644 index ec75bf9332..0000000000 --- a/src/runtime/internal/atomic/atomic_amd64.s +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Note: some of these functions are semantically inlined -// by the compiler (in src/cmd/compile/internal/gc/ssa.go). 
- -#include "textflag.h" - -TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 - JMP ·Load64(SB) - -TEXT ·Loaduint(SB), NOSPLIT, $0-16 - JMP ·Load64(SB) - -TEXT ·Loadint32(SB), NOSPLIT, $0-12 - JMP ·Load(SB) - -TEXT ·Loadint64(SB), NOSPLIT, $0-16 - JMP ·Load64(SB) - -// bool Cas(int32 *val, int32 old, int32 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// return 0; -TEXT ·Cas(SB),NOSPLIT,$0-17 - MOVQ ptr+0(FP), BX - MOVL old+8(FP), AX - MOVL new+12(FP), CX - LOCK - CMPXCHGL CX, 0(BX) - SETEQ ret+16(FP) - RET - -// bool ·Cas64(uint64 *val, uint64 old, uint64 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else { -// return 0; -// } -TEXT ·Cas64(SB), NOSPLIT, $0-25 - MOVQ ptr+0(FP), BX - MOVQ old+8(FP), AX - MOVQ new+16(FP), CX - LOCK - CMPXCHGQ CX, 0(BX) - SETEQ ret+24(FP) - RET - -// bool Casp1(void **val, void *old, void *new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// return 0; -TEXT ·Casp1(SB), NOSPLIT, $0-25 - MOVQ ptr+0(FP), BX - MOVQ old+8(FP), AX - MOVQ new+16(FP), CX - LOCK - CMPXCHGQ CX, 0(BX) - SETEQ ret+24(FP) - RET - -TEXT ·Casint32(SB), NOSPLIT, $0-17 - JMP ·Cas(SB) - -TEXT ·Casint64(SB), NOSPLIT, $0-25 - JMP ·Cas64(SB) - -TEXT ·Casuintptr(SB), NOSPLIT, $0-25 - JMP ·Cas64(SB) - -TEXT ·CasRel(SB), NOSPLIT, $0-17 - JMP ·Cas(SB) - -// uint32 Xadd(uint32 volatile *val, int32 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd(SB), NOSPLIT, $0-20 - MOVQ ptr+0(FP), BX - MOVL delta+8(FP), AX - MOVL AX, CX - LOCK - XADDL AX, 0(BX) - ADDL CX, AX - MOVL AX, ret+16(FP) - RET - -// uint64 Xadd64(uint64 volatile *val, int64 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd64(SB), NOSPLIT, $0-24 - MOVQ ptr+0(FP), BX - MOVQ delta+8(FP), AX - MOVQ AX, CX - LOCK - XADDQ AX, 0(BX) - ADDQ CX, AX - MOVQ AX, ret+16(FP) - RET - -TEXT ·Xaddint32(SB), NOSPLIT, $0-20 - JMP ·Xadd(SB) - -TEXT ·Xaddint64(SB), NOSPLIT, $0-24 - JMP ·Xadd64(SB) - 
-TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 - JMP ·Xadd64(SB) - -// uint32 Xchg(ptr *uint32, new uint32) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg(SB), NOSPLIT, $0-20 - MOVQ ptr+0(FP), BX - MOVL new+8(FP), AX - XCHGL AX, 0(BX) - MOVL AX, ret+16(FP) - RET - -// uint64 Xchg64(ptr *uint64, new uint64) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg64(SB), NOSPLIT, $0-24 - MOVQ ptr+0(FP), BX - MOVQ new+8(FP), AX - XCHGQ AX, 0(BX) - MOVQ AX, ret+16(FP) - RET - -TEXT ·Xchgint32(SB), NOSPLIT, $0-20 - JMP ·Xchg(SB) - -TEXT ·Xchgint64(SB), NOSPLIT, $0-24 - JMP ·Xchg64(SB) - -TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 - JMP ·Xchg64(SB) - -TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 - MOVQ ptr+0(FP), BX - MOVQ val+8(FP), AX - XCHGQ AX, 0(BX) - RET - -TEXT ·Store(SB), NOSPLIT, $0-12 - MOVQ ptr+0(FP), BX - MOVL val+8(FP), AX - XCHGL AX, 0(BX) - RET - -TEXT ·Store8(SB), NOSPLIT, $0-9 - MOVQ ptr+0(FP), BX - MOVB val+8(FP), AX - XCHGB AX, 0(BX) - RET - -TEXT ·Store64(SB), NOSPLIT, $0-16 - MOVQ ptr+0(FP), BX - MOVQ val+8(FP), AX - XCHGQ AX, 0(BX) - RET - -TEXT ·Storeint32(SB), NOSPLIT, $0-12 - JMP ·Store(SB) - -TEXT ·Storeint64(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·StoreRel(SB), NOSPLIT, $0-12 - JMP ·Store(SB) - -TEXT ·StoreRel64(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -// void ·Or8(byte volatile*, byte); -TEXT ·Or8(SB), NOSPLIT, $0-9 - MOVQ ptr+0(FP), AX - MOVB val+8(FP), BX - LOCK - ORB BX, (AX) - RET - -// void ·And8(byte volatile*, byte); -TEXT ·And8(SB), NOSPLIT, $0-9 - MOVQ ptr+0(FP), AX - MOVB val+8(FP), BX - LOCK - ANDB BX, (AX) - RET - -// func Or(addr *uint32, v uint32) -TEXT ·Or(SB), NOSPLIT, $0-12 - MOVQ ptr+0(FP), AX - MOVL val+8(FP), BX - LOCK - ORL BX, (AX) - RET - -// func And(addr *uint32, v uint32) -TEXT ·And(SB), NOSPLIT, $0-12 - MOVQ ptr+0(FP), AX - MOVL val+8(FP), BX - LOCK - ANDL 
BX, (AX) - RET - -// func Or32(addr *uint32, v uint32) old uint32 -TEXT ·Or32(SB), NOSPLIT, $0-20 - MOVQ ptr+0(FP), BX - MOVL val+8(FP), CX -casloop: - MOVL CX, DX - MOVL (BX), AX - ORL AX, DX - LOCK - CMPXCHGL DX, (BX) - JNZ casloop - MOVL AX, ret+16(FP) - RET - -// func And32(addr *uint32, v uint32) old uint32 -TEXT ·And32(SB), NOSPLIT, $0-20 - MOVQ ptr+0(FP), BX - MOVL val+8(FP), CX -casloop: - MOVL CX, DX - MOVL (BX), AX - ANDL AX, DX - LOCK - CMPXCHGL DX, (BX) - JNZ casloop - MOVL AX, ret+16(FP) - RET - -// func Or64(addr *uint64, v uint64) old uint64 -TEXT ·Or64(SB), NOSPLIT, $0-24 - MOVQ ptr+0(FP), BX - MOVQ val+8(FP), CX -casloop: - MOVQ CX, DX - MOVQ (BX), AX - ORQ AX, DX - LOCK - CMPXCHGQ DX, (BX) - JNZ casloop - MOVQ AX, ret+16(FP) - RET - -// func And64(addr *uint64, v uint64) old uint64 -TEXT ·And64(SB), NOSPLIT, $0-24 - MOVQ ptr+0(FP), BX - MOVQ val+8(FP), CX -casloop: - MOVQ CX, DX - MOVQ (BX), AX - ANDQ AX, DX - LOCK - CMPXCHGQ DX, (BX) - JNZ casloop - MOVQ AX, ret+16(FP) - RET - -// func Anduintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Anduintptr(SB), NOSPLIT, $0-24 - JMP ·And64(SB) - -// func Oruintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Oruintptr(SB), NOSPLIT, $0-24 - JMP ·Or64(SB) diff --git a/src/runtime/internal/atomic/atomic_andor_generic.go b/src/runtime/internal/atomic/atomic_andor_generic.go deleted file mode 100644 index f8b148dda5..0000000000 --- a/src/runtime/internal/atomic/atomic_andor_generic.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build arm || mips || mipsle || mips64 || mips64le || wasm - -package atomic - -//go:nosplit -func And32(ptr *uint32, val uint32) uint32 { - for { - old := *ptr - if Cas(ptr, old, old&val) { - return old - } - } -} - -//go:nosplit -func Or32(ptr *uint32, val uint32) uint32 { - for { - old := *ptr - if Cas(ptr, old, old|val) { - return old - } - } -} - -//go:nosplit -func And64(ptr *uint64, val uint64) uint64 { - for { - old := *ptr - if Cas64(ptr, old, old&val) { - return old - } - } -} - -//go:nosplit -func Or64(ptr *uint64, val uint64) uint64 { - for { - old := *ptr - if Cas64(ptr, old, old|val) { - return old - } - } -} - -//go:nosplit -func Anduintptr(ptr *uintptr, val uintptr) uintptr { - for { - old := *ptr - if Casuintptr(ptr, old, old&val) { - return old - } - } -} - -//go:nosplit -func Oruintptr(ptr *uintptr, val uintptr) uintptr { - for { - old := *ptr - if Casuintptr(ptr, old, old|val) { - return old - } - } -} diff --git a/src/runtime/internal/atomic/atomic_andor_test.go b/src/runtime/internal/atomic/atomic_andor_test.go deleted file mode 100644 index a2f3b6f3a9..0000000000 --- a/src/runtime/internal/atomic/atomic_andor_test.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO(61395): move these tests to atomic_test.go once And/Or have -// implementations for all architectures. -package atomic_test - -import ( - "runtime/internal/atomic" - "testing" -) - -func TestAnd32(t *testing.T) { - // Basic sanity check. - x := uint32(0xffffffff) - for i := uint32(0); i < 32; i++ { - old := x - v := atomic.And32(&x, ^(1 << i)) - if r := uint32(0xffffffff) << (i + 1); x != r || v != old { - t.Fatalf("clearing bit %#x: want %#x, got new %#x and old %#v", uint32(1<>3)%uintptr(len(locktab))].l -} - -// Atomic add and return new value. 
-// -//go:nosplit -func Xadd(val *uint32, delta int32) uint32 { - for { - oval := *val - nval := oval + uint32(delta) - if Cas(val, oval, nval) { - return nval - } - } -} - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:nosplit -func Xchg(addr *uint32, v uint32) uint32 { - for { - old := *addr - if Cas(addr, old, v) { - return old - } - } -} - -//go:nosplit -func Xchguintptr(addr *uintptr, v uintptr) uintptr { - return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v))) -} - -// Not noescape -- it installs a pointer to addr. -func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer) - -//go:noescape -func Store(addr *uint32, v uint32) - -//go:noescape -func StoreRel(addr *uint32, v uint32) - -//go:noescape -func StoreReluintptr(addr *uintptr, v uintptr) - -//go:nosplit -func goCas64(addr *uint64, old, new uint64) bool { - if uintptr(unsafe.Pointer(addr))&7 != 0 { - *(*int)(nil) = 0 // crash on unaligned uint64 - } - _ = *addr // if nil, fault before taking the lock - var ok bool - addrLock(addr).lock() - if *addr == old { - *addr = new - ok = true - } - addrLock(addr).unlock() - return ok -} - -//go:nosplit -func goXadd64(addr *uint64, delta int64) uint64 { - if uintptr(unsafe.Pointer(addr))&7 != 0 { - *(*int)(nil) = 0 // crash on unaligned uint64 - } - _ = *addr // if nil, fault before taking the lock - var r uint64 - addrLock(addr).lock() - r = *addr + uint64(delta) - *addr = r - addrLock(addr).unlock() - return r -} - -//go:nosplit -func goXchg64(addr *uint64, v uint64) uint64 { - if uintptr(unsafe.Pointer(addr))&7 != 0 { - *(*int)(nil) = 0 // crash on unaligned uint64 - } - _ = *addr // if nil, fault before taking the lock - var r uint64 - addrLock(addr).lock() - r = *addr - *addr = v - addrLock(addr).unlock() - return r -} - -//go:nosplit -func goLoad64(addr *uint64) uint64 { - if uintptr(unsafe.Pointer(addr))&7 != 0 { - *(*int)(nil) = 0 // crash on unaligned uint64 - } - _ = *addr // if nil, fault before taking the lock - var 
r uint64 - addrLock(addr).lock() - r = *addr - addrLock(addr).unlock() - return r -} - -//go:nosplit -func goStore64(addr *uint64, v uint64) { - if uintptr(unsafe.Pointer(addr))&7 != 0 { - *(*int)(nil) = 0 // crash on unaligned uint64 - } - _ = *addr // if nil, fault before taking the lock - addrLock(addr).lock() - *addr = v - addrLock(addr).unlock() -} - -//go:nosplit -func Or8(addr *uint8, v uint8) { - // Align down to 4 bytes and use 32-bit CAS. - uaddr := uintptr(unsafe.Pointer(addr)) - addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3)) - word := uint32(v) << ((uaddr & 3) * 8) // little endian - for { - old := *addr32 - if Cas(addr32, old, old|word) { - return - } - } -} - -//go:nosplit -func And8(addr *uint8, v uint8) { - // Align down to 4 bytes and use 32-bit CAS. - uaddr := uintptr(unsafe.Pointer(addr)) - addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3)) - word := uint32(v) << ((uaddr & 3) * 8) // little endian - mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian - word |= ^mask - for { - old := *addr32 - if Cas(addr32, old, old&word) { - return - } - } -} - -//go:nosplit -func Or(addr *uint32, v uint32) { - for { - old := *addr - if Cas(addr, old, old|v) { - return - } - } -} - -//go:nosplit -func And(addr *uint32, v uint32) { - for { - old := *addr - if Cas(addr, old, old&v) { - return - } - } -} - -//go:nosplit -func armcas(ptr *uint32, old, new uint32) bool - -//go:noescape -func Load(addr *uint32) uint32 - -// NO go:noescape annotation; *addr escapes if result escapes (#31525) -func Loadp(addr unsafe.Pointer) unsafe.Pointer - -//go:noescape -func Load8(addr *uint8) uint8 - -//go:noescape -func LoadAcq(addr *uint32) uint32 - -//go:noescape -func LoadAcquintptr(ptr *uintptr) uintptr - -//go:noescape -func Cas64(addr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(addr *uint32, old, new uint32) bool - -//go:noescape -func Xadd64(addr *uint64, delta int64) uint64 - -//go:noescape -func Xchg64(addr *uint64, v uint64) uint64 - -//go:noescape 
-func Load64(addr *uint64) uint64 - -//go:noescape -func Store8(addr *uint8, v uint8) - -//go:noescape -func Store64(addr *uint64, v uint64) diff --git a/src/runtime/internal/atomic/atomic_arm.s b/src/runtime/internal/atomic/atomic_arm.s deleted file mode 100644 index 1cf7d8f6ef..0000000000 --- a/src/runtime/internal/atomic/atomic_arm.s +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "go_asm.h" -#include "textflag.h" -#include "funcdata.h" - -// bool armcas(int32 *val, int32 old, int32 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// }else -// return 0; -// -// To implement ·cas in sys_$GOOS_arm.s -// using the native instructions, use: -// -// TEXT ·cas(SB),NOSPLIT,$0 -// B ·armcas(SB) -// -TEXT ·armcas(SB),NOSPLIT,$0-13 - MOVW ptr+0(FP), R1 - MOVW old+4(FP), R2 - MOVW new+8(FP), R3 -casl: - LDREX (R1), R0 - CMP R0, R2 - BNE casfail - -#ifndef GOARM_7 - MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 - CMP $0, R11 - BEQ 2(PC) -#endif - DMB MB_ISHST - - STREX R3, (R1), R0 - CMP $0, R0 - BNE casl - MOVW $1, R0 - -#ifndef GOARM_7 - CMP $0, R11 - BEQ 2(PC) -#endif - DMB MB_ISH - - MOVB R0, ret+12(FP) - RET -casfail: - MOVW $0, R0 - MOVB R0, ret+12(FP) - RET - -// stubs - -TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-8 - B ·Load(SB) - -TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8 - B ·Load(SB) - -TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-8 - B ·Load(SB) - -TEXT ·Casint32(SB),NOSPLIT,$0-13 - B ·Cas(SB) - -TEXT ·Casint64(SB),NOSPLIT,$-4-21 - B ·Cas64(SB) - -TEXT ·Casuintptr(SB),NOSPLIT,$0-13 - B ·Cas(SB) - -TEXT ·Casp1(SB),NOSPLIT,$0-13 - B ·Cas(SB) - -TEXT ·CasRel(SB),NOSPLIT,$0-13 - B ·Cas(SB) - -TEXT ·Loadint32(SB),NOSPLIT,$0-8 - B ·Load(SB) - -TEXT ·Loadint64(SB),NOSPLIT,$-4-12 - B ·Load64(SB) - -TEXT ·Loaduintptr(SB),NOSPLIT,$0-8 - B ·Load(SB) - -TEXT ·Loaduint(SB),NOSPLIT,$0-8 - B 
·Load(SB) - -TEXT ·Storeint32(SB),NOSPLIT,$0-8 - B ·Store(SB) - -TEXT ·Storeint64(SB),NOSPLIT,$0-12 - B ·Store64(SB) - -TEXT ·Storeuintptr(SB),NOSPLIT,$0-8 - B ·Store(SB) - -TEXT ·StorepNoWB(SB),NOSPLIT,$0-8 - B ·Store(SB) - -TEXT ·StoreRel(SB),NOSPLIT,$0-8 - B ·Store(SB) - -TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8 - B ·Store(SB) - -TEXT ·Xaddint32(SB),NOSPLIT,$0-12 - B ·Xadd(SB) - -TEXT ·Xaddint64(SB),NOSPLIT,$-4-20 - B ·Xadd64(SB) - -TEXT ·Xadduintptr(SB),NOSPLIT,$0-12 - B ·Xadd(SB) - -TEXT ·Xchgint32(SB),NOSPLIT,$0-12 - B ·Xchg(SB) - -TEXT ·Xchgint64(SB),NOSPLIT,$-4-20 - B ·Xchg64(SB) - -// 64-bit atomics -// The native ARM implementations use LDREXD/STREXD, which are -// available on ARMv6k or later. We use them only on ARMv7. -// On older ARM, we use Go implementations which simulate 64-bit -// atomics with locks. -TEXT armCas64<>(SB),NOSPLIT,$0-21 - // addr is already in R1 - MOVW old_lo+4(FP), R2 - MOVW old_hi+8(FP), R3 - MOVW new_lo+12(FP), R4 - MOVW new_hi+16(FP), R5 -cas64loop: - LDREXD (R1), R6 // loads R6 and R7 - CMP R2, R6 - BNE cas64fail - CMP R3, R7 - BNE cas64fail - - DMB MB_ISHST - - STREXD R4, (R1), R0 // stores R4 and R5 - CMP $0, R0 - BNE cas64loop - MOVW $1, R0 - - DMB MB_ISH - - MOVBU R0, swapped+20(FP) - RET -cas64fail: - MOVW $0, R0 - MOVBU R0, swapped+20(FP) - RET - -TEXT armXadd64<>(SB),NOSPLIT,$0-20 - // addr is already in R1 - MOVW delta_lo+4(FP), R2 - MOVW delta_hi+8(FP), R3 - -add64loop: - LDREXD (R1), R4 // loads R4 and R5 - ADD.S R2, R4 - ADC R3, R5 - - DMB MB_ISHST - - STREXD R4, (R1), R0 // stores R4 and R5 - CMP $0, R0 - BNE add64loop - - DMB MB_ISH - - MOVW R4, new_lo+12(FP) - MOVW R5, new_hi+16(FP) - RET - -TEXT armXchg64<>(SB),NOSPLIT,$0-20 - // addr is already in R1 - MOVW new_lo+4(FP), R2 - MOVW new_hi+8(FP), R3 - -swap64loop: - LDREXD (R1), R4 // loads R4 and R5 - - DMB MB_ISHST - - STREXD R2, (R1), R0 // stores R2 and R3 - CMP $0, R0 - BNE swap64loop - - DMB MB_ISH - - MOVW R4, old_lo+12(FP) - MOVW R5, old_hi+16(FP) - RET - 
-TEXT armLoad64<>(SB),NOSPLIT,$0-12 - // addr is already in R1 - - LDREXD (R1), R2 // loads R2 and R3 - DMB MB_ISH - - MOVW R2, val_lo+4(FP) - MOVW R3, val_hi+8(FP) - RET - -TEXT armStore64<>(SB),NOSPLIT,$0-12 - // addr is already in R1 - MOVW val_lo+4(FP), R2 - MOVW val_hi+8(FP), R3 - -store64loop: - LDREXD (R1), R4 // loads R4 and R5 - - DMB MB_ISHST - - STREXD R2, (R1), R0 // stores R2 and R3 - CMP $0, R0 - BNE store64loop - - DMB MB_ISH - RET - -// The following functions all panic if their address argument isn't -// 8-byte aligned. Since we're calling back into Go code to do this, -// we have to cooperate with stack unwinding. In the normal case, the -// functions tail-call into the appropriate implementation, which -// means they must not open a frame. Hence, when they go down the -// panic path, at that point they push the LR to create a real frame -// (they don't need to pop it because panic won't return; however, we -// do need to set the SP delta back). - -// Check if R1 is 8-byte aligned, panic if not. -// Clobbers R2. 
-#define CHECK_ALIGN \ - AND.S $7, R1, R2 \ - BEQ 4(PC) \ - MOVW.W R14, -4(R13) /* prepare a real frame */ \ - BL ·panicUnaligned(SB) \ - ADD $4, R13 /* compensate SP delta */ - -TEXT ·Cas64(SB),NOSPLIT,$-4-21 - NO_LOCAL_POINTERS - MOVW addr+0(FP), R1 - CHECK_ALIGN - -#ifndef GOARM_7 - MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 - CMP $1, R11 - BEQ 2(PC) - JMP ·goCas64(SB) -#endif - JMP armCas64<>(SB) - -TEXT ·Xadd64(SB),NOSPLIT,$-4-20 - NO_LOCAL_POINTERS - MOVW addr+0(FP), R1 - CHECK_ALIGN - -#ifndef GOARM_7 - MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 - CMP $1, R11 - BEQ 2(PC) - JMP ·goXadd64(SB) -#endif - JMP armXadd64<>(SB) - -TEXT ·Xchg64(SB),NOSPLIT,$-4-20 - NO_LOCAL_POINTERS - MOVW addr+0(FP), R1 - CHECK_ALIGN - -#ifndef GOARM_7 - MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 - CMP $1, R11 - BEQ 2(PC) - JMP ·goXchg64(SB) -#endif - JMP armXchg64<>(SB) - -TEXT ·Load64(SB),NOSPLIT,$-4-12 - NO_LOCAL_POINTERS - MOVW addr+0(FP), R1 - CHECK_ALIGN - -#ifndef GOARM_7 - MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 - CMP $1, R11 - BEQ 2(PC) - JMP ·goLoad64(SB) -#endif - JMP armLoad64<>(SB) - -TEXT ·Store64(SB),NOSPLIT,$-4-12 - NO_LOCAL_POINTERS - MOVW addr+0(FP), R1 - CHECK_ALIGN - -#ifndef GOARM_7 - MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 - CMP $1, R11 - BEQ 2(PC) - JMP ·goStore64(SB) -#endif - JMP armStore64<>(SB) diff --git a/src/runtime/internal/atomic/atomic_arm64.go b/src/runtime/internal/atomic/atomic_arm64.go deleted file mode 100644 index c4c56ae895..0000000000 --- a/src/runtime/internal/atomic/atomic_arm64.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build arm64 - -package atomic - -import ( - "internal/cpu" - "unsafe" -) - -const ( - offsetARM64HasATOMICS = unsafe.Offsetof(cpu.ARM64.HasATOMICS) -) - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xadd64(ptr *uint64, delta int64) uint64 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchg64(ptr *uint64, new uint64) uint64 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:noescape -func Load(ptr *uint32) uint32 - -//go:noescape -func Load8(ptr *uint8) uint8 - -//go:noescape -func Load64(ptr *uint64) uint64 - -// NO go:noescape annotation; *ptr escapes if result escapes (#31525) -func Loadp(ptr unsafe.Pointer) unsafe.Pointer - -//go:noescape -func LoadAcq(addr *uint32) uint32 - -//go:noescape -func LoadAcq64(ptr *uint64) uint64 - -//go:noescape -func LoadAcquintptr(ptr *uintptr) uintptr - -//go:noescape -func Or8(ptr *uint8, val uint8) - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func And(ptr *uint32, val uint32) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func And32(ptr *uint32, val uint32) uint32 - -//go:noescape -func Or32(ptr *uint32, val uint32) uint32 - -//go:noescape -func And64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Or64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Anduintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Oruintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Cas64(ptr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(ptr *uint32, old, new uint32) bool - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -//go:noescape -func Store64(ptr *uint64, val uint64) - -// NO go:noescape annotation; see atomic_pointer.go. 
-func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) - -//go:noescape -func StoreRel(ptr *uint32, val uint32) - -//go:noescape -func StoreRel64(ptr *uint64, val uint64) - -//go:noescape -func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/src/runtime/internal/atomic/atomic_arm64.s b/src/runtime/internal/atomic/atomic_arm64.s deleted file mode 100644 index ede56538b8..0000000000 --- a/src/runtime/internal/atomic/atomic_arm64.s +++ /dev/null @@ -1,467 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "go_asm.h" -#include "textflag.h" - -TEXT ·Casint32(SB), NOSPLIT, $0-17 - B ·Cas(SB) - -TEXT ·Casint64(SB), NOSPLIT, $0-25 - B ·Cas64(SB) - -TEXT ·Casuintptr(SB), NOSPLIT, $0-25 - B ·Cas64(SB) - -TEXT ·CasRel(SB), NOSPLIT, $0-17 - B ·Cas(SB) - -TEXT ·Loadint32(SB), NOSPLIT, $0-12 - B ·Load(SB) - -TEXT ·Loadint64(SB), NOSPLIT, $0-16 - B ·Load64(SB) - -TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 - B ·Load64(SB) - -TEXT ·Loaduint(SB), NOSPLIT, $0-16 - B ·Load64(SB) - -TEXT ·Storeint32(SB), NOSPLIT, $0-12 - B ·Store(SB) - -TEXT ·Storeint64(SB), NOSPLIT, $0-16 - B ·Store64(SB) - -TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 - B ·Store64(SB) - -TEXT ·Xaddint32(SB), NOSPLIT, $0-20 - B ·Xadd(SB) - -TEXT ·Xaddint64(SB), NOSPLIT, $0-24 - B ·Xadd64(SB) - -TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 - B ·Xadd64(SB) - -TEXT ·Casp1(SB), NOSPLIT, $0-25 - B ·Cas64(SB) - -// uint32 ·Load(uint32 volatile* addr) -TEXT ·Load(SB),NOSPLIT,$0-12 - MOVD ptr+0(FP), R0 - LDARW (R0), R0 - MOVW R0, ret+8(FP) - RET - -// uint8 ·Load8(uint8 volatile* addr) -TEXT ·Load8(SB),NOSPLIT,$0-9 - MOVD ptr+0(FP), R0 - LDARB (R0), R0 - MOVB R0, ret+8(FP) - RET - -// uint64 ·Load64(uint64 volatile* addr) -TEXT ·Load64(SB),NOSPLIT,$0-16 - MOVD ptr+0(FP), R0 - LDAR (R0), R0 - MOVD R0, ret+8(FP) - RET - -// void *·Loadp(void *volatile *addr) -TEXT ·Loadp(SB),NOSPLIT,$0-16 - MOVD ptr+0(FP), 
R0 - LDAR (R0), R0 - MOVD R0, ret+8(FP) - RET - -// uint32 ·LoadAcq(uint32 volatile* addr) -TEXT ·LoadAcq(SB),NOSPLIT,$0-12 - B ·Load(SB) - -// uint64 ·LoadAcquintptr(uint64 volatile* addr) -TEXT ·LoadAcq64(SB),NOSPLIT,$0-16 - B ·Load64(SB) - -// uintptr ·LoadAcq64(uintptr volatile* addr) -TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16 - B ·Load64(SB) - -TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 - B ·Store64(SB) - -TEXT ·StoreRel(SB), NOSPLIT, $0-12 - B ·Store(SB) - -TEXT ·StoreRel64(SB), NOSPLIT, $0-16 - B ·Store64(SB) - -TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 - B ·Store64(SB) - -TEXT ·Store(SB), NOSPLIT, $0-12 - MOVD ptr+0(FP), R0 - MOVW val+8(FP), R1 - STLRW R1, (R0) - RET - -TEXT ·Store8(SB), NOSPLIT, $0-9 - MOVD ptr+0(FP), R0 - MOVB val+8(FP), R1 - STLRB R1, (R0) - RET - -TEXT ·Store64(SB), NOSPLIT, $0-16 - MOVD ptr+0(FP), R0 - MOVD val+8(FP), R1 - STLR R1, (R0) - RET - -// uint32 Xchg(ptr *uint32, new uint32) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R0 - MOVW new+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - SWPALW R1, (R0), R2 - MOVW R2, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRW (R0), R2 - STLXRW R1, (R0), R3 - CBNZ R3, load_store_loop - MOVW R2, ret+16(FP) - RET -#endif - -// uint64 Xchg64(ptr *uint64, new uint64) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R0 - MOVD new+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - SWPALD R1, (R0), R2 - MOVD R2, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXR (R0), R2 - STLXR R1, (R0), R3 - CBNZ R3, load_store_loop - MOVD R2, ret+16(FP) - RET -#endif - -// bool Cas(uint32 *ptr, uint32 old, uint32 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// 
return 0; -TEXT ·Cas(SB), NOSPLIT, $0-17 - MOVD ptr+0(FP), R0 - MOVW old+8(FP), R1 - MOVW new+12(FP), R2 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - MOVD R1, R3 - CASALW R3, (R0), R2 - CMP R1, R3 - CSET EQ, R0 - MOVB R0, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRW (R0), R3 - CMPW R1, R3 - BNE ok - STLXRW R2, (R0), R3 - CBNZ R3, load_store_loop -ok: - CSET EQ, R0 - MOVB R0, ret+16(FP) - RET -#endif - -// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else { -// return 0; -// } -TEXT ·Cas64(SB), NOSPLIT, $0-25 - MOVD ptr+0(FP), R0 - MOVD old+8(FP), R1 - MOVD new+16(FP), R2 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - MOVD R1, R3 - CASALD R3, (R0), R2 - CMP R1, R3 - CSET EQ, R0 - MOVB R0, ret+24(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXR (R0), R3 - CMP R1, R3 - BNE ok - STLXR R2, (R0), R3 - CBNZ R3, load_store_loop -ok: - CSET EQ, R0 - MOVB R0, ret+24(FP) - RET -#endif - -// uint32 xadd(uint32 volatile *ptr, int32 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R0 - MOVW delta+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - LDADDALW R1, (R0), R2 - ADD R1, R2 - MOVW R2, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRW (R0), R2 - ADDW R2, R1, R2 - STLXRW R2, (R0), R3 - CBNZ R3, load_store_loop - MOVW R2, ret+16(FP) - RET -#endif - -// uint64 Xadd64(uint64 volatile *ptr, int64 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R0 - MOVD delta+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - LDADDALD R1, (R0), R2 - ADD 
R1, R2 - MOVD R2, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXR (R0), R2 - ADD R2, R1, R2 - STLXR R2, (R0), R3 - CBNZ R3, load_store_loop - MOVD R2, ret+16(FP) - RET -#endif - -TEXT ·Xchgint32(SB), NOSPLIT, $0-20 - B ·Xchg(SB) - -TEXT ·Xchgint64(SB), NOSPLIT, $0-24 - B ·Xchg64(SB) - -TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 - B ·Xchg64(SB) - -TEXT ·And8(SB), NOSPLIT, $0-9 - MOVD ptr+0(FP), R0 - MOVB val+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - MVN R1, R2 - LDCLRALB R2, (R0), R3 - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRB (R0), R2 - AND R1, R2 - STLXRB R2, (R0), R3 - CBNZ R3, load_store_loop - RET -#endif - -TEXT ·Or8(SB), NOSPLIT, $0-9 - MOVD ptr+0(FP), R0 - MOVB val+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - LDORALB R1, (R0), R2 - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRB (R0), R2 - ORR R1, R2 - STLXRB R2, (R0), R3 - CBNZ R3, load_store_loop - RET -#endif - -// func And(addr *uint32, v uint32) -TEXT ·And(SB), NOSPLIT, $0-12 - MOVD ptr+0(FP), R0 - MOVW val+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - MVN R1, R2 - LDCLRALW R2, (R0), R3 - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRW (R0), R2 - AND R1, R2 - STLXRW R2, (R0), R3 - CBNZ R3, load_store_loop - RET -#endif - -// func Or(addr *uint32, v uint32) -TEXT ·Or(SB), NOSPLIT, $0-12 - MOVD ptr+0(FP), R0 - MOVW val+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - LDORALW R1, (R0), R2 - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRW (R0), R2 - ORR R1, R2 - STLXRW R2, (R0), R3 - CBNZ R3, load_store_loop - RET -#endif - -// func Or32(addr *uint32, v uint32) old uint32 -TEXT ·Or32(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R0 - MOVW val+8(FP), 
R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - LDORALW R1, (R0), R2 - MOVD R2, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRW (R0), R2 - ORR R1, R2, R3 - STLXRW R3, (R0), R4 - CBNZ R4, load_store_loop - MOVD R2, ret+16(FP) - RET -#endif - -// func And32(addr *uint32, v uint32) old uint32 -TEXT ·And32(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R0 - MOVW val+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - MVN R1, R2 - LDCLRALW R2, (R0), R3 - MOVD R3, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXRW (R0), R2 - AND R1, R2, R3 - STLXRW R3, (R0), R4 - CBNZ R4, load_store_loop - MOVD R2, ret+16(FP) - RET -#endif - -// func Or64(addr *uint64, v uint64) old uint64 -TEXT ·Or64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R0 - MOVD val+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - LDORALD R1, (R0), R2 - MOVD R2, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXR (R0), R2 - ORR R1, R2, R3 - STLXR R3, (R0), R4 - CBNZ R4, load_store_loop - MOVD R2, ret+16(FP) - RET -#endif - -// func And64(addr *uint64, v uint64) old uint64 -TEXT ·And64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R0 - MOVD val+8(FP), R1 -#ifndef GOARM64_LSE - MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 - CBZ R4, load_store_loop -#endif - MVN R1, R2 - LDCLRALD R2, (R0), R3 - MOVD R3, ret+16(FP) - RET -#ifndef GOARM64_LSE -load_store_loop: - LDAXR (R0), R2 - AND R1, R2, R3 - STLXR R3, (R0), R4 - CBNZ R4, load_store_loop - MOVD R2, ret+16(FP) - RET -#endif - -// func Anduintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Anduintptr(SB), NOSPLIT, $0-24 - B ·And64(SB) - -// func Oruintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Oruintptr(SB), NOSPLIT, $0-24 - B ·Or64(SB) diff --git a/src/runtime/internal/atomic/atomic_loong64.go 
b/src/runtime/internal/atomic/atomic_loong64.go deleted file mode 100644 index de6d4b4ba6..0000000000 --- a/src/runtime/internal/atomic/atomic_loong64.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build loong64 - -package atomic - -import "unsafe" - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xadd64(ptr *uint64, delta int64) uint64 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchg64(ptr *uint64, new uint64) uint64 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:noescape -func Load(ptr *uint32) uint32 - -//go:noescape -func Load8(ptr *uint8) uint8 - -//go:noescape -func Load64(ptr *uint64) uint64 - -// NO go:noescape annotation; *ptr escapes if result escapes (#31525) -func Loadp(ptr unsafe.Pointer) unsafe.Pointer - -//go:noescape -func LoadAcq(ptr *uint32) uint32 - -//go:noescape -func LoadAcq64(ptr *uint64) uint64 - -//go:noescape -func LoadAcquintptr(ptr *uintptr) uintptr - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func And(ptr *uint32, val uint32) - -//go:noescape -func Or8(ptr *uint8, val uint8) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func And32(ptr *uint32, val uint32) uint32 - -//go:noescape -func Or32(ptr *uint32, val uint32) uint32 - -//go:noescape -func And64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Or64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Anduintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Oruintptr(ptr *uintptr, val uintptr) uintptr - -// NOTE: Do not add atomicxor8 (XOR is not idempotent). 
- -//go:noescape -func Cas64(ptr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(ptr *uint32, old, new uint32) bool - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -//go:noescape -func Store64(ptr *uint64, val uint64) - -// NO go:noescape annotation; see atomic_pointer.go. -func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) - -//go:noescape -func StoreRel(ptr *uint32, val uint32) - -//go:noescape -func StoreRel64(ptr *uint64, val uint64) - -//go:noescape -func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/src/runtime/internal/atomic/atomic_loong64.s b/src/runtime/internal/atomic/atomic_loong64.s deleted file mode 100644 index c7452d2e11..0000000000 --- a/src/runtime/internal/atomic/atomic_loong64.s +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// bool cas(uint32 *ptr, uint32 old, uint32 new) -// Atomically: -// if(*ptr == old){ -// *ptr = new; -// return 1; -// } else -// return 0; -TEXT ·Cas(SB), NOSPLIT, $0-17 - MOVV ptr+0(FP), R4 - MOVW old+8(FP), R5 - MOVW new+12(FP), R6 - DBAR -cas_again: - MOVV R6, R7 - LL (R4), R8 - BNE R5, R8, cas_fail - SC R7, (R4) - BEQ R7, cas_again - MOVV $1, R4 - MOVB R4, ret+16(FP) - DBAR - RET -cas_fail: - MOVV $0, R4 - JMP -4(PC) - -// bool cas64(uint64 *ptr, uint64 old, uint64 new) -// Atomically: -// if(*ptr == old){ -// *ptr = new; -// return 1; -// } else { -// return 0; -// } -TEXT ·Cas64(SB), NOSPLIT, $0-25 - MOVV ptr+0(FP), R4 - MOVV old+8(FP), R5 - MOVV new+16(FP), R6 - DBAR -cas64_again: - MOVV R6, R7 - LLV (R4), R8 - BNE R5, R8, cas64_fail - SCV R7, (R4) - BEQ R7, cas64_again - MOVV $1, R4 - MOVB R4, ret+24(FP) - DBAR - RET -cas64_fail: - MOVV $0, R4 - JMP -4(PC) - -TEXT ·Casuintptr(SB), NOSPLIT, $0-25 - JMP ·Cas64(SB) - -TEXT ·CasRel(SB), NOSPLIT, $0-17 - JMP 
·Cas(SB) - -TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 - JMP ·Load64(SB) - -TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 - JMP ·Load64(SB) - -TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 - JMP ·Xadd64(SB) - -TEXT ·Loadint64(SB), NOSPLIT, $0-16 - JMP ·Load64(SB) - -TEXT ·Xaddint64(SB), NOSPLIT, $0-24 - JMP ·Xadd64(SB) - -// bool casp(void **val, void *old, void *new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// return 0; -TEXT ·Casp1(SB), NOSPLIT, $0-25 - JMP ·Cas64(SB) - -// uint32 xadd(uint32 volatile *ptr, int32 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd(SB), NOSPLIT, $0-20 - MOVV ptr+0(FP), R4 - MOVW delta+8(FP), R5 - DBAR - LL (R4), R6 - ADDU R6, R5, R7 - MOVV R7, R6 - SC R7, (R4) - BEQ R7, -4(PC) - MOVW R6, ret+16(FP) - DBAR - RET - -TEXT ·Xadd64(SB), NOSPLIT, $0-24 - MOVV ptr+0(FP), R4 - MOVV delta+8(FP), R5 - DBAR - LLV (R4), R6 - ADDVU R6, R5, R7 - MOVV R7, R6 - SCV R7, (R4) - BEQ R7, -4(PC) - MOVV R6, ret+16(FP) - DBAR - RET - -TEXT ·Xchg(SB), NOSPLIT, $0-20 - MOVV ptr+0(FP), R4 - MOVW new+8(FP), R5 - - DBAR - MOVV R5, R6 - LL (R4), R7 - SC R6, (R4) - BEQ R6, -3(PC) - MOVW R7, ret+16(FP) - DBAR - RET - -TEXT ·Xchg64(SB), NOSPLIT, $0-24 - MOVV ptr+0(FP), R4 - MOVV new+8(FP), R5 - - DBAR - MOVV R5, R6 - LLV (R4), R7 - SCV R6, (R4) - BEQ R6, -3(PC) - MOVV R7, ret+16(FP) - DBAR - RET - -TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 - JMP ·Xchg64(SB) - -TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·StoreRel(SB), NOSPLIT, $0-12 - JMP ·Store(SB) - -TEXT ·StoreRel64(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·Store(SB), NOSPLIT, $0-12 - MOVV ptr+0(FP), R4 - MOVW val+8(FP), R5 - DBAR - MOVW R5, 0(R4) - DBAR - RET - -TEXT ·Store8(SB), NOSPLIT, $0-9 - MOVV ptr+0(FP), R4 - MOVB val+8(FP), R5 - DBAR - MOVB R5, 0(R4) - DBAR - RET - -TEXT ·Store64(SB), NOSPLIT, $0-16 - MOVV 
ptr+0(FP), R4 - MOVV val+8(FP), R5 - DBAR - MOVV R5, 0(R4) - DBAR - RET - -// void Or8(byte volatile*, byte); -TEXT ·Or8(SB), NOSPLIT, $0-9 - MOVV ptr+0(FP), R4 - MOVBU val+8(FP), R5 - // Align ptr down to 4 bytes so we can use 32-bit load/store. - MOVV $~3, R6 - AND R4, R6 - // R7 = ((ptr & 3) * 8) - AND $3, R4, R7 - SLLV $3, R7 - // Shift val for aligned ptr. R5 = val << R4 - SLLV R7, R5 - - DBAR - LL (R6), R7 - OR R5, R7 - SC R7, (R6) - BEQ R7, -4(PC) - DBAR - RET - -// void And8(byte volatile*, byte); -TEXT ·And8(SB), NOSPLIT, $0-9 - MOVV ptr+0(FP), R4 - MOVBU val+8(FP), R5 - // Align ptr down to 4 bytes so we can use 32-bit load/store. - MOVV $~3, R6 - AND R4, R6 - // R7 = ((ptr & 3) * 8) - AND $3, R4, R7 - SLLV $3, R7 - // Shift val for aligned ptr. R5 = val << R7 | ^(0xFF << R7) - MOVV $0xFF, R8 - SLLV R7, R5 - SLLV R7, R8 - NOR R0, R8 - OR R8, R5 - - DBAR - LL (R6), R7 - AND R5, R7 - SC R7, (R6) - BEQ R7, -4(PC) - DBAR - RET - -// func Or(addr *uint32, v uint32) -TEXT ·Or(SB), NOSPLIT, $0-12 - MOVV ptr+0(FP), R4 - MOVW val+8(FP), R5 - DBAR - LL (R4), R6 - OR R5, R6 - SC R6, (R4) - BEQ R6, -4(PC) - DBAR - RET - -// func And(addr *uint32, v uint32) -TEXT ·And(SB), NOSPLIT, $0-12 - MOVV ptr+0(FP), R4 - MOVW val+8(FP), R5 - DBAR - LL (R4), R6 - AND R5, R6 - SC R6, (R4) - BEQ R6, -4(PC) - DBAR - RET - -// func Or32(addr *uint32, v uint32) old uint32 -TEXT ·Or32(SB), NOSPLIT, $0-20 - MOVV ptr+0(FP), R4 - MOVW val+8(FP), R5 - DBAR - LL (R4), R6 - OR R5, R6, R7 - SC R7, (R4) - BEQ R7, -4(PC) - DBAR - MOVW R6, ret+16(FP) - RET - -// func And32(addr *uint32, v uint32) old uint32 -TEXT ·And32(SB), NOSPLIT, $0-20 - MOVV ptr+0(FP), R4 - MOVW val+8(FP), R5 - DBAR - LL (R4), R6 - AND R5, R6, R7 - SC R7, (R4) - BEQ R7, -4(PC) - DBAR - MOVW R6, ret+16(FP) - RET - -// func Or64(addr *uint64, v uint64) old uint64 -TEXT ·Or64(SB), NOSPLIT, $0-24 - MOVV ptr+0(FP), R4 - MOVV val+8(FP), R5 - DBAR - LLV (R4), R6 - OR R5, R6, R7 - SCV R7, (R4) - BEQ R7, -4(PC) - DBAR - MOVV R6, 
ret+16(FP) - RET - -// func And64(addr *uint64, v uint64) old uint64 -TEXT ·And64(SB), NOSPLIT, $0-24 - MOVV ptr+0(FP), R4 - MOVV val+8(FP), R5 - DBAR - LLV (R4), R6 - AND R5, R6, R7 - SCV R7, (R4) - BEQ R7, -4(PC) - DBAR - MOVV R6, ret+16(FP) - RET - -// func Anduintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Anduintptr(SB), NOSPLIT, $0-24 - JMP ·And64(SB) - -// func Oruintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Oruintptr(SB), NOSPLIT, $0-24 - JMP ·Or64(SB) - -// uint32 runtime∕internal∕atomic·Load(uint32 volatile* ptr) -TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 - MOVV ptr+0(FP), R19 - DBAR - MOVWU 0(R19), R19 - DBAR - MOVW R19, ret+8(FP) - RET - -// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* ptr) -TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 - MOVV ptr+0(FP), R19 - DBAR - MOVBU 0(R19), R19 - DBAR - MOVB R19, ret+8(FP) - RET - -// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* ptr) -TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 - MOVV ptr+0(FP), R19 - DBAR - MOVV 0(R19), R19 - DBAR - MOVV R19, ret+8(FP) - RET - -// void *runtime∕internal∕atomic·Loadp(void *volatile *ptr) -TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 - MOVV ptr+0(FP), R19 - DBAR - MOVV 0(R19), R19 - DBAR - MOVV R19, ret+8(FP) - RET - -// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr) -TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 - JMP ·Load(SB) - -// uint64 ·LoadAcq64(uint64 volatile* ptr) -TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 - JMP ·Load64(SB) - -// uintptr ·LoadAcquintptr(uintptr volatile* ptr) -TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 - JMP ·Load64(SB) - diff --git a/src/runtime/internal/atomic/atomic_mips64x.go b/src/runtime/internal/atomic/atomic_mips64x.go deleted file mode 100644 index 1e12b83801..0000000000 --- a/src/runtime/internal/atomic/atomic_mips64x.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build mips64 || mips64le - -package atomic - -import "unsafe" - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xadd64(ptr *uint64, delta int64) uint64 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchg64(ptr *uint64, new uint64) uint64 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:noescape -func Load(ptr *uint32) uint32 - -//go:noescape -func Load8(ptr *uint8) uint8 - -//go:noescape -func Load64(ptr *uint64) uint64 - -// NO go:noescape annotation; *ptr escapes if result escapes (#31525) -func Loadp(ptr unsafe.Pointer) unsafe.Pointer - -//go:noescape -func LoadAcq(ptr *uint32) uint32 - -//go:noescape -func LoadAcq64(ptr *uint64) uint64 - -//go:noescape -func LoadAcquintptr(ptr *uintptr) uintptr - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func Or8(ptr *uint8, val uint8) - -// NOTE: Do not add atomicxor8 (XOR is not idempotent). - -//go:noescape -func And(ptr *uint32, val uint32) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func Cas64(ptr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(ptr *uint32, old, new uint32) bool - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -//go:noescape -func Store64(ptr *uint64, val uint64) - -// NO go:noescape annotation; see atomic_pointer.go. 
-func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) - -//go:noescape -func StoreRel(ptr *uint32, val uint32) - -//go:noescape -func StoreRel64(ptr *uint64, val uint64) - -//go:noescape -func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/src/runtime/internal/atomic/atomic_mips64x.s b/src/runtime/internal/atomic/atomic_mips64x.s deleted file mode 100644 index b4411d87da..0000000000 --- a/src/runtime/internal/atomic/atomic_mips64x.s +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips64 || mips64le - -#include "textflag.h" - -#define SYNC WORD $0xf - -// bool cas(uint32 *ptr, uint32 old, uint32 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// return 0; -TEXT ·Cas(SB), NOSPLIT, $0-17 - MOVV ptr+0(FP), R1 - MOVW old+8(FP), R2 - MOVW new+12(FP), R5 - SYNC -cas_again: - MOVV R5, R3 - LL (R1), R4 - BNE R2, R4, cas_fail - SC R3, (R1) - BEQ R3, cas_again - MOVV $1, R1 - MOVB R1, ret+16(FP) - SYNC - RET -cas_fail: - MOVV $0, R1 - JMP -4(PC) - -// bool cas64(uint64 *ptr, uint64 old, uint64 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else { -// return 0; -// } -TEXT ·Cas64(SB), NOSPLIT, $0-25 - MOVV ptr+0(FP), R1 - MOVV old+8(FP), R2 - MOVV new+16(FP), R5 - SYNC -cas64_again: - MOVV R5, R3 - LLV (R1), R4 - BNE R2, R4, cas64_fail - SCV R3, (R1) - BEQ R3, cas64_again - MOVV $1, R1 - MOVB R1, ret+24(FP) - SYNC - RET -cas64_fail: - MOVV $0, R1 - JMP -4(PC) - -TEXT ·Casint32(SB), NOSPLIT, $0-17 - JMP ·Cas(SB) - -TEXT ·Casint64(SB), NOSPLIT, $0-25 - JMP ·Cas64(SB) - -TEXT ·Casuintptr(SB), NOSPLIT, $0-25 - JMP ·Cas64(SB) - -TEXT ·CasRel(SB), NOSPLIT, $0-17 - JMP ·Cas(SB) - -TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 - JMP ·Load64(SB) - -TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 - JMP ·Load64(SB) - -TEXT ·Storeint32(SB), NOSPLIT, $0-12 - 
JMP ·Store(SB) - -TEXT ·Storeint64(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 - JMP ·Xadd64(SB) - -TEXT ·Loadint32(SB), NOSPLIT, $0-12 - JMP ·Load(SB) - -TEXT ·Loadint64(SB), NOSPLIT, $0-16 - JMP ·Load64(SB) - -TEXT ·Xaddint32(SB), NOSPLIT, $0-20 - JMP ·Xadd(SB) - -TEXT ·Xaddint64(SB), NOSPLIT, $0-24 - JMP ·Xadd64(SB) - -// bool casp(void **val, void *old, void *new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// return 0; -TEXT ·Casp1(SB), NOSPLIT, $0-25 - JMP ·Cas64(SB) - -// uint32 xadd(uint32 volatile *ptr, int32 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd(SB), NOSPLIT, $0-20 - MOVV ptr+0(FP), R2 - MOVW delta+8(FP), R3 - SYNC - LL (R2), R1 - ADDU R1, R3, R4 - MOVV R4, R1 - SC R4, (R2) - BEQ R4, -4(PC) - MOVW R1, ret+16(FP) - SYNC - RET - -// uint64 Xadd64(uint64 volatile *ptr, int64 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd64(SB), NOSPLIT, $0-24 - MOVV ptr+0(FP), R2 - MOVV delta+8(FP), R3 - SYNC - LLV (R2), R1 - ADDVU R1, R3, R4 - MOVV R4, R1 - SCV R4, (R2) - BEQ R4, -4(PC) - MOVV R1, ret+16(FP) - SYNC - RET - -// uint32 Xchg(ptr *uint32, new uint32) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg(SB), NOSPLIT, $0-20 - MOVV ptr+0(FP), R2 - MOVW new+8(FP), R5 - - SYNC - MOVV R5, R3 - LL (R2), R1 - SC R3, (R2) - BEQ R3, -3(PC) - MOVW R1, ret+16(FP) - SYNC - RET - -// uint64 Xchg64(ptr *uint64, new uint64) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg64(SB), NOSPLIT, $0-24 - MOVV ptr+0(FP), R2 - MOVV new+8(FP), R5 - - SYNC - MOVV R5, R3 - LLV (R2), R1 - SCV R3, (R2) - BEQ R3, -3(PC) - MOVV R1, ret+16(FP) - SYNC - RET - -TEXT ·Xchgint32(SB), NOSPLIT, $0-20 - JMP ·Xchg(SB) - -TEXT ·Xchgint64(SB), NOSPLIT, $0-24 - JMP ·Xchg64(SB) - -TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 - JMP ·Xchg64(SB) - -TEXT ·StorepNoWB(SB), NOSPLIT, 
$0-16 - JMP ·Store64(SB) - -TEXT ·StoreRel(SB), NOSPLIT, $0-12 - JMP ·Store(SB) - -TEXT ·StoreRel64(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·Store(SB), NOSPLIT, $0-12 - MOVV ptr+0(FP), R1 - MOVW val+8(FP), R2 - SYNC - MOVW R2, 0(R1) - SYNC - RET - -TEXT ·Store8(SB), NOSPLIT, $0-9 - MOVV ptr+0(FP), R1 - MOVB val+8(FP), R2 - SYNC - MOVB R2, 0(R1) - SYNC - RET - -TEXT ·Store64(SB), NOSPLIT, $0-16 - MOVV ptr+0(FP), R1 - MOVV val+8(FP), R2 - SYNC - MOVV R2, 0(R1) - SYNC - RET - -// void Or8(byte volatile*, byte); -TEXT ·Or8(SB), NOSPLIT, $0-9 - MOVV ptr+0(FP), R1 - MOVBU val+8(FP), R2 - // Align ptr down to 4 bytes so we can use 32-bit load/store. - MOVV $~3, R3 - AND R1, R3 - // Compute val shift. -#ifdef GOARCH_mips64 - // Big endian. ptr = ptr ^ 3 - XOR $3, R1 -#endif - // R4 = ((ptr & 3) * 8) - AND $3, R1, R4 - SLLV $3, R4 - // Shift val for aligned ptr. R2 = val << R4 - SLLV R4, R2 - - SYNC - LL (R3), R4 - OR R2, R4 - SC R4, (R3) - BEQ R4, -4(PC) - SYNC - RET - -// void And8(byte volatile*, byte); -TEXT ·And8(SB), NOSPLIT, $0-9 - MOVV ptr+0(FP), R1 - MOVBU val+8(FP), R2 - // Align ptr down to 4 bytes so we can use 32-bit load/store. - MOVV $~3, R3 - AND R1, R3 - // Compute val shift. -#ifdef GOARCH_mips64 - // Big endian. ptr = ptr ^ 3 - XOR $3, R1 -#endif - // R4 = ((ptr & 3) * 8) - AND $3, R1, R4 - SLLV $3, R4 - // Shift val for aligned ptr. 
R2 = val << R4 | ^(0xFF << R4) - MOVV $0xFF, R5 - SLLV R4, R2 - SLLV R4, R5 - NOR R0, R5 - OR R5, R2 - - SYNC - LL (R3), R4 - AND R2, R4 - SC R4, (R3) - BEQ R4, -4(PC) - SYNC - RET - -// func Or(addr *uint32, v uint32) -TEXT ·Or(SB), NOSPLIT, $0-12 - MOVV ptr+0(FP), R1 - MOVW val+8(FP), R2 - - SYNC - LL (R1), R3 - OR R2, R3 - SC R3, (R1) - BEQ R3, -4(PC) - SYNC - RET - -// func And(addr *uint32, v uint32) -TEXT ·And(SB), NOSPLIT, $0-12 - MOVV ptr+0(FP), R1 - MOVW val+8(FP), R2 - - SYNC - LL (R1), R3 - AND R2, R3 - SC R3, (R1) - BEQ R3, -4(PC) - SYNC - RET - -// uint32 ·Load(uint32 volatile* ptr) -TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 - MOVV ptr+0(FP), R1 - SYNC - MOVWU 0(R1), R1 - SYNC - MOVW R1, ret+8(FP) - RET - -// uint8 ·Load8(uint8 volatile* ptr) -TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 - MOVV ptr+0(FP), R1 - SYNC - MOVBU 0(R1), R1 - SYNC - MOVB R1, ret+8(FP) - RET - -// uint64 ·Load64(uint64 volatile* ptr) -TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 - MOVV ptr+0(FP), R1 - SYNC - MOVV 0(R1), R1 - SYNC - MOVV R1, ret+8(FP) - RET - -// void *·Loadp(void *volatile *ptr) -TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 - MOVV ptr+0(FP), R1 - SYNC - MOVV 0(R1), R1 - SYNC - MOVV R1, ret+8(FP) - RET - -// uint32 ·LoadAcq(uint32 volatile* ptr) -TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 - JMP atomic·Load(SB) - -// uint64 ·LoadAcq64(uint64 volatile* ptr) -TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 - JMP atomic·Load64(SB) - -// uintptr ·LoadAcquintptr(uintptr volatile* ptr) -TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 - JMP atomic·Load64(SB) diff --git a/src/runtime/internal/atomic/atomic_mipsx.go b/src/runtime/internal/atomic/atomic_mipsx.go deleted file mode 100644 index e3dcde1bde..0000000000 --- a/src/runtime/internal/atomic/atomic_mipsx.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build mips || mipsle - -// Export some functions via linkname to assembly in sync/atomic. -// -//go:linkname Xadd64 -//go:linkname Xchg64 -//go:linkname Cas64 -//go:linkname Load64 -//go:linkname Store64 - -package atomic - -import ( - "internal/cpu" - "unsafe" -) - -// TODO implement lock striping -var lock struct { - state uint32 - pad [cpu.CacheLinePadSize - 4]byte -} - -//go:noescape -func spinLock(state *uint32) - -//go:noescape -func spinUnlock(state *uint32) - -//go:nosplit -func lockAndCheck(addr *uint64) { - // ensure 8-byte alignment - if uintptr(unsafe.Pointer(addr))&7 != 0 { - panicUnaligned() - } - // force dereference before taking lock - _ = *addr - - spinLock(&lock.state) -} - -//go:nosplit -func unlock() { - spinUnlock(&lock.state) -} - -//go:nosplit -func Xadd64(addr *uint64, delta int64) (new uint64) { - lockAndCheck(addr) - - new = *addr + uint64(delta) - *addr = new - - unlock() - return -} - -//go:nosplit -func Xchg64(addr *uint64, new uint64) (old uint64) { - lockAndCheck(addr) - - old = *addr - *addr = new - - unlock() - return -} - -//go:nosplit -func Cas64(addr *uint64, old, new uint64) (swapped bool) { - lockAndCheck(addr) - - if (*addr) == old { - *addr = new - unlock() - return true - } - - unlock() - return false -} - -//go:nosplit -func Load64(addr *uint64) (val uint64) { - lockAndCheck(addr) - - val = *addr - - unlock() - return -} - -//go:nosplit -func Store64(addr *uint64, val uint64) { - lockAndCheck(addr) - - *addr = val - - unlock() - return -} - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:noescape -func Load(ptr *uint32) uint32 - -//go:noescape -func Load8(ptr *uint8) uint8 - -// NO go:noescape annotation; *ptr escapes if result escapes (#31525) -func Loadp(ptr unsafe.Pointer) unsafe.Pointer - 
-//go:noescape -func LoadAcq(ptr *uint32) uint32 - -//go:noescape -func LoadAcquintptr(ptr *uintptr) uintptr - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func Or8(ptr *uint8, val uint8) - -//go:noescape -func And(ptr *uint32, val uint32) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -// NO go:noescape annotation; see atomic_pointer.go. -func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) - -//go:noescape -func StoreRel(ptr *uint32, val uint32) - -//go:noescape -func StoreReluintptr(ptr *uintptr, val uintptr) - -//go:noescape -func CasRel(addr *uint32, old, new uint32) bool diff --git a/src/runtime/internal/atomic/atomic_mipsx.s b/src/runtime/internal/atomic/atomic_mipsx.s deleted file mode 100644 index 8f5fc53cb7..0000000000 --- a/src/runtime/internal/atomic/atomic_mipsx.s +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build mips || mipsle - -#include "textflag.h" - -// bool Cas(int32 *val, int32 old, int32 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// return 0; -TEXT ·Cas(SB),NOSPLIT,$0-13 - MOVW ptr+0(FP), R1 - MOVW old+4(FP), R2 - MOVW new+8(FP), R5 - SYNC -try_cas: - MOVW R5, R3 - LL (R1), R4 // R4 = *R1 - BNE R2, R4, cas_fail - SC R3, (R1) // *R1 = R3 - BEQ R3, try_cas - SYNC - MOVB R3, ret+12(FP) - RET -cas_fail: - SYNC - MOVB R0, ret+12(FP) - RET - -TEXT ·Store(SB),NOSPLIT,$0-8 - MOVW ptr+0(FP), R1 - MOVW val+4(FP), R2 - SYNC - MOVW R2, 0(R1) - SYNC - RET - -TEXT ·Store8(SB),NOSPLIT,$0-5 - MOVW ptr+0(FP), R1 - MOVB val+4(FP), R2 - SYNC - MOVB R2, 0(R1) - SYNC - RET - -TEXT ·Load(SB),NOSPLIT,$0-8 - MOVW ptr+0(FP), R1 - SYNC - MOVW 0(R1), R1 - SYNC - MOVW R1, ret+4(FP) - RET - -TEXT ·Load8(SB),NOSPLIT,$0-5 - MOVW ptr+0(FP), R1 - SYNC - MOVB 0(R1), R1 - SYNC - MOVB R1, ret+4(FP) - RET - -// uint32 Xadd(uint32 volatile *val, int32 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd(SB),NOSPLIT,$0-12 - MOVW ptr+0(FP), R2 - MOVW delta+4(FP), R3 - SYNC -try_xadd: - LL (R2), R1 // R1 = *R2 - ADDU R1, R3, R4 - MOVW R4, R1 - SC R4, (R2) // *R2 = R4 - BEQ R4, try_xadd - SYNC - MOVW R1, ret+8(FP) - RET - -// uint32 Xchg(ptr *uint32, new uint32) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg(SB),NOSPLIT,$0-12 - MOVW ptr+0(FP), R2 - MOVW new+4(FP), R5 - SYNC -try_xchg: - MOVW R5, R3 - LL (R2), R1 // R1 = *R2 - SC R3, (R2) // *R2 = R3 - BEQ R3, try_xchg - SYNC - MOVW R1, ret+8(FP) - RET - -TEXT ·Casint32(SB),NOSPLIT,$0-13 - JMP ·Cas(SB) - -TEXT ·Casint64(SB),NOSPLIT,$0-21 - JMP ·Cas64(SB) - -TEXT ·Casuintptr(SB),NOSPLIT,$0-13 - JMP ·Cas(SB) - -TEXT ·CasRel(SB),NOSPLIT,$0-13 - JMP ·Cas(SB) - -TEXT ·Loaduintptr(SB),NOSPLIT,$0-8 - JMP ·Load(SB) - -TEXT ·Loaduint(SB),NOSPLIT,$0-8 - JMP ·Load(SB) - -TEXT ·Loadp(SB),NOSPLIT,$-0-8 - JMP ·Load(SB) - -TEXT ·Storeint32(SB),NOSPLIT,$0-8 - JMP 
·Store(SB) - -TEXT ·Storeint64(SB),NOSPLIT,$0-12 - JMP ·Store64(SB) - -TEXT ·Storeuintptr(SB),NOSPLIT,$0-8 - JMP ·Store(SB) - -TEXT ·Xadduintptr(SB),NOSPLIT,$0-12 - JMP ·Xadd(SB) - -TEXT ·Loadint32(SB),NOSPLIT,$0-8 - JMP ·Load(SB) - -TEXT ·Loadint64(SB),NOSPLIT,$0-12 - JMP ·Load64(SB) - -TEXT ·Xaddint32(SB),NOSPLIT,$0-12 - JMP ·Xadd(SB) - -TEXT ·Xaddint64(SB),NOSPLIT,$0-20 - JMP ·Xadd64(SB) - -TEXT ·Casp1(SB),NOSPLIT,$0-13 - JMP ·Cas(SB) - -TEXT ·Xchgint32(SB),NOSPLIT,$0-12 - JMP ·Xchg(SB) - -TEXT ·Xchgint64(SB),NOSPLIT,$0-20 - JMP ·Xchg64(SB) - -TEXT ·Xchguintptr(SB),NOSPLIT,$0-12 - JMP ·Xchg(SB) - -TEXT ·StorepNoWB(SB),NOSPLIT,$0-8 - JMP ·Store(SB) - -TEXT ·StoreRel(SB),NOSPLIT,$0-8 - JMP ·Store(SB) - -TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8 - JMP ·Store(SB) - -// void Or8(byte volatile*, byte); -TEXT ·Or8(SB),NOSPLIT,$0-5 - MOVW ptr+0(FP), R1 - MOVBU val+4(FP), R2 - MOVW $~3, R3 // Align ptr down to 4 bytes so we can use 32-bit load/store. - AND R1, R3 -#ifdef GOARCH_mips - // Big endian. ptr = ptr ^ 3 - XOR $3, R1 -#endif - AND $3, R1, R4 // R4 = ((ptr & 3) * 8) - SLL $3, R4 - SLL R4, R2, R2 // Shift val for aligned ptr. R2 = val << R4 - SYNC -try_or8: - LL (R3), R4 // R4 = *R3 - OR R2, R4 - SC R4, (R3) // *R3 = R4 - BEQ R4, try_or8 - SYNC - RET - -// void And8(byte volatile*, byte); -TEXT ·And8(SB),NOSPLIT,$0-5 - MOVW ptr+0(FP), R1 - MOVBU val+4(FP), R2 - MOVW $~3, R3 - AND R1, R3 -#ifdef GOARCH_mips - // Big endian. ptr = ptr ^ 3 - XOR $3, R1 -#endif - AND $3, R1, R4 // R4 = ((ptr & 3) * 8) - SLL $3, R4 - MOVW $0xFF, R5 - SLL R4, R2 - SLL R4, R5 - NOR R0, R5 - OR R5, R2 // Shift val for aligned ptr. 
R2 = val << R4 | ^(0xFF << R4) - SYNC -try_and8: - LL (R3), R4 // R4 = *R3 - AND R2, R4 - SC R4, (R3) // *R3 = R4 - BEQ R4, try_and8 - SYNC - RET - -// func Or(addr *uint32, v uint32) -TEXT ·Or(SB), NOSPLIT, $0-8 - MOVW ptr+0(FP), R1 - MOVW val+4(FP), R2 - - SYNC - LL (R1), R3 - OR R2, R3 - SC R3, (R1) - BEQ R3, -4(PC) - SYNC - RET - -// func And(addr *uint32, v uint32) -TEXT ·And(SB), NOSPLIT, $0-8 - MOVW ptr+0(FP), R1 - MOVW val+4(FP), R2 - - SYNC - LL (R1), R3 - AND R2, R3 - SC R3, (R1) - BEQ R3, -4(PC) - SYNC - RET - -TEXT ·spinLock(SB),NOSPLIT,$0-4 - MOVW state+0(FP), R1 - MOVW $1, R2 - SYNC -try_lock: - MOVW R2, R3 -check_again: - LL (R1), R4 - BNE R4, check_again - SC R3, (R1) - BEQ R3, try_lock - SYNC - RET - -TEXT ·spinUnlock(SB),NOSPLIT,$0-4 - MOVW state+0(FP), R1 - SYNC - MOVW R0, (R1) - SYNC - RET diff --git a/src/runtime/internal/atomic/atomic_ppc64x.go b/src/runtime/internal/atomic/atomic_ppc64x.go deleted file mode 100644 index 33a92b53f4..0000000000 --- a/src/runtime/internal/atomic/atomic_ppc64x.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build ppc64 || ppc64le - -package atomic - -import "unsafe" - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xadd64(ptr *uint64, delta int64) uint64 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchg64(ptr *uint64, new uint64) uint64 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:noescape -func Load(ptr *uint32) uint32 - -//go:noescape -func Load8(ptr *uint8) uint8 - -//go:noescape -func Load64(ptr *uint64) uint64 - -// NO go:noescape annotation; *ptr escapes if result escapes (#31525) -func Loadp(ptr unsafe.Pointer) unsafe.Pointer - -//go:noescape -func LoadAcq(ptr *uint32) uint32 - -//go:noescape -func LoadAcq64(ptr *uint64) uint64 - -//go:noescape -func LoadAcquintptr(ptr *uintptr) uintptr - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func Or8(ptr *uint8, val uint8) - -// NOTE: Do not add atomicxor8 (XOR is not idempotent). 
- -//go:noescape -func And(ptr *uint32, val uint32) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func And32(ptr *uint32, val uint32) uint32 - -//go:noescape -func Or32(ptr *uint32, val uint32) uint32 - -//go:noescape -func And64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Or64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Anduintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Oruintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Cas64(ptr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(ptr *uint32, old, new uint32) bool - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -//go:noescape -func Store64(ptr *uint64, val uint64) - -//go:noescape -func StoreRel(ptr *uint32, val uint32) - -//go:noescape -func StoreRel64(ptr *uint64, val uint64) - -//go:noescape -func StoreReluintptr(ptr *uintptr, val uintptr) - -// NO go:noescape annotation; see atomic_pointer.go. -func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/src/runtime/internal/atomic/atomic_ppc64x.s b/src/runtime/internal/atomic/atomic_ppc64x.s deleted file mode 100644 index 75635b933d..0000000000 --- a/src/runtime/internal/atomic/atomic_ppc64x.s +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ppc64 || ppc64le - -#include "textflag.h" - -// For more details about how various memory models are -// enforced on POWER, the following paper provides more -// details about how they enforce C/C++ like models. This -// gives context about why the strange looking code -// sequences below work. 
-// -// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html - -// uint32 ·Load(uint32 volatile* ptr) -TEXT ·Load(SB),NOSPLIT|NOFRAME,$-8-12 - MOVD ptr+0(FP), R3 - SYNC - MOVWZ 0(R3), R3 - CMPW R3, R3, CR7 - BC 4, 30, 1(PC) // bne- cr7,0x4 - ISYNC - MOVW R3, ret+8(FP) - RET - -// uint8 ·Load8(uint8 volatile* ptr) -TEXT ·Load8(SB),NOSPLIT|NOFRAME,$-8-9 - MOVD ptr+0(FP), R3 - SYNC - MOVBZ 0(R3), R3 - CMP R3, R3, CR7 - BC 4, 30, 1(PC) // bne- cr7,0x4 - ISYNC - MOVB R3, ret+8(FP) - RET - -// uint64 ·Load64(uint64 volatile* ptr) -TEXT ·Load64(SB),NOSPLIT|NOFRAME,$-8-16 - MOVD ptr+0(FP), R3 - SYNC - MOVD 0(R3), R3 - CMP R3, R3, CR7 - BC 4, 30, 1(PC) // bne- cr7,0x4 - ISYNC - MOVD R3, ret+8(FP) - RET - -// void *·Loadp(void *volatile *ptr) -TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$-8-16 - MOVD ptr+0(FP), R3 - SYNC - MOVD 0(R3), R3 - CMP R3, R3, CR7 - BC 4, 30, 1(PC) // bne- cr7,0x4 - ISYNC - MOVD R3, ret+8(FP) - RET - -// uint32 ·LoadAcq(uint32 volatile* ptr) -TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$-8-12 - MOVD ptr+0(FP), R3 - MOVWZ 0(R3), R3 - CMPW R3, R3, CR7 - BC 4, 30, 1(PC) // bne- cr7, 0x4 - ISYNC - MOVW R3, ret+8(FP) - RET - -// uint64 ·LoadAcq64(uint64 volatile* ptr) -TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$-8-16 - MOVD ptr+0(FP), R3 - MOVD 0(R3), R3 - CMP R3, R3, CR7 - BC 4, 30, 1(PC) // bne- cr7, 0x4 - ISYNC - MOVD R3, ret+8(FP) - RET - -// bool cas(uint32 *ptr, uint32 old, uint32 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// return 0; -TEXT ·Cas(SB), NOSPLIT, $0-17 - MOVD ptr+0(FP), R3 - MOVWZ old+8(FP), R4 - MOVWZ new+12(FP), R5 - LWSYNC -cas_again: - LWAR (R3), R6 - CMPW R6, R4 - BNE cas_fail - STWCCC R5, (R3) - BNE cas_again - MOVD $1, R3 - LWSYNC - MOVB R3, ret+16(FP) - RET -cas_fail: - LWSYNC - MOVB R0, ret+16(FP) - RET - -// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else { -// return 0; -// } -TEXT ·Cas64(SB), NOSPLIT, $0-25 - 
MOVD ptr+0(FP), R3 - MOVD old+8(FP), R4 - MOVD new+16(FP), R5 - LWSYNC -cas64_again: - LDAR (R3), R6 - CMP R6, R4 - BNE cas64_fail - STDCCC R5, (R3) - BNE cas64_again - MOVD $1, R3 - LWSYNC - MOVB R3, ret+24(FP) - RET -cas64_fail: - LWSYNC - MOVB R0, ret+24(FP) - RET - -TEXT ·CasRel(SB), NOSPLIT, $0-17 - MOVD ptr+0(FP), R3 - MOVWZ old+8(FP), R4 - MOVWZ new+12(FP), R5 - LWSYNC -cas_again: - LWAR (R3), $0, R6 // 0 = Mutex release hint - CMPW R6, R4 - BNE cas_fail - STWCCC R5, (R3) - BNE cas_again - MOVD $1, R3 - MOVB R3, ret+16(FP) - RET -cas_fail: - MOVB R0, ret+16(FP) - RET - -TEXT ·Casint32(SB), NOSPLIT, $0-17 - BR ·Cas(SB) - -TEXT ·Casint64(SB), NOSPLIT, $0-25 - BR ·Cas64(SB) - -TEXT ·Casuintptr(SB), NOSPLIT, $0-25 - BR ·Cas64(SB) - -TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 - BR ·Load64(SB) - -TEXT ·LoadAcquintptr(SB), NOSPLIT|NOFRAME, $0-16 - BR ·LoadAcq64(SB) - -TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 - BR ·Load64(SB) - -TEXT ·Storeint32(SB), NOSPLIT, $0-12 - BR ·Store(SB) - -TEXT ·Storeint64(SB), NOSPLIT, $0-16 - BR ·Store64(SB) - -TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 - BR ·Store64(SB) - -TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 - BR ·StoreRel64(SB) - -TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 - BR ·Xadd64(SB) - -TEXT ·Loadint32(SB), NOSPLIT, $0-12 - BR ·Load(SB) - -TEXT ·Loadint64(SB), NOSPLIT, $0-16 - BR ·Load64(SB) - -TEXT ·Xaddint32(SB), NOSPLIT, $0-20 - BR ·Xadd(SB) - -TEXT ·Xaddint64(SB), NOSPLIT, $0-24 - BR ·Xadd64(SB) - -// bool casp(void **val, void *old, void *new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else -// return 0; -TEXT ·Casp1(SB), NOSPLIT, $0-25 - BR ·Cas64(SB) - -// uint32 xadd(uint32 volatile *ptr, int32 delta) -// Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R4 - MOVW delta+8(FP), R5 - LWSYNC - LWAR (R4), R3 - ADD R5, R3 - STWCCC R3, (R4) - BNE -3(PC) - MOVW R3, ret+16(FP) - RET - -// uint64 Xadd64(uint64 volatile *val, int64 delta) -// 
Atomically: -// *val += delta; -// return *val; -TEXT ·Xadd64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R4 - MOVD delta+8(FP), R5 - LWSYNC - LDAR (R4), R3 - ADD R5, R3 - STDCCC R3, (R4) - BNE -3(PC) - MOVD R3, ret+16(FP) - RET - -// uint32 Xchg(ptr *uint32, new uint32) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R4 - MOVW new+8(FP), R5 - LWSYNC - LWAR (R4), R3 - STWCCC R5, (R4) - BNE -2(PC) - ISYNC - MOVW R3, ret+16(FP) - RET - -// uint64 Xchg64(ptr *uint64, new uint64) -// Atomically: -// old := *ptr; -// *ptr = new; -// return old; -TEXT ·Xchg64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R4 - MOVD new+8(FP), R5 - LWSYNC - LDAR (R4), R3 - STDCCC R5, (R4) - BNE -2(PC) - ISYNC - MOVD R3, ret+16(FP) - RET - -TEXT ·Xchgint32(SB), NOSPLIT, $0-20 - BR ·Xchg(SB) - -TEXT ·Xchgint64(SB), NOSPLIT, $0-24 - BR ·Xchg64(SB) - -TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 - BR ·Xchg64(SB) - -TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 - BR ·Store64(SB) - -TEXT ·Store(SB), NOSPLIT, $0-12 - MOVD ptr+0(FP), R3 - MOVW val+8(FP), R4 - SYNC - MOVW R4, 0(R3) - RET - -TEXT ·Store8(SB), NOSPLIT, $0-9 - MOVD ptr+0(FP), R3 - MOVB val+8(FP), R4 - SYNC - MOVB R4, 0(R3) - RET - -TEXT ·Store64(SB), NOSPLIT, $0-16 - MOVD ptr+0(FP), R3 - MOVD val+8(FP), R4 - SYNC - MOVD R4, 0(R3) - RET - -TEXT ·StoreRel(SB), NOSPLIT, $0-12 - MOVD ptr+0(FP), R3 - MOVW val+8(FP), R4 - LWSYNC - MOVW R4, 0(R3) - RET - -TEXT ·StoreRel64(SB), NOSPLIT, $0-16 - MOVD ptr+0(FP), R3 - MOVD val+8(FP), R4 - LWSYNC - MOVD R4, 0(R3) - RET - -// void ·Or8(byte volatile*, byte); -TEXT ·Or8(SB), NOSPLIT, $0-9 - MOVD ptr+0(FP), R3 - MOVBZ val+8(FP), R4 - LWSYNC -again: - LBAR (R3), R6 - OR R4, R6 - STBCCC R6, (R3) - BNE again - RET - -// void ·And8(byte volatile*, byte); -TEXT ·And8(SB), NOSPLIT, $0-9 - MOVD ptr+0(FP), R3 - MOVBZ val+8(FP), R4 - LWSYNC -again: - LBAR (R3), R6 - AND R4, R6 - STBCCC R6, (R3) - BNE again - RET - -// func Or(addr *uint32, v uint32) -TEXT ·Or(SB), 
NOSPLIT, $0-12 - MOVD ptr+0(FP), R3 - MOVW val+8(FP), R4 - LWSYNC -again: - LWAR (R3), R6 - OR R4, R6 - STWCCC R6, (R3) - BNE again - RET - -// func And(addr *uint32, v uint32) -TEXT ·And(SB), NOSPLIT, $0-12 - MOVD ptr+0(FP), R3 - MOVW val+8(FP), R4 - LWSYNC -again: - LWAR (R3),R6 - AND R4, R6 - STWCCC R6, (R3) - BNE again - RET - -// func Or32(addr *uint32, v uint32) old uint32 -TEXT ·Or32(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R3 - MOVW val+8(FP), R4 - LWSYNC -again: - LWAR (R3), R6 - OR R4, R6, R7 - STWCCC R7, (R3) - BNE again - MOVW R6, ret+16(FP) - RET - -// func And32(addr *uint32, v uint32) old uint32 -TEXT ·And32(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R3 - MOVW val+8(FP), R4 - LWSYNC -again: - LWAR (R3),R6 - AND R4, R6, R7 - STWCCC R7, (R3) - BNE again - MOVW R6, ret+16(FP) - RET - -// func Or64(addr *uint64, v uint64) old uint64 -TEXT ·Or64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R3 - MOVD val+8(FP), R4 - LWSYNC -again: - LDAR (R3), R6 - OR R4, R6, R7 - STDCCC R7, (R3) - BNE again - MOVD R6, ret+16(FP) - RET - -// func And64(addr *uint64, v uint64) old uint64 -TEXT ·And64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R3 - MOVD val+8(FP), R4 - LWSYNC -again: - LDAR (R3),R6 - AND R4, R6, R7 - STDCCC R7, (R3) - BNE again - MOVD R6, ret+16(FP) - RET - -// func Anduintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Anduintptr(SB), NOSPLIT, $0-24 - JMP ·And64(SB) - -// func Oruintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Oruintptr(SB), NOSPLIT, $0-24 - JMP ·Or64(SB) diff --git a/src/runtime/internal/atomic/atomic_riscv64.go b/src/runtime/internal/atomic/atomic_riscv64.go deleted file mode 100644 index 9fc38376ae..0000000000 --- a/src/runtime/internal/atomic/atomic_riscv64.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package atomic - -import "unsafe" - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xadd64(ptr *uint64, delta int64) uint64 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchg64(ptr *uint64, new uint64) uint64 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:noescape -func Load(ptr *uint32) uint32 - -//go:noescape -func Load8(ptr *uint8) uint8 - -//go:noescape -func Load64(ptr *uint64) uint64 - -// NO go:noescape annotation; *ptr escapes if result escapes (#31525) -func Loadp(ptr unsafe.Pointer) unsafe.Pointer - -//go:noescape -func LoadAcq(ptr *uint32) uint32 - -//go:noescape -func LoadAcq64(ptr *uint64) uint64 - -//go:noescape -func LoadAcquintptr(ptr *uintptr) uintptr - -//go:noescape -func Or8(ptr *uint8, val uint8) - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func And(ptr *uint32, val uint32) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func And32(ptr *uint32, val uint32) uint32 - -//go:noescape -func Or32(ptr *uint32, val uint32) uint32 - -//go:noescape -func And64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Or64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Anduintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Oruintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Cas64(ptr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(ptr *uint32, old, new uint32) bool - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -//go:noescape -func Store64(ptr *uint64, val uint64) - -// NO go:noescape annotation; see atomic_pointer.go. 
-func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) - -//go:noescape -func StoreRel(ptr *uint32, val uint32) - -//go:noescape -func StoreRel64(ptr *uint64, val uint64) - -//go:noescape -func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/src/runtime/internal/atomic/atomic_riscv64.s b/src/runtime/internal/atomic/atomic_riscv64.s deleted file mode 100644 index bf6bd35ed7..0000000000 --- a/src/runtime/internal/atomic/atomic_riscv64.s +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// RISC-V's atomic operations have two bits, aq ("acquire") and rl ("release"), -// which may be toggled on and off. Their precise semantics are defined in -// section 6.3 of the specification, but the basic idea is as follows: -// -// - If neither aq nor rl is set, the CPU may reorder the atomic arbitrarily. -// It guarantees only that it will execute atomically. -// -// - If aq is set, the CPU may move the instruction backward, but not forward. -// -// - If rl is set, the CPU may move the instruction forward, but not backward. -// -// - If both are set, the CPU may not reorder the instruction at all. -// -// These four modes correspond to other well-known memory models on other CPUs. -// On ARM, aq corresponds to a dmb ishst, aq+rl corresponds to a dmb ish. On -// Intel, aq corresponds to an lfence, rl to an sfence, and aq+rl to an mfence -// (or a lock prefix). -// -// Go's memory model requires that -// - if a read happens after a write, the read must observe the write, and -// that -// - if a read happens concurrently with a write, the read may observe the -// write. -// aq is sufficient to guarantee this, so that's what we use here. (This jibes -// with ARM, which uses dmb ishst.) 
- -#include "textflag.h" - -// func Cas(ptr *uint64, old, new uint64) bool -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// } else { -// return 0; -// } -TEXT ·Cas(SB), NOSPLIT, $0-17 - MOV ptr+0(FP), A0 - MOVW old+8(FP), A1 - MOVW new+12(FP), A2 -cas_again: - LRW (A0), A3 - BNE A3, A1, cas_fail - SCW A2, (A0), A4 - BNE A4, ZERO, cas_again - MOV $1, A0 - MOVB A0, ret+16(FP) - RET -cas_fail: - MOV $0, A0 - MOV A0, ret+16(FP) - RET - -// func Cas64(ptr *uint64, old, new uint64) bool -TEXT ·Cas64(SB), NOSPLIT, $0-25 - MOV ptr+0(FP), A0 - MOV old+8(FP), A1 - MOV new+16(FP), A2 -cas_again: - LRD (A0), A3 - BNE A3, A1, cas_fail - SCD A2, (A0), A4 - BNE A4, ZERO, cas_again - MOV $1, A0 - MOVB A0, ret+24(FP) - RET -cas_fail: - MOVB ZERO, ret+24(FP) - RET - -// func Load(ptr *uint32) uint32 -TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 - MOV ptr+0(FP), A0 - LRW (A0), A0 - MOVW A0, ret+8(FP) - RET - -// func Load8(ptr *uint8) uint8 -TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 - MOV ptr+0(FP), A0 - FENCE - MOVBU (A0), A1 - FENCE - MOVB A1, ret+8(FP) - RET - -// func Load64(ptr *uint64) uint64 -TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 - MOV ptr+0(FP), A0 - LRD (A0), A0 - MOV A0, ret+8(FP) - RET - -// func Store(ptr *uint32, val uint32) -TEXT ·Store(SB), NOSPLIT, $0-12 - MOV ptr+0(FP), A0 - MOVW val+8(FP), A1 - AMOSWAPW A1, (A0), ZERO - RET - -// func Store8(ptr *uint8, val uint8) -TEXT ·Store8(SB), NOSPLIT, $0-9 - MOV ptr+0(FP), A0 - MOVBU val+8(FP), A1 - FENCE - MOVB A1, (A0) - FENCE - RET - -// func Store64(ptr *uint64, val uint64) -TEXT ·Store64(SB), NOSPLIT, $0-16 - MOV ptr+0(FP), A0 - MOV val+8(FP), A1 - AMOSWAPD A1, (A0), ZERO - RET - -TEXT ·Casp1(SB), NOSPLIT, $0-25 - JMP ·Cas64(SB) - -TEXT ·Casint32(SB),NOSPLIT,$0-17 - JMP ·Cas(SB) - -TEXT ·Casint64(SB),NOSPLIT,$0-25 - JMP ·Cas64(SB) - -TEXT ·Casuintptr(SB),NOSPLIT,$0-25 - JMP ·Cas64(SB) - -TEXT ·CasRel(SB), NOSPLIT, $0-17 - JMP ·Cas(SB) - -TEXT ·Loaduintptr(SB),NOSPLIT,$0-16 - JMP ·Load64(SB) - -TEXT 
·Storeint32(SB),NOSPLIT,$0-12 - JMP ·Store(SB) - -TEXT ·Storeint64(SB),NOSPLIT,$0-16 - JMP ·Store64(SB) - -TEXT ·Storeuintptr(SB),NOSPLIT,$0-16 - JMP ·Store64(SB) - -TEXT ·Loaduint(SB),NOSPLIT,$0-16 - JMP ·Loaduintptr(SB) - -TEXT ·Loadint32(SB),NOSPLIT,$0-12 - JMP ·Load(SB) - -TEXT ·Loadint64(SB),NOSPLIT,$0-16 - JMP ·Load64(SB) - -TEXT ·Xaddint32(SB),NOSPLIT,$0-20 - JMP ·Xadd(SB) - -TEXT ·Xaddint64(SB),NOSPLIT,$0-24 - MOV ptr+0(FP), A0 - MOV delta+8(FP), A1 - AMOADDD A1, (A0), A0 - ADD A0, A1, A0 - MOVW A0, ret+16(FP) - RET - -TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 - JMP ·Load(SB) - -TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 - JMP ·Load64(SB) - -TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 - JMP ·Load64(SB) - -// func Loadp(ptr unsafe.Pointer) unsafe.Pointer -TEXT ·Loadp(SB),NOSPLIT,$0-16 - JMP ·Load64(SB) - -// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) -TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·StoreRel(SB), NOSPLIT, $0-12 - JMP ·Store(SB) - -TEXT ·StoreRel64(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 - JMP ·Store64(SB) - -// func Xchg(ptr *uint32, new uint32) uint32 -TEXT ·Xchg(SB), NOSPLIT, $0-20 - MOV ptr+0(FP), A0 - MOVW new+8(FP), A1 - AMOSWAPW A1, (A0), A1 - MOVW A1, ret+16(FP) - RET - -// func Xchg64(ptr *uint64, new uint64) uint64 -TEXT ·Xchg64(SB), NOSPLIT, $0-24 - MOV ptr+0(FP), A0 - MOV new+8(FP), A1 - AMOSWAPD A1, (A0), A1 - MOV A1, ret+16(FP) - RET - -// Atomically: -// *val += delta; -// return *val; - -// func Xadd(ptr *uint32, delta int32) uint32 -TEXT ·Xadd(SB), NOSPLIT, $0-20 - MOV ptr+0(FP), A0 - MOVW delta+8(FP), A1 - AMOADDW A1, (A0), A2 - ADD A2,A1,A0 - MOVW A0, ret+16(FP) - RET - -// func Xadd64(ptr *uint64, delta int64) uint64 -TEXT ·Xadd64(SB), NOSPLIT, $0-24 - MOV ptr+0(FP), A0 - MOV delta+8(FP), A1 - AMOADDD A1, (A0), A2 - ADD A2, A1, A0 - MOV A0, ret+16(FP) - RET - -// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr -TEXT ·Xadduintptr(SB), NOSPLIT, 
$0-24 - JMP ·Xadd64(SB) - -// func Xchgint32(ptr *int32, new int32) int32 -TEXT ·Xchgint32(SB), NOSPLIT, $0-20 - JMP ·Xchg(SB) - -// func Xchgint64(ptr *int64, new int64) int64 -TEXT ·Xchgint64(SB), NOSPLIT, $0-24 - JMP ·Xchg64(SB) - -// func Xchguintptr(ptr *uintptr, new uintptr) uintptr -TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 - JMP ·Xchg64(SB) - -// func And8(ptr *uint8, val uint8) -TEXT ·And8(SB), NOSPLIT, $0-9 - MOV ptr+0(FP), A0 - MOVBU val+8(FP), A1 - AND $3, A0, A2 - AND $-4, A0 - SLL $3, A2 - XOR $255, A1 - SLL A2, A1 - XOR $-1, A1 - AMOANDW A1, (A0), ZERO - RET - -// func Or8(ptr *uint8, val uint8) -TEXT ·Or8(SB), NOSPLIT, $0-9 - MOV ptr+0(FP), A0 - MOVBU val+8(FP), A1 - AND $3, A0, A2 - AND $-4, A0 - SLL $3, A2 - SLL A2, A1 - AMOORW A1, (A0), ZERO - RET - -// func And(ptr *uint32, val uint32) -TEXT ·And(SB), NOSPLIT, $0-12 - MOV ptr+0(FP), A0 - MOVW val+8(FP), A1 - AMOANDW A1, (A0), ZERO - RET - -// func Or(ptr *uint32, val uint32) -TEXT ·Or(SB), NOSPLIT, $0-12 - MOV ptr+0(FP), A0 - MOVW val+8(FP), A1 - AMOORW A1, (A0), ZERO - RET - -// func Or32(ptr *uint32, val uint32) uint32 -TEXT ·Or32(SB), NOSPLIT, $0-20 - MOV ptr+0(FP), A0 - MOVW val+8(FP), A1 - AMOORW A1, (A0), A2 - MOVW A2, ret+16(FP) - RET - -// func And32(ptr *uint32, val uint32) uint32 -TEXT ·And32(SB), NOSPLIT, $0-20 - MOV ptr+0(FP), A0 - MOVW val+8(FP), A1 - AMOANDW A1, (A0), A2 - MOVW A2, ret+16(FP) - RET - -// func Or64(ptr *uint64, val uint64) uint64 -TEXT ·Or64(SB), NOSPLIT, $0-24 - MOV ptr+0(FP), A0 - MOV val+8(FP), A1 - AMOORD A1, (A0), A2 - MOV A2, ret+16(FP) - RET - -// func And64(ptr *uint64, val uint64) uint64 -TEXT ·And64(SB), NOSPLIT, $0-24 - MOV ptr+0(FP), A0 - MOV val+8(FP), A1 - AMOANDD A1, (A0), A2 - MOV A2, ret+16(FP) - RET - -// func Anduintptr(ptr *uintptr, val uintptr) uintptr -TEXT ·Anduintptr(SB), NOSPLIT, $0-24 - JMP ·And64(SB) - -// func Oruintptr(ptr *uintptr, val uintptr) uintptr -TEXT ·Oruintptr(SB), NOSPLIT, $0-24 - JMP ·Or64(SB) diff --git 
a/src/runtime/internal/atomic/atomic_s390x.go b/src/runtime/internal/atomic/atomic_s390x.go deleted file mode 100644 index 68b4e160f9..0000000000 --- a/src/runtime/internal/atomic/atomic_s390x.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package atomic - -import "unsafe" - -// Export some functions via linkname to assembly in sync/atomic. -// -//go:linkname Load -//go:linkname Loadp -//go:linkname Load64 - -//go:nosplit -//go:noinline -func Load(ptr *uint32) uint32 { - return *ptr -} - -//go:nosplit -//go:noinline -func Loadp(ptr unsafe.Pointer) unsafe.Pointer { - return *(*unsafe.Pointer)(ptr) -} - -//go:nosplit -//go:noinline -func Load8(ptr *uint8) uint8 { - return *ptr -} - -//go:nosplit -//go:noinline -func Load64(ptr *uint64) uint64 { - return *ptr -} - -//go:nosplit -//go:noinline -func LoadAcq(ptr *uint32) uint32 { - return *ptr -} - -//go:nosplit -//go:noinline -func LoadAcq64(ptr *uint64) uint64 { - return *ptr -} - -//go:nosplit -//go:noinline -func LoadAcquintptr(ptr *uintptr) uintptr { - return *ptr -} - -//go:noescape -func Store(ptr *uint32, val uint32) - -//go:noescape -func Store8(ptr *uint8, val uint8) - -//go:noescape -func Store64(ptr *uint64, val uint64) - -// NO go:noescape annotation; see atomic_pointer.go. -func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) - -//go:nosplit -//go:noinline -func StoreRel(ptr *uint32, val uint32) { - *ptr = val -} - -//go:nosplit -//go:noinline -func StoreRel64(ptr *uint64, val uint64) { - *ptr = val -} - -//go:nosplit -//go:noinline -func StoreReluintptr(ptr *uintptr, val uintptr) { - *ptr = val -} - -//go:noescape -func And8(ptr *uint8, val uint8) - -//go:noescape -func Or8(ptr *uint8, val uint8) - -// NOTE: Do not add atomicxor8 (XOR is not idempotent). 
- -//go:noescape -func And(ptr *uint32, val uint32) - -//go:noescape -func Or(ptr *uint32, val uint32) - -//go:noescape -func And32(ptr *uint32, val uint32) uint32 - -//go:noescape -func Or32(ptr *uint32, val uint32) uint32 - -//go:noescape -func And64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Or64(ptr *uint64, val uint64) uint64 - -//go:noescape -func Anduintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Oruintptr(ptr *uintptr, val uintptr) uintptr - -//go:noescape -func Xadd(ptr *uint32, delta int32) uint32 - -//go:noescape -func Xadd64(ptr *uint64, delta int64) uint64 - -//go:noescape -func Xadduintptr(ptr *uintptr, delta uintptr) uintptr - -//go:noescape -func Xchg(ptr *uint32, new uint32) uint32 - -//go:noescape -func Xchg64(ptr *uint64, new uint64) uint64 - -//go:noescape -func Xchguintptr(ptr *uintptr, new uintptr) uintptr - -//go:noescape -func Cas64(ptr *uint64, old, new uint64) bool - -//go:noescape -func CasRel(ptr *uint32, old, new uint32) bool diff --git a/src/runtime/internal/atomic/atomic_s390x.s b/src/runtime/internal/atomic/atomic_s390x.s deleted file mode 100644 index 6e4ea0e32a..0000000000 --- a/src/runtime/internal/atomic/atomic_s390x.s +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -#include "textflag.h" - -// func Store(ptr *uint32, val uint32) -TEXT ·Store(SB), NOSPLIT, $0 - MOVD ptr+0(FP), R2 - MOVWZ val+8(FP), R3 - MOVW R3, 0(R2) - SYNC - RET - -// func Store8(ptr *uint8, val uint8) -TEXT ·Store8(SB), NOSPLIT, $0 - MOVD ptr+0(FP), R2 - MOVB val+8(FP), R3 - MOVB R3, 0(R2) - SYNC - RET - -// func Store64(ptr *uint64, val uint64) -TEXT ·Store64(SB), NOSPLIT, $0 - MOVD ptr+0(FP), R2 - MOVD val+8(FP), R3 - MOVD R3, 0(R2) - SYNC - RET - -// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) -TEXT ·StorepNoWB(SB), NOSPLIT, $0 - MOVD ptr+0(FP), R2 - MOVD val+8(FP), R3 - MOVD R3, 0(R2) - SYNC - RET - -// func Cas(ptr *uint32, old, new uint32) bool -// Atomically: -// if *ptr == old { -// *val = new -// return 1 -// } else { -// return 0 -// } -TEXT ·Cas(SB), NOSPLIT, $0-17 - MOVD ptr+0(FP), R3 - MOVWZ old+8(FP), R4 - MOVWZ new+12(FP), R5 - CS R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5 - BNE cas_fail - MOVB $1, ret+16(FP) - RET -cas_fail: - MOVB $0, ret+16(FP) - RET - -// func Cas64(ptr *uint64, old, new uint64) bool -// Atomically: -// if *ptr == old { -// *ptr = new -// return 1 -// } else { -// return 0 -// } -TEXT ·Cas64(SB), NOSPLIT, $0-25 - MOVD ptr+0(FP), R3 - MOVD old+8(FP), R4 - MOVD new+16(FP), R5 - CSG R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5 - BNE cas64_fail - MOVB $1, ret+24(FP) - RET -cas64_fail: - MOVB $0, ret+24(FP) - RET - -// func Casint32(ptr *int32, old, new int32) bool -TEXT ·Casint32(SB), NOSPLIT, $0-17 - BR ·Cas(SB) - -// func Casint64(ptr *int64, old, new int64) bool -TEXT ·Casint64(SB), NOSPLIT, $0-25 - BR ·Cas64(SB) - -// func Casuintptr(ptr *uintptr, old, new uintptr) bool -TEXT ·Casuintptr(SB), NOSPLIT, $0-25 - BR ·Cas64(SB) - -// func CasRel(ptr *uint32, old, new uint32) bool -TEXT ·CasRel(SB), NOSPLIT, $0-17 - BR ·Cas(SB) - -// func Loaduintptr(ptr *uintptr) uintptr -TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 - BR ·Load64(SB) - -// func Loaduint(ptr *uint) uint -TEXT ·Loaduint(SB), NOSPLIT, $0-16 - BR 
·Load64(SB) - -// func Storeint32(ptr *int32, new int32) -TEXT ·Storeint32(SB), NOSPLIT, $0-12 - BR ·Store(SB) - -// func Storeint64(ptr *int64, new int64) -TEXT ·Storeint64(SB), NOSPLIT, $0-16 - BR ·Store64(SB) - -// func Storeuintptr(ptr *uintptr, new uintptr) -TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 - BR ·Store64(SB) - -// func Loadint32(ptr *int32) int32 -TEXT ·Loadint32(SB), NOSPLIT, $0-12 - BR ·Load(SB) - -// func Loadint64(ptr *int64) int64 -TEXT ·Loadint64(SB), NOSPLIT, $0-16 - BR ·Load64(SB) - -// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr -TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 - BR ·Xadd64(SB) - -// func Xaddint32(ptr *int32, delta int32) int32 -TEXT ·Xaddint32(SB), NOSPLIT, $0-20 - BR ·Xadd(SB) - -// func Xaddint64(ptr *int64, delta int64) int64 -TEXT ·Xaddint64(SB), NOSPLIT, $0-24 - BR ·Xadd64(SB) - -// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool -// Atomically: -// if *ptr == old { -// *ptr = new -// return 1 -// } else { -// return 0 -// } -TEXT ·Casp1(SB), NOSPLIT, $0-25 - BR ·Cas64(SB) - -// func Xadd(ptr *uint32, delta int32) uint32 -// Atomically: -// *ptr += delta -// return *ptr -TEXT ·Xadd(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R4 - MOVW delta+8(FP), R5 - MOVW (R4), R3 -repeat: - ADD R5, R3, R6 - CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) - BNE repeat - MOVW R6, ret+16(FP) - RET - -// func Xadd64(ptr *uint64, delta int64) uint64 -TEXT ·Xadd64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R4 - MOVD delta+8(FP), R5 - MOVD (R4), R3 -repeat: - ADD R5, R3, R6 - CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) - BNE repeat - MOVD R6, ret+16(FP) - RET - -// func Xchg(ptr *uint32, new uint32) uint32 -TEXT ·Xchg(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R4 - MOVW new+8(FP), R3 - MOVW (R4), R6 -repeat: - CS R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4) - BNE repeat - MOVW R6, ret+16(FP) - RET - -// func Xchg64(ptr *uint64, new uint64) uint64 -TEXT ·Xchg64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R4 - MOVD 
new+8(FP), R3 - MOVD (R4), R6 -repeat: - CSG R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4) - BNE repeat - MOVD R6, ret+16(FP) - RET - -// func Xchgint32(ptr *int32, new int32) int32 -TEXT ·Xchgint32(SB), NOSPLIT, $0-20 - BR ·Xchg(SB) - -// func Xchgint64(ptr *int64, new int64) int64 -TEXT ·Xchgint64(SB), NOSPLIT, $0-24 - BR ·Xchg64(SB) - -// func Xchguintptr(ptr *uintptr, new uintptr) uintptr -TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 - BR ·Xchg64(SB) - -// func Or8(addr *uint8, v uint8) -TEXT ·Or8(SB), NOSPLIT, $0-9 - MOVD ptr+0(FP), R3 - MOVBZ val+8(FP), R4 - // We don't have atomic operations that work on individual bytes so we - // need to align addr down to a word boundary and create a mask - // containing v to OR with the entire word atomically. - MOVD $(3<<3), R5 - RXSBG $59, $60, $3, R3, R5 // R5 = 24 - ((addr % 4) * 8) = ((addr & 3) << 3) ^ (3 << 3) - ANDW $~3, R3 // R3 = floor(addr, 4) = addr &^ 3 - SLW R5, R4 // R4 = uint32(v) << R5 - LAO R4, R6, 0(R3) // R6 = *R3; *R3 |= R4; (atomic) - RET - -// func And8(addr *uint8, v uint8) -TEXT ·And8(SB), NOSPLIT, $0-9 - MOVD ptr+0(FP), R3 - MOVBZ val+8(FP), R4 - // We don't have atomic operations that work on individual bytes so we - // need to align addr down to a word boundary and create a mask - // containing v to AND with the entire word atomically. 
- ORW $~0xff, R4 // R4 = uint32(v) | 0xffffff00 - MOVD $(3<<3), R5 - RXSBG $59, $60, $3, R3, R5 // R5 = 24 - ((addr % 4) * 8) = ((addr & 3) << 3) ^ (3 << 3) - ANDW $~3, R3 // R3 = floor(addr, 4) = addr &^ 3 - RLL R5, R4, R4 // R4 = rotl(R4, R5) - LAN R4, R6, 0(R3) // R6 = *R3; *R3 &= R4; (atomic) - RET - -// func Or(addr *uint32, v uint32) -TEXT ·Or(SB), NOSPLIT, $0-12 - MOVD ptr+0(FP), R3 - MOVW val+8(FP), R4 - LAO R4, R6, 0(R3) // R6 = *R3; *R3 |= R4; (atomic) - RET - -// func And(addr *uint32, v uint32) -TEXT ·And(SB), NOSPLIT, $0-12 - MOVD ptr+0(FP), R3 - MOVW val+8(FP), R4 - LAN R4, R6, 0(R3) // R6 = *R3; *R3 &= R4; (atomic) - RET - -// func Or32(addr *uint32, v uint32) old uint32 -TEXT ·Or32(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R4 - MOVW val+8(FP), R5 - MOVW (R4), R3 -repeat: - OR R5, R3, R6 - CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) - BNE repeat - MOVW R3, ret+16(FP) - RET - -// func And32(addr *uint32, v uint32) old uint32 -TEXT ·And32(SB), NOSPLIT, $0-20 - MOVD ptr+0(FP), R4 - MOVW val+8(FP), R5 - MOVW (R4), R3 -repeat: - AND R5, R3, R6 - CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) - BNE repeat - MOVW R3, ret+16(FP) - RET - -// func Or64(addr *uint64, v uint64) old uint64 -TEXT ·Or64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R4 - MOVD val+8(FP), R5 - MOVD (R4), R3 -repeat: - OR R5, R3, R6 - CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) - BNE repeat - MOVD R3, ret+16(FP) - RET - -// func And64(addr *uint64, v uint64) old uint64 -TEXT ·And64(SB), NOSPLIT, $0-24 - MOVD ptr+0(FP), R4 - MOVD val+8(FP), R5 - MOVD (R4), R3 -repeat: - AND R5, R3, R6 - CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) - BNE repeat - MOVD R3, ret+16(FP) - RET - -// func Anduintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Anduintptr(SB), NOSPLIT, $0-24 - BR ·And64(SB) - -// func Oruintptr(addr *uintptr, v uintptr) old uintptr -TEXT ·Oruintptr(SB), NOSPLIT, $0-24 - BR ·Or64(SB) diff --git a/src/runtime/internal/atomic/atomic_test.go 
b/src/runtime/internal/atomic/atomic_test.go deleted file mode 100644 index 2427bfd211..0000000000 --- a/src/runtime/internal/atomic/atomic_test.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package atomic_test - -import ( - "internal/goarch" - "runtime" - "runtime/internal/atomic" - "testing" - "unsafe" -) - -func runParallel(N, iter int, f func()) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(int(N))) - done := make(chan bool) - for i := 0; i < N; i++ { - go func() { - for j := 0; j < iter; j++ { - f() - } - done <- true - }() - } - for i := 0; i < N; i++ { - <-done - } -} - -func TestXadduintptr(t *testing.T) { - N := 20 - iter := 100000 - if testing.Short() { - N = 10 - iter = 10000 - } - inc := uintptr(100) - total := uintptr(0) - runParallel(N, iter, func() { - atomic.Xadduintptr(&total, inc) - }) - if want := uintptr(N*iter) * inc; want != total { - t.Fatalf("xadduintpr error, want %d, got %d", want, total) - } - total = 0 - runParallel(N, iter, func() { - atomic.Xadduintptr(&total, inc) - atomic.Xadduintptr(&total, uintptr(-int64(inc))) - }) - if total != 0 { - t.Fatalf("xadduintpr total error, want %d, got %d", 0, total) - } -} - -// Tests that xadduintptr correctly updates 64-bit values. The place where -// we actually do so is mstats.go, functions mSysStat{Inc,Dec}. -func TestXadduintptrOnUint64(t *testing.T) { - if goarch.BigEndian { - // On big endian architectures, we never use xadduintptr to update - // 64-bit values and hence we skip the test. (Note that functions - // mSysStat{Inc,Dec} in mstats.go have explicit checks for - // big-endianness.) 
- t.Skip("skip xadduintptr on big endian architecture") - } - const inc = 100 - val := uint64(0) - atomic.Xadduintptr((*uintptr)(unsafe.Pointer(&val)), inc) - if inc != val { - t.Fatalf("xadduintptr should increase lower-order bits, want %d, got %d", inc, val) - } -} - -func shouldPanic(t *testing.T, name string, f func()) { - defer func() { - // Check that all GC maps are sane. - runtime.GC() - - err := recover() - want := "unaligned 64-bit atomic operation" - if err == nil { - t.Errorf("%s did not panic", name) - } else if s, _ := err.(string); s != want { - t.Errorf("%s: wanted panic %q, got %q", name, want, err) - } - }() - f() -} - -// Variant of sync/atomic's TestUnaligned64: -func TestUnaligned64(t *testing.T) { - // Unaligned 64-bit atomics on 32-bit systems are - // a continual source of pain. Test that on 32-bit systems they crash - // instead of failing silently. - - if unsafe.Sizeof(int(0)) != 4 { - t.Skip("test only runs on 32-bit systems") - } - - x := make([]uint32, 4) - u := unsafe.Pointer(uintptr(unsafe.Pointer(&x[0])) | 4) // force alignment to 4 - - up64 := (*uint64)(u) // misaligned - p64 := (*int64)(u) // misaligned - - shouldPanic(t, "Load64", func() { atomic.Load64(up64) }) - shouldPanic(t, "Loadint64", func() { atomic.Loadint64(p64) }) - shouldPanic(t, "Store64", func() { atomic.Store64(up64, 0) }) - shouldPanic(t, "Xadd64", func() { atomic.Xadd64(up64, 1) }) - shouldPanic(t, "Xchg64", func() { atomic.Xchg64(up64, 1) }) - shouldPanic(t, "Cas64", func() { atomic.Cas64(up64, 1, 2) }) -} - -func TestAnd8(t *testing.T) { - // Basic sanity check. - x := uint8(0xff) - for i := uint8(0); i < 8; i++ { - atomic.And8(&x, ^(1 << i)) - if r := uint8(0xff) << (i + 1); x != r { - t.Fatalf("clearing bit %#x: want %#x, got %#x", uint8(1<(SB),NOSPLIT,$0 - MOVW $0xffff0fc0, R15 // R15 is hardware PC. 
- -TEXT ·Cas(SB),NOSPLIT|NOFRAME,$0 - MOVB runtime·goarm(SB), R11 - CMP $7, R11 - BLT 2(PC) - JMP ·armcas(SB) - JMP kernelcas<>(SB) - -TEXT kernelcas<>(SB),NOSPLIT,$0 - MOVW ptr+0(FP), R2 - // trigger potential paging fault here, - // because we don't know how to traceback through __kuser_cmpxchg - MOVW (R2), R0 - MOVW old+4(FP), R0 - MOVW new+8(FP), R1 - BL cas<>(SB) - BCC ret0 - MOVW $1, R0 - MOVB R0, ret+12(FP) - RET -ret0: - MOVW $0, R0 - MOVB R0, ret+12(FP) - RET - -// As for cas, memory barriers are complicated on ARM, but the kernel -// provides a user helper. ARMv5 does not support SMP and has no -// memory barrier instruction at all. ARMv6 added SMP support and has -// a memory barrier, but it requires writing to a coprocessor -// register. ARMv7 introduced the DMB instruction, but it's expensive -// even on single-core devices. The kernel helper takes care of all of -// this for us. - -// Use kernel helper version of memory_barrier, when compiled with GOARM < 7. -TEXT memory_barrier<>(SB),NOSPLIT|NOFRAME,$0 - MOVW $0xffff0fa0, R15 // R15 is hardware PC. 
- -TEXT ·Load(SB),NOSPLIT,$0-8 - MOVW addr+0(FP), R0 - MOVW (R0), R1 - - MOVB runtime·goarm(SB), R11 - CMP $7, R11 - BGE native_barrier - BL memory_barrier<>(SB) - B end -native_barrier: - DMB MB_ISH -end: - MOVW R1, ret+4(FP) - RET - -TEXT ·Store(SB),NOSPLIT,$0-8 - MOVW addr+0(FP), R1 - MOVW v+4(FP), R2 - - MOVB runtime·goarm(SB), R8 - CMP $7, R8 - BGE native_barrier - BL memory_barrier<>(SB) - B store -native_barrier: - DMB MB_ISH - -store: - MOVW R2, (R1) - - CMP $7, R8 - BGE native_barrier2 - BL memory_barrier<>(SB) - RET -native_barrier2: - DMB MB_ISH - RET - -TEXT ·Load8(SB),NOSPLIT,$0-5 - MOVW addr+0(FP), R0 - MOVB (R0), R1 - - MOVB runtime·goarm(SB), R11 - CMP $7, R11 - BGE native_barrier - BL memory_barrier<>(SB) - B end -native_barrier: - DMB MB_ISH -end: - MOVB R1, ret+4(FP) - RET - -TEXT ·Store8(SB),NOSPLIT,$0-5 - MOVW addr+0(FP), R1 - MOVB v+4(FP), R2 - - MOVB runtime·goarm(SB), R8 - CMP $7, R8 - BGE native_barrier - BL memory_barrier<>(SB) - B store -native_barrier: - DMB MB_ISH - -store: - MOVB R2, (R1) - - CMP $7, R8 - BGE native_barrier2 - BL memory_barrier<>(SB) - RET -native_barrier2: - DMB MB_ISH - RET diff --git a/src/runtime/internal/atomic/sys_nonlinux_arm.s b/src/runtime/internal/atomic/sys_nonlinux_arm.s deleted file mode 100644 index b55bf908a2..0000000000 --- a/src/runtime/internal/atomic/sys_nonlinux_arm.s +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux - -#include "textflag.h" - -// TODO(minux): this is only valid for ARMv6+ -// bool armcas(int32 *val, int32 old, int32 new) -// Atomically: -// if(*val == old){ -// *val = new; -// return 1; -// }else -// return 0; -TEXT ·Cas(SB),NOSPLIT,$0 - JMP ·armcas(SB) - -// Non-linux OSes support only single processor machines before ARMv7. -// So we don't need memory barriers if goarm < 7. 
And we fail loud at -// startup (runtime.checkgoarm) if it is a multi-processor but goarm < 7. - -TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-8 - MOVW addr+0(FP), R0 - MOVW (R0), R1 - - MOVB runtime·goarm(SB), R11 - CMP $7, R11 - BLT 2(PC) - DMB MB_ISH - - MOVW R1, ret+4(FP) - RET - -TEXT ·Store(SB),NOSPLIT,$0-8 - MOVW addr+0(FP), R1 - MOVW v+4(FP), R2 - - MOVB runtime·goarm(SB), R8 - CMP $7, R8 - BLT 2(PC) - DMB MB_ISH - - MOVW R2, (R1) - - CMP $7, R8 - BLT 2(PC) - DMB MB_ISH - RET - -TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-5 - MOVW addr+0(FP), R0 - MOVB (R0), R1 - - MOVB runtime·goarm(SB), R11 - CMP $7, R11 - BLT 2(PC) - DMB MB_ISH - - MOVB R1, ret+4(FP) - RET - -TEXT ·Store8(SB),NOSPLIT,$0-5 - MOVW addr+0(FP), R1 - MOVB v+4(FP), R2 - - MOVB runtime·goarm(SB), R8 - CMP $7, R8 - BLT 2(PC) - DMB MB_ISH - - MOVB R2, (R1) - - CMP $7, R8 - BLT 2(PC) - DMB MB_ISH - RET - diff --git a/src/runtime/internal/atomic/types.go b/src/runtime/internal/atomic/types.go deleted file mode 100644 index 287742fee5..0000000000 --- a/src/runtime/internal/atomic/types.go +++ /dev/null @@ -1,587 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package atomic - -import "unsafe" - -// Int32 is an atomically accessed int32 value. -// -// An Int32 must not be copied. -type Int32 struct { - noCopy noCopy - value int32 -} - -// Load accesses and returns the value atomically. -// -//go:nosplit -func (i *Int32) Load() int32 { - return Loadint32(&i.value) -} - -// Store updates the value atomically. -// -//go:nosplit -func (i *Int32) Store(value int32) { - Storeint32(&i.value, value) -} - -// CompareAndSwap atomically compares i's value with old, -// and if they're equal, swaps i's value with new. -// It reports whether the swap ran. 
-// -//go:nosplit -func (i *Int32) CompareAndSwap(old, new int32) bool { - return Casint32(&i.value, old, new) -} - -// Swap replaces i's value with new, returning -// i's value before the replacement. -// -//go:nosplit -func (i *Int32) Swap(new int32) int32 { - return Xchgint32(&i.value, new) -} - -// Add adds delta to i atomically, returning -// the new updated value. -// -// This operation wraps around in the usual -// two's-complement way. -// -//go:nosplit -func (i *Int32) Add(delta int32) int32 { - return Xaddint32(&i.value, delta) -} - -// Int64 is an atomically accessed int64 value. -// -// 8-byte aligned on all platforms, unlike a regular int64. -// -// An Int64 must not be copied. -type Int64 struct { - noCopy noCopy - _ align64 - value int64 -} - -// Load accesses and returns the value atomically. -// -//go:nosplit -func (i *Int64) Load() int64 { - return Loadint64(&i.value) -} - -// Store updates the value atomically. -// -//go:nosplit -func (i *Int64) Store(value int64) { - Storeint64(&i.value, value) -} - -// CompareAndSwap atomically compares i's value with old, -// and if they're equal, swaps i's value with new. -// It reports whether the swap ran. -// -//go:nosplit -func (i *Int64) CompareAndSwap(old, new int64) bool { - return Casint64(&i.value, old, new) -} - -// Swap replaces i's value with new, returning -// i's value before the replacement. -// -//go:nosplit -func (i *Int64) Swap(new int64) int64 { - return Xchgint64(&i.value, new) -} - -// Add adds delta to i atomically, returning -// the new updated value. -// -// This operation wraps around in the usual -// two's-complement way. -// -//go:nosplit -func (i *Int64) Add(delta int64) int64 { - return Xaddint64(&i.value, delta) -} - -// Uint8 is an atomically accessed uint8 value. -// -// A Uint8 must not be copied. -type Uint8 struct { - noCopy noCopy - value uint8 -} - -// Load accesses and returns the value atomically. 
-// -//go:nosplit -func (u *Uint8) Load() uint8 { - return Load8(&u.value) -} - -// Store updates the value atomically. -// -//go:nosplit -func (u *Uint8) Store(value uint8) { - Store8(&u.value, value) -} - -// And takes value and performs a bit-wise -// "and" operation with the value of u, storing -// the result into u. -// -// The full process is performed atomically. -// -//go:nosplit -func (u *Uint8) And(value uint8) { - And8(&u.value, value) -} - -// Or takes value and performs a bit-wise -// "or" operation with the value of u, storing -// the result into u. -// -// The full process is performed atomically. -// -//go:nosplit -func (u *Uint8) Or(value uint8) { - Or8(&u.value, value) -} - -// Bool is an atomically accessed bool value. -// -// A Bool must not be copied. -type Bool struct { - // Inherits noCopy from Uint8. - u Uint8 -} - -// Load accesses and returns the value atomically. -// -//go:nosplit -func (b *Bool) Load() bool { - return b.u.Load() != 0 -} - -// Store updates the value atomically. -// -//go:nosplit -func (b *Bool) Store(value bool) { - s := uint8(0) - if value { - s = 1 - } - b.u.Store(s) -} - -// Uint32 is an atomically accessed uint32 value. -// -// A Uint32 must not be copied. -type Uint32 struct { - noCopy noCopy - value uint32 -} - -// Load accesses and returns the value atomically. -// -//go:nosplit -func (u *Uint32) Load() uint32 { - return Load(&u.value) -} - -// LoadAcquire is a partially unsynchronized version -// of Load that relaxes ordering constraints. Other threads -// may observe operations that precede this operation to -// occur after it, but no operation that occurs after it -// on this thread can be observed to occur before it. -// -// WARNING: Use sparingly and with great care. -// -//go:nosplit -func (u *Uint32) LoadAcquire() uint32 { - return LoadAcq(&u.value) -} - -// Store updates the value atomically. 
-// -//go:nosplit -func (u *Uint32) Store(value uint32) { - Store(&u.value, value) -} - -// StoreRelease is a partially unsynchronized version -// of Store that relaxes ordering constraints. Other threads -// may observe operations that occur after this operation to -// precede it, but no operation that precedes it -// on this thread can be observed to occur after it. -// -// WARNING: Use sparingly and with great care. -// -//go:nosplit -func (u *Uint32) StoreRelease(value uint32) { - StoreRel(&u.value, value) -} - -// CompareAndSwap atomically compares u's value with old, -// and if they're equal, swaps u's value with new. -// It reports whether the swap ran. -// -//go:nosplit -func (u *Uint32) CompareAndSwap(old, new uint32) bool { - return Cas(&u.value, old, new) -} - -// CompareAndSwapRelease is a partially unsynchronized version -// of Cas that relaxes ordering constraints. Other threads -// may observe operations that occur after this operation to -// precede it, but no operation that precedes it -// on this thread can be observed to occur after it. -// It reports whether the swap ran. -// -// WARNING: Use sparingly and with great care. -// -//go:nosplit -func (u *Uint32) CompareAndSwapRelease(old, new uint32) bool { - return CasRel(&u.value, old, new) -} - -// Swap replaces u's value with new, returning -// u's value before the replacement. -// -//go:nosplit -func (u *Uint32) Swap(value uint32) uint32 { - return Xchg(&u.value, value) -} - -// And takes value and performs a bit-wise -// "and" operation with the value of u, storing -// the result into u. -// -// The full process is performed atomically. -// -//go:nosplit -func (u *Uint32) And(value uint32) { - And(&u.value, value) -} - -// Or takes value and performs a bit-wise -// "or" operation with the value of u, storing -// the result into u. -// -// The full process is performed atomically. 
-// -//go:nosplit -func (u *Uint32) Or(value uint32) { - Or(&u.value, value) -} - -// Add adds delta to u atomically, returning -// the new updated value. -// -// This operation wraps around in the usual -// two's-complement way. -// -//go:nosplit -func (u *Uint32) Add(delta int32) uint32 { - return Xadd(&u.value, delta) -} - -// Uint64 is an atomically accessed uint64 value. -// -// 8-byte aligned on all platforms, unlike a regular uint64. -// -// A Uint64 must not be copied. -type Uint64 struct { - noCopy noCopy - _ align64 - value uint64 -} - -// Load accesses and returns the value atomically. -// -//go:nosplit -func (u *Uint64) Load() uint64 { - return Load64(&u.value) -} - -// Store updates the value atomically. -// -//go:nosplit -func (u *Uint64) Store(value uint64) { - Store64(&u.value, value) -} - -// CompareAndSwap atomically compares u's value with old, -// and if they're equal, swaps u's value with new. -// It reports whether the swap ran. -// -//go:nosplit -func (u *Uint64) CompareAndSwap(old, new uint64) bool { - return Cas64(&u.value, old, new) -} - -// Swap replaces u's value with new, returning -// u's value before the replacement. -// -//go:nosplit -func (u *Uint64) Swap(value uint64) uint64 { - return Xchg64(&u.value, value) -} - -// Add adds delta to u atomically, returning -// the new updated value. -// -// This operation wraps around in the usual -// two's-complement way. -// -//go:nosplit -func (u *Uint64) Add(delta int64) uint64 { - return Xadd64(&u.value, delta) -} - -// Uintptr is an atomically accessed uintptr value. -// -// A Uintptr must not be copied. -type Uintptr struct { - noCopy noCopy - value uintptr -} - -// Load accesses and returns the value atomically. -// -//go:nosplit -func (u *Uintptr) Load() uintptr { - return Loaduintptr(&u.value) -} - -// LoadAcquire is a partially unsynchronized version -// of Load that relaxes ordering constraints. 
Other threads -// may observe operations that precede this operation to -// occur after it, but no operation that occurs after it -// on this thread can be observed to occur before it. -// -// WARNING: Use sparingly and with great care. -// -//go:nosplit -func (u *Uintptr) LoadAcquire() uintptr { - return LoadAcquintptr(&u.value) -} - -// Store updates the value atomically. -// -//go:nosplit -func (u *Uintptr) Store(value uintptr) { - Storeuintptr(&u.value, value) -} - -// StoreRelease is a partially unsynchronized version -// of Store that relaxes ordering constraints. Other threads -// may observe operations that occur after this operation to -// precede it, but no operation that precedes it -// on this thread can be observed to occur after it. -// -// WARNING: Use sparingly and with great care. -// -//go:nosplit -func (u *Uintptr) StoreRelease(value uintptr) { - StoreReluintptr(&u.value, value) -} - -// CompareAndSwap atomically compares u's value with old, -// and if they're equal, swaps u's value with new. -// It reports whether the swap ran. -// -//go:nosplit -func (u *Uintptr) CompareAndSwap(old, new uintptr) bool { - return Casuintptr(&u.value, old, new) -} - -// Swap replaces u's value with new, returning -// u's value before the replacement. -// -//go:nosplit -func (u *Uintptr) Swap(value uintptr) uintptr { - return Xchguintptr(&u.value, value) -} - -// Add adds delta to u atomically, returning -// the new updated value. -// -// This operation wraps around in the usual -// two's-complement way. -// -//go:nosplit -func (u *Uintptr) Add(delta uintptr) uintptr { - return Xadduintptr(&u.value, delta) -} - -// Float64 is an atomically accessed float64 value. -// -// 8-byte aligned on all platforms, unlike a regular float64. -// -// A Float64 must not be copied. -type Float64 struct { - // Inherits noCopy and align64 from Uint64. - u Uint64 -} - -// Load accesses and returns the value atomically. 
-// -//go:nosplit -func (f *Float64) Load() float64 { - r := f.u.Load() - return *(*float64)(unsafe.Pointer(&r)) -} - -// Store updates the value atomically. -// -//go:nosplit -func (f *Float64) Store(value float64) { - f.u.Store(*(*uint64)(unsafe.Pointer(&value))) -} - -// UnsafePointer is an atomically accessed unsafe.Pointer value. -// -// Note that because of the atomicity guarantees, stores to values -// of this type never trigger a write barrier, and the relevant -// methods are suffixed with "NoWB" to indicate that explicitly. -// As a result, this type should be used carefully, and sparingly, -// mostly with values that do not live in the Go heap anyway. -// -// An UnsafePointer must not be copied. -type UnsafePointer struct { - noCopy noCopy - value unsafe.Pointer -} - -// Load accesses and returns the value atomically. -// -//go:nosplit -func (u *UnsafePointer) Load() unsafe.Pointer { - return Loadp(unsafe.Pointer(&u.value)) -} - -// StoreNoWB updates the value atomically. -// -// WARNING: As the name implies this operation does *not* -// perform a write barrier on value, and so this operation may -// hide pointers from the GC. Use with care and sparingly. -// It is safe to use with values not found in the Go heap. -// Prefer Store instead. -// -//go:nosplit -func (u *UnsafePointer) StoreNoWB(value unsafe.Pointer) { - StorepNoWB(unsafe.Pointer(&u.value), value) -} - -// Store updates the value atomically. -func (u *UnsafePointer) Store(value unsafe.Pointer) { - storePointer(&u.value, value) -} - -// provided by runtime -// -//go:linkname storePointer -func storePointer(ptr *unsafe.Pointer, new unsafe.Pointer) - -// CompareAndSwapNoWB atomically (with respect to other methods) -// compares u's value with old, and if they're equal, -// swaps u's value with new. -// It reports whether the swap ran. -// -// WARNING: As the name implies this operation does *not* -// perform a write barrier on value, and so this operation may -// hide pointers from the GC. 
Use with care and sparingly. -// It is safe to use with values not found in the Go heap. -// Prefer CompareAndSwap instead. -// -//go:nosplit -func (u *UnsafePointer) CompareAndSwapNoWB(old, new unsafe.Pointer) bool { - return Casp1(&u.value, old, new) -} - -// CompareAndSwap atomically compares u's value with old, -// and if they're equal, swaps u's value with new. -// It reports whether the swap ran. -func (u *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) bool { - return casPointer(&u.value, old, new) -} - -func casPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool - -// Pointer is an atomic pointer of type *T. -type Pointer[T any] struct { - u UnsafePointer -} - -// Load accesses and returns the value atomically. -// -//go:nosplit -func (p *Pointer[T]) Load() *T { - return (*T)(p.u.Load()) -} - -// StoreNoWB updates the value atomically. -// -// WARNING: As the name implies this operation does *not* -// perform a write barrier on value, and so this operation may -// hide pointers from the GC. Use with care and sparingly. -// It is safe to use with values not found in the Go heap. -// Prefer Store instead. -// -//go:nosplit -func (p *Pointer[T]) StoreNoWB(value *T) { - p.u.StoreNoWB(unsafe.Pointer(value)) -} - -// Store updates the value atomically. -// -//go:nosplit -func (p *Pointer[T]) Store(value *T) { - p.u.Store(unsafe.Pointer(value)) -} - -// CompareAndSwapNoWB atomically (with respect to other methods) -// compares u's value with old, and if they're equal, -// swaps u's value with new. -// It reports whether the swap ran. -// -// WARNING: As the name implies this operation does *not* -// perform a write barrier on value, and so this operation may -// hide pointers from the GC. Use with care and sparingly. -// It is safe to use with values not found in the Go heap. -// Prefer CompareAndSwap instead. 
-// -//go:nosplit -func (p *Pointer[T]) CompareAndSwapNoWB(old, new *T) bool { - return p.u.CompareAndSwapNoWB(unsafe.Pointer(old), unsafe.Pointer(new)) -} - -// CompareAndSwap atomically (with respect to other methods) -// compares u's value with old, and if they're equal, -// swaps u's value with new. -// It reports whether the swap ran. -func (p *Pointer[T]) CompareAndSwap(old, new *T) bool { - return p.u.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) -} - -// noCopy may be embedded into structs which must not be copied -// after the first use. -// -// See https://golang.org/issues/8005#issuecomment-190753527 -// for details. -type noCopy struct{} - -// Lock is a no-op used by -copylocks checker from `go vet`. -func (*noCopy) Lock() {} -func (*noCopy) Unlock() {} - -// align64 may be added to structs that must be 64-bit aligned. -// This struct is recognized by a special case in the compiler -// and will not work if copied to any other package. -type align64 struct{} diff --git a/src/runtime/internal/atomic/types_64bit.go b/src/runtime/internal/atomic/types_64bit.go deleted file mode 100644 index 006e83ba87..0000000000 --- a/src/runtime/internal/atomic/types_64bit.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm - -package atomic - -// LoadAcquire is a partially unsynchronized version -// of Load that relaxes ordering constraints. Other threads -// may observe operations that precede this operation to -// occur after it, but no operation that occurs after it -// on this thread can be observed to occur before it. -// -// WARNING: Use sparingly and with great care. 
-// -//go:nosplit -func (u *Uint64) LoadAcquire() uint64 { - return LoadAcq64(&u.value) -} - -// StoreRelease is a partially unsynchronized version -// of Store that relaxes ordering constraints. Other threads -// may observe operations that occur after this operation to -// precede it, but no operation that precedes it -// on this thread can be observed to occur after it. -// -// WARNING: Use sparingly and with great care. -// -//go:nosplit -func (u *Uint64) StoreRelease(value uint64) { - StoreRel64(&u.value, value) -} diff --git a/src/runtime/internal/atomic/unaligned.go b/src/runtime/internal/atomic/unaligned.go deleted file mode 100644 index a859de4144..0000000000 --- a/src/runtime/internal/atomic/unaligned.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package atomic - -func panicUnaligned() { - panic("unaligned 64-bit atomic operation") -} diff --git a/src/runtime/lfstack.go b/src/runtime/lfstack.go index a91ae64e53..cbec6e8447 100644 --- a/src/runtime/lfstack.go +++ b/src/runtime/lfstack.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go index 867e2b34d0..58690e45e4 100644 --- a/src/runtime/lock_futex.go +++ b/src/runtime/lock_futex.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go index 073e7d410e..32d2235ad3 100644 --- a/src/runtime/lock_sema.go +++ b/src/runtime/lock_sema.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/lockrank_on.go b/src/runtime/lockrank_on.go index b1d9999794..e95190f0b2 100644 --- a/src/runtime/lockrank_on.go +++ b/src/runtime/lockrank_on.go @@ -7,7 
+7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 271e4c43db..b531eb7168 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -104,7 +104,7 @@ import ( "internal/goarch" "internal/goexperiment" "internal/goos" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/math" "runtime/internal/sys" "unsafe" diff --git a/src/runtime/map.go b/src/runtime/map.go index bb3ac39e94..6a9345e0b4 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -56,7 +56,7 @@ package runtime import ( "internal/abi" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/math" "unsafe" ) @@ -1498,7 +1498,7 @@ func mapclone2(t *maptype, src *hmap) *hmap { dst := makemap(t, src.count, nil) dst.hash0 = src.hash0 dst.nevacuate = 0 - //flags do not need to be copied here, just like a new map has no flags. + // flags do not need to be copied here, just like a new map has no flags. 
if src.count == 0 { return dst diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index cdd1c5fc3b..61530bbe7f 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -6,7 +6,7 @@ package runtime import ( "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index d4b6eef13a..e8da133a69 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go index e190b56c86..bf597e1936 100644 --- a/src/runtime/mcentral.go +++ b/src/runtime/mcentral.go @@ -13,7 +13,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" ) diff --git a/src/runtime/mcheckmark.go b/src/runtime/mcheckmark.go index 581a5de955..258f889272 100644 --- a/src/runtime/mcheckmark.go +++ b/src/runtime/mcheckmark.go @@ -14,7 +14,7 @@ package runtime import ( "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go index 15a406d97a..9aaa57ac9e 100644 --- a/src/runtime/mem_linux.go +++ b/src/runtime/mem_linux.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index ea3d8a4579..7820f50e51 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -10,7 +10,7 @@ import ( "internal/abi" "internal/goarch" "internal/goexperiment" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index efadcdbc4e..da86fd517f 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -130,7 +130,7 @@ package runtime import ( 
"internal/cpu" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/mgclimit.go b/src/runtime/mgclimit.go index ef3cc081ce..bd6dc37e04 100644 --- a/src/runtime/mgclimit.go +++ b/src/runtime/mgclimit.go @@ -4,7 +4,7 @@ package runtime -import "runtime/internal/atomic" +import "internal/runtime/atomic" // gcCPULimiter is a mechanism to limit GC CPU utilization in situations // where it might become excessive and inhibit application progress (e.g. diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index e118ba69af..7622d1e0d8 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -10,7 +10,7 @@ import ( "internal/abi" "internal/goarch" "internal/goexperiment" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go index e9af3d60cd..cda87fe948 100644 --- a/src/runtime/mgcpacer.go +++ b/src/runtime/mgcpacer.go @@ -7,7 +7,7 @@ package runtime import ( "internal/cpu" "internal/goexperiment" - "runtime/internal/atomic" + "internal/runtime/atomic" _ "unsafe" // for go:linkname ) diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go index 9c76f8dd23..dede4a1ad3 100644 --- a/src/runtime/mgcscavenge.go +++ b/src/runtime/mgcscavenge.go @@ -92,7 +92,7 @@ package runtime import ( "internal/goos" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mgcscavenge_test.go b/src/runtime/mgcscavenge_test.go index d7624d6d72..9c4cf1f277 100644 --- a/src/runtime/mgcscavenge_test.go +++ b/src/runtime/mgcscavenge_test.go @@ -7,10 +7,10 @@ package runtime_test import ( "fmt" "internal/goos" + "internal/runtime/atomic" "math" "math/rand" . 
"runtime" - "runtime/internal/atomic" "testing" "time" ) diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index 3dbe9bcec7..bd53ed1fe1 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -27,7 +27,7 @@ package runtime import ( "internal/abi" "internal/goexperiment" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go index 7ab89754d4..b91a6bd464 100644 --- a/src/runtime/mgcwork.go +++ b/src/runtime/mgcwork.go @@ -6,7 +6,7 @@ package runtime import ( "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 0069328346..0d8f9d5ddd 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -12,7 +12,7 @@ import ( "internal/cpu" "internal/goarch" "internal/goexperiment" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go index 7c117b0a9b..46d3ebacaf 100644 --- a/src/runtime/mpagealloc.go +++ b/src/runtime/mpagealloc.go @@ -48,7 +48,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index c0836afa8b..87eed8d1dd 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -9,7 +9,7 @@ package runtime import ( "internal/abi" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/mranges.go b/src/runtime/mranges.go index 6dd1a75247..85795a9418 100644 --- a/src/runtime/mranges.go +++ b/src/runtime/mranges.go @@ -11,7 +11,7 @@ package runtime import ( "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/mspanset.go b/src/runtime/mspanset.go index 5687627e3a..3aa2b5b393 100644 --- a/src/runtime/mspanset.go +++ 
b/src/runtime/mspanset.go @@ -7,7 +7,7 @@ package runtime import ( "internal/cpu" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go index 87afec47c8..1b634bd81e 100644 --- a/src/runtime/mstats.go +++ b/src/runtime/mstats.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go index 7419bd291d..b998d2b2bd 100644 --- a/src/runtime/mwbbuf.go +++ b/src/runtime/mwbbuf.go @@ -24,7 +24,7 @@ package runtime import ( "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go index 2c5c262c58..6a73f70988 100644 --- a/src/runtime/netpoll.go +++ b/src/runtime/netpoll.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go index a34b4d8bcf..2df5a57111 100644 --- a/src/runtime/netpoll_aix.go +++ b/src/runtime/netpoll_aix.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/netpoll_epoll.go b/src/runtime/netpoll_epoll.go index 63b42a0014..ff6e0b5f89 100644 --- a/src/runtime/netpoll_epoll.go +++ b/src/runtime/netpoll_epoll.go @@ -7,8 +7,8 @@ package runtime import ( + "internal/runtime/atomic" "internal/runtime/syscall" - "runtime/internal/atomic" "unsafe" ) diff --git a/src/runtime/netpoll_kqueue.go b/src/runtime/netpoll_kqueue.go index d774dce303..32c21a2b2b 100644 --- a/src/runtime/netpoll_kqueue.go +++ b/src/runtime/netpoll_kqueue.go @@ -10,7 +10,7 @@ package runtime import ( "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/netpoll_solaris.go b/src/runtime/netpoll_solaris.go index 
41f145c866..fddc29000b 100644 --- a/src/runtime/netpoll_solaris.go +++ b/src/runtime/netpoll_solaris.go @@ -6,7 +6,7 @@ package runtime import ( "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/netpoll_stub.go b/src/runtime/netpoll_stub.go index d950661acf..c1bda3fa8b 100644 --- a/src/runtime/netpoll_stub.go +++ b/src/runtime/netpoll_stub.go @@ -6,7 +6,7 @@ package runtime -import "runtime/internal/atomic" +import "internal/runtime/atomic" var netpollInited atomic.Uint32 diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go index b01f5ba725..3f70429497 100644 --- a/src/runtime/netpoll_windows.go +++ b/src/runtime/netpoll_windows.go @@ -6,7 +6,7 @@ package runtime import ( "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go index 92daf13b1a..cf163a6bf4 100644 --- a/src/runtime/os3_solaris.go +++ b/src/runtime/os3_solaris.go @@ -7,7 +7,7 @@ package runtime import ( "internal/abi" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go index 3a5078a64c..93464cb997 100644 --- a/src/runtime/os_aix.go +++ b/src/runtime/os_aix.go @@ -8,7 +8,7 @@ package runtime import ( "internal/abi" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index c4f503c8c9..6ce656c70e 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -7,8 +7,8 @@ package runtime import ( "internal/abi" "internal/goarch" + "internal/runtime/atomic" "internal/runtime/syscall" - "runtime/internal/atomic" "unsafe" ) @@ -402,9 +402,9 @@ func unminit() { func mdestroy(mp *m) { } -//#ifdef GOARCH_386 -//#define sa_handler k_sa_handler -//#endif +// #ifdef GOARCH_386 +// #define sa_handler k_sa_handler +// #endif func sigreturn__sigaction() func 
sigtramp() // Called via C ABI diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go index 8abb688aae..735ace25ad 100644 --- a/src/runtime/os_netbsd.go +++ b/src/runtime/os_netbsd.go @@ -7,7 +7,7 @@ package runtime import ( "internal/abi" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go index 856979910a..9a21d6a8d0 100644 --- a/src/runtime/os_openbsd.go +++ b/src/runtime/os_openbsd.go @@ -6,7 +6,7 @@ package runtime import ( "internal/abi" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/os_openbsd_syscall2.go b/src/runtime/os_openbsd_syscall2.go index 0b796ade43..072f53320d 100644 --- a/src/runtime/os_openbsd_syscall2.go +++ b/src/runtime/os_openbsd_syscall2.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go index 77446d09d3..5e355f1664 100644 --- a/src/runtime/os_plan9.go +++ b/src/runtime/os_plan9.go @@ -6,7 +6,7 @@ package runtime import ( "internal/abi" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/os_wasm.go b/src/runtime/os_wasm.go index ce260de67e..fbafc319b9 100644 --- a/src/runtime/os_wasm.go +++ b/src/runtime/os_wasm.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index 244ab23dad..0074b9358a 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -7,7 +7,7 @@ package runtime import ( "internal/abi" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 99eb1c3e23..51b57520c1 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -7,7 +7,7 @@ package runtime import ( "internal/abi" 
"internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go index 1ede1113ee..f9efe237c1 100644 --- a/src/runtime/pinner.go +++ b/src/runtime/pinner.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 7f70100538..3b7d4f4d5d 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -10,7 +10,7 @@ import ( "internal/goarch" "internal/goexperiment" "internal/goos" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) @@ -5269,22 +5269,22 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { } // On mips{,le}/arm, 64bit atomics are emulated with spinlocks, in - // runtime/internal/atomic. If SIGPROF arrives while the program is inside + // internal/runtime/atomic. If SIGPROF arrives while the program is inside // the critical section, it creates a deadlock (when writing the sample). // As a workaround, create a counter of SIGPROFs while in critical section // to store the count, and pass it to sigprof.add() later when SIGPROF is // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" { if f := findfunc(pc); f.valid() { - if hasPrefix(funcname(f), "runtime/internal/atomic") { + if hasPrefix(funcname(f), "internal/runtime/atomic") { cpuprof.lostAtomic++ return } } if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 { - // runtime/internal/atomic functions call into kernel + // internal/runtime/atomic functions call into kernel // helpers on arm < 7. See - // runtime/internal/atomic/sys_linux_arm.s. + // internal/runtime/atomic/sys_linux_arm.s. 
cpuprof.lostAtomic++ return } diff --git a/src/runtime/profbuf.go b/src/runtime/profbuf.go index 4be4bc9208..8ae626b1b0 100644 --- a/src/runtime/profbuf.go +++ b/src/runtime/profbuf.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/runtime.go b/src/runtime/runtime.go index 05a2098fcd..cc6f03d2a0 100644 --- a/src/runtime/runtime.go +++ b/src/runtime/runtime.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 8c8b20aa57..48603da600 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -7,7 +7,7 @@ package runtime import ( "internal/bytealg" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index c03e7050a2..4a7ad27172 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -8,7 +8,7 @@ import ( "internal/abi" "internal/chacha8rand" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/rwmutex.go b/src/runtime/rwmutex.go index bf3b9a1cae..5833d59576 100644 --- a/src/runtime/rwmutex.go +++ b/src/runtime/rwmutex.go @@ -5,7 +5,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" ) // This is a copy of sync/rwmutex.go rewritten to work in the runtime. 
diff --git a/src/runtime/sema.go b/src/runtime/sema.go index c87fc7658e..f86a19f705 100644 --- a/src/runtime/sema.go +++ b/src/runtime/sema.go @@ -21,7 +21,7 @@ package runtime import ( "internal/cpu" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index 84391d58ed..6ca87561e8 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -8,7 +8,7 @@ package runtime import ( "internal/abi" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/sigqueue.go b/src/runtime/sigqueue.go index 51e424d55b..62a8e8a702 100644 --- a/src/runtime/sigqueue.go +++ b/src/runtime/sigqueue.go @@ -33,7 +33,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" _ "unsafe" // for go:linkname ) diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 61cd0a0fdd..8acc5e9f98 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -9,7 +9,7 @@ import ( "internal/cpu" "internal/goarch" "internal/goos" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/stubs2.go b/src/runtime/stubs2.go index 9637347a35..39bde15b1c 100644 --- a/src/runtime/stubs2.go +++ b/src/runtime/stubs2.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index 8b9977f428..bfe415360e 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -7,7 +7,7 @@ package runtime import ( "internal/abi" "internal/goarch" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go index 45175d8663..8e728b9d17 100644 --- a/src/runtime/sys_darwin.go +++ b/src/runtime/sys_darwin.go @@ -6,7 +6,7 @@ package runtime import ( "internal/abi" - 
"runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/sys_openbsd2.go b/src/runtime/sys_openbsd2.go index b38e49ee6f..8f5242018d 100644 --- a/src/runtime/sys_openbsd2.go +++ b/src/runtime/sys_openbsd2.go @@ -8,7 +8,7 @@ package runtime import ( "internal/abi" - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/testdata/testprognet/waiters.go b/src/runtime/testdata/testprognet/waiters.go index a65c40bfbe..42d51ddbd4 100644 --- a/src/runtime/testdata/testprognet/waiters.go +++ b/src/runtime/testdata/testprognet/waiters.go @@ -9,7 +9,7 @@ import ( "io" "log" "net" - "runtime/internal/atomic" + "internal/runtime/atomic" "sync" "time" _ "unsafe" // for go:linkname diff --git a/src/runtime/time.go b/src/runtime/time.go index 06a56bf7ae..7846542a80 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -8,7 +8,7 @@ package runtime import ( "internal/abi" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/trace.go b/src/runtime/trace.go index 8d7bf088dd..c641e2ba9f 100644 --- a/src/runtime/trace.go +++ b/src/runtime/trace.go @@ -18,7 +18,7 @@ import ( "internal/abi" "internal/goarch" "internal/goos" - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/trace2.go b/src/runtime/trace2.go index 48f969129a..a5ea9b4aa1 100644 --- a/src/runtime/trace2.go +++ b/src/runtime/trace2.go @@ -22,7 +22,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/trace2map.go b/src/runtime/trace2map.go index 195ec0bbe7..fc41d4f3c8 100644 --- a/src/runtime/trace2map.go +++ b/src/runtime/trace2map.go @@ -18,7 +18,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "runtime/internal/sys" "unsafe" ) diff --git a/src/runtime/trace2runtime.go b/src/runtime/trace2runtime.go index 
b391fd79ff..3e55226f28 100644 --- a/src/runtime/trace2runtime.go +++ b/src/runtime/trace2runtime.go @@ -9,7 +9,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" _ "unsafe" // for go:linkname ) diff --git a/src/runtime/trace2status.go b/src/runtime/trace2status.go index 5016e08656..34f2e4c06f 100644 --- a/src/runtime/trace2status.go +++ b/src/runtime/trace2status.go @@ -8,7 +8,7 @@ package runtime -import "runtime/internal/atomic" +import "internal/runtime/atomic" // traceGoStatus is the status of a goroutine. // diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index bfdf70af9a..100662f274 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -182,8 +182,8 @@ func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) { } } - // runtime/internal/atomic functions call into kernel helpers on - // arm < 7. See runtime/internal/atomic/sys_linux_arm.s. + // internal/runtime/atomic functions call into kernel helpers on + // arm < 7. See internal/runtime/atomic/sys_linux_arm.s. // // Start in the caller's frame. 
if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && frame.pc&0xffff0000 == 0xffff0000 { diff --git a/src/runtime/vdso_freebsd.go b/src/runtime/vdso_freebsd.go index 0fe21cf647..feecada035 100644 --- a/src/runtime/vdso_freebsd.go +++ b/src/runtime/vdso_freebsd.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/runtime/vdso_freebsd_x86.go b/src/runtime/vdso_freebsd_x86.go index 66d1c65488..7ac09cb9f1 100644 --- a/src/runtime/vdso_freebsd_x86.go +++ b/src/runtime/vdso_freebsd_x86.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/atomic" + "internal/runtime/atomic" "unsafe" ) diff --git a/src/sync/atomic/asm.s b/src/sync/atomic/asm.s index 2022304665..b9318fe8b7 100644 --- a/src/sync/atomic/asm.s +++ b/src/sync/atomic/asm.s @@ -7,79 +7,79 @@ #include "textflag.h" TEXT ·SwapInt32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xchg(SB) + JMP internal∕runtime∕atomic·Xchg(SB) TEXT ·SwapUint32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xchg(SB) + JMP internal∕runtime∕atomic·Xchg(SB) TEXT ·SwapInt64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xchg64(SB) + JMP internal∕runtime∕atomic·Xchg64(SB) TEXT ·SwapUint64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xchg64(SB) + JMP internal∕runtime∕atomic·Xchg64(SB) TEXT ·SwapUintptr(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xchguintptr(SB) + JMP internal∕runtime∕atomic·Xchguintptr(SB) TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Cas(SB) + JMP internal∕runtime∕atomic·Cas(SB) TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Cas(SB) + JMP internal∕runtime∕atomic·Cas(SB) TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Casuintptr(SB) + JMP internal∕runtime∕atomic·Casuintptr(SB) TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Cas64(SB) + JMP internal∕runtime∕atomic·Cas64(SB) TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0 - JMP 
runtime∕internal∕atomic·Cas64(SB) + JMP internal∕runtime∕atomic·Cas64(SB) TEXT ·AddInt32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xadd(SB) + JMP internal∕runtime∕atomic·Xadd(SB) TEXT ·AddUint32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xadd(SB) + JMP internal∕runtime∕atomic·Xadd(SB) TEXT ·AddUintptr(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xadduintptr(SB) + JMP internal∕runtime∕atomic·Xadduintptr(SB) TEXT ·AddInt64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xadd64(SB) + JMP internal∕runtime∕atomic·Xadd64(SB) TEXT ·AddUint64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Xadd64(SB) + JMP internal∕runtime∕atomic·Xadd64(SB) TEXT ·LoadInt32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Load(SB) + JMP internal∕runtime∕atomic·Load(SB) TEXT ·LoadUint32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Load(SB) + JMP internal∕runtime∕atomic·Load(SB) TEXT ·LoadInt64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Load64(SB) + JMP internal∕runtime∕atomic·Load64(SB) TEXT ·LoadUint64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Load64(SB) + JMP internal∕runtime∕atomic·Load64(SB) TEXT ·LoadUintptr(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Loaduintptr(SB) + JMP internal∕runtime∕atomic·Loaduintptr(SB) TEXT ·LoadPointer(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Loadp(SB) + JMP internal∕runtime∕atomic·Loadp(SB) TEXT ·StoreInt32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Store(SB) + JMP internal∕runtime∕atomic·Store(SB) TEXT ·StoreUint32(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Store(SB) + JMP internal∕runtime∕atomic·Store(SB) TEXT ·StoreInt64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Store64(SB) + JMP internal∕runtime∕atomic·Store64(SB) TEXT ·StoreUint64(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Store64(SB) + JMP internal∕runtime∕atomic·Store64(SB) TEXT ·StoreUintptr(SB),NOSPLIT,$0 - JMP runtime∕internal∕atomic·Storeuintptr(SB) + JMP internal∕runtime∕atomic·Storeuintptr(SB) diff --git a/src/sync/pool.go b/src/sync/pool.go index e094849974..9214bf6e34 100644 --- 
a/src/sync/pool.go +++ b/src/sync/pool.go @@ -295,12 +295,12 @@ func runtime_registerPoolCleanup(cleanup func()) func runtime_procPin() int func runtime_procUnpin() -// The below are implemented in runtime/internal/atomic and the +// The below are implemented in internal/runtime/atomic and the // compiler also knows to intrinsify the symbol we linkname into this // package. -//go:linkname runtime_LoadAcquintptr runtime/internal/atomic.LoadAcquintptr +//go:linkname runtime_LoadAcquintptr internal/runtime/atomic.LoadAcquintptr func runtime_LoadAcquintptr(ptr *uintptr) uintptr -//go:linkname runtime_StoreReluintptr runtime/internal/atomic.StoreReluintptr +//go:linkname runtime_StoreReluintptr internal/runtime/atomic.StoreReluintptr func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr -- cgit v1.3