about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/chan_test.go           2
-rw-r--r--  src/runtime/checkptr_test.go       2
-rw-r--r--  src/runtime/crash_cgo_test.go      2
-rw-r--r--  src/runtime/map_benchmark_test.go  1
-rw-r--r--  src/runtime/memmove_test.go        2
-rw-r--r--  src/runtime/mgcpacer_test.go       1
-rw-r--r--  src/runtime/mgcscavenge_test.go    3
-rw-r--r--  src/runtime/mpagealloc_test.go     5
-rw-r--r--  src/runtime/mpagecache_test.go     2
-rw-r--r--  src/runtime/mpallocbits_test.go    4
-rw-r--r--  src/runtime/proc_test.go           1
11 files changed, 0 insertions, 25 deletions
diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go
index 526d45bb43..5a1ca52a8c 100644
--- a/src/runtime/chan_test.go
+++ b/src/runtime/chan_test.go
@@ -309,7 +309,6 @@ func TestSelfSelect(t *testing.T) {
wg.Add(2)
c := make(chan int, chanCap)
for p := 0; p < 2; p++ {
- p := p
go func() {
defer wg.Done()
for i := 0; i < 1000; i++ {
@@ -359,7 +358,6 @@ func TestSelectStress(t *testing.T) {
var wg sync.WaitGroup
wg.Add(10)
for k := 0; k < 4; k++ {
- k := k
go func() {
for i := 0; i < N; i++ {
c[k] <- 0
diff --git a/src/runtime/checkptr_test.go b/src/runtime/checkptr_test.go
index 119708be7f..d08b052449 100644
--- a/src/runtime/checkptr_test.go
+++ b/src/runtime/checkptr_test.go
@@ -45,7 +45,6 @@ func TestCheckPtr(t *testing.T) {
}
for _, tc := range testCases {
- tc := tc
t.Run(tc.cmd, func(t *testing.T) {
t.Parallel()
got, err := testenv.CleanCmdEnv(exec.Command(exe, tc.cmd)).CombinedOutput()
@@ -88,7 +87,6 @@ func TestCheckPtr2(t *testing.T) {
}
for _, tc := range testCases {
- tc := tc
t.Run(tc.cmd, func(t *testing.T) {
t.Parallel()
got, err := testenv.CleanCmdEnv(exec.Command(exe, tc.cmd)).CombinedOutput()
diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go
index b77ff8dafd..baf4523a7a 100644
--- a/src/runtime/crash_cgo_test.go
+++ b/src/runtime/crash_cgo_test.go
@@ -752,8 +752,6 @@ func TestSegv(t *testing.T) {
}
for _, test := range []string{"Segv", "SegvInCgo", "TgkillSegv", "TgkillSegvInCgo"} {
- test := test
-
// The tgkill variants only run on Linux.
if runtime.GOOS != "linux" && strings.HasPrefix(test, "Tgkill") {
continue
diff --git a/src/runtime/map_benchmark_test.go b/src/runtime/map_benchmark_test.go
index a26b35b44d..9e93b219f1 100644
--- a/src/runtime/map_benchmark_test.go
+++ b/src/runtime/map_benchmark_test.go
@@ -493,7 +493,6 @@ func BenchmarkMapInterfacePtr(b *testing.B) {
m := map[any]bool{}
for i := 0; i < 100; i++ {
- i := i
m[&i] = true
}
diff --git a/src/runtime/memmove_test.go b/src/runtime/memmove_test.go
index 22905504d4..6065a84553 100644
--- a/src/runtime/memmove_test.go
+++ b/src/runtime/memmove_test.go
@@ -221,8 +221,6 @@ func TestMemmoveAtomicity(t *testing.T) {
for _, backward := range []bool{true, false} {
for _, n := range []int{3, 4, 5, 6, 7, 8, 9, 10, 15, 25, 49} {
- n := n
-
// test copying [N]*int.
sz := uintptr(n * PtrSize)
name := fmt.Sprint(sz)
diff --git a/src/runtime/mgcpacer_test.go b/src/runtime/mgcpacer_test.go
index ef1483d629..4b9cbf5589 100644
--- a/src/runtime/mgcpacer_test.go
+++ b/src/runtime/mgcpacer_test.go
@@ -603,7 +603,6 @@ func TestGcPacer(t *testing.T) {
// However, it is still possible to trigger this case if something exceptional
// happens between calls to revise; the framework just doesn't support this yet.
} {
- e := e
t.Run(e.name, func(t *testing.T) {
t.Parallel()
diff --git a/src/runtime/mgcscavenge_test.go b/src/runtime/mgcscavenge_test.go
index 7b86ae8ffc..4f9dbac481 100644
--- a/src/runtime/mgcscavenge_test.go
+++ b/src/runtime/mgcscavenge_test.go
@@ -285,7 +285,6 @@ func TestPallocDataFindScavengeCandidate(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := makePallocData(v.alloc, v.scavenged)
start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max)
@@ -447,7 +446,6 @@ func TestPageAllocScavenge(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
defer FreePageAlloc(b)
@@ -811,7 +809,6 @@ func TestScavengeIndex(t *testing.T) {
)
}
for _, test := range tests {
- test := test
t.Run("Bg/"+test.name, func(t *testing.T) {
mark, find, nextGen := setup(t, false)
test.mark(mark)
diff --git a/src/runtime/mpagealloc_test.go b/src/runtime/mpagealloc_test.go
index ded7a79922..45badcb260 100644
--- a/src/runtime/mpagealloc_test.go
+++ b/src/runtime/mpagealloc_test.go
@@ -181,7 +181,6 @@ func TestPageAllocGrow(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
// By creating a new pageAlloc, we will
// grow it for each chunk defined in x.
@@ -678,7 +677,6 @@ func TestPageAllocAlloc(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.before, v.scav)
defer FreePageAlloc(b)
@@ -705,7 +703,6 @@ func TestPageAllocExhaust(t *testing.T) {
t.Skip("skipping because virtual memory is limited; see #36210")
}
for _, npages := range []uintptr{1, 2, 3, 4, 5, 8, 16, 64, 1024, 1025, 2048, 2049} {
- npages := npages
t.Run(fmt.Sprintf("%d", npages), func(t *testing.T) {
// Construct b.
bDesc := make(map[ChunkIdx][]BitRange)
@@ -973,7 +970,6 @@ func TestPageAllocFree(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.before, nil)
defer FreePageAlloc(b)
@@ -1028,7 +1024,6 @@ func TestPageAllocAllocAndFree(t *testing.T) {
},
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.init, nil)
defer FreePageAlloc(b)
diff --git a/src/runtime/mpagecache_test.go b/src/runtime/mpagecache_test.go
index 19b4e04807..523a1c0b07 100644
--- a/src/runtime/mpagecache_test.go
+++ b/src/runtime/mpagecache_test.go
@@ -164,7 +164,6 @@ func TestPageCacheAlloc(t *testing.T) {
},
}
for name, test := range tests {
- test := test
t.Run(name, func(t *testing.T) {
c := test.cache
for i, h := range test.hits {
@@ -407,7 +406,6 @@ func TestPageAllocAllocToCache(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
defer FreePageAlloc(b)
diff --git a/src/runtime/mpallocbits_test.go b/src/runtime/mpallocbits_test.go
index cf49f77507..755f423f96 100644
--- a/src/runtime/mpallocbits_test.go
+++ b/src/runtime/mpallocbits_test.go
@@ -200,7 +200,6 @@ func TestMallocBitsPopcntRange(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := makePallocBits(v.init)
for _, h := range v.tests {
@@ -291,7 +290,6 @@ func TestPallocBitsSummarize(t *testing.T) {
},
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := makePallocBits(v.free)
// In the PallocBits we create 1's represent free spots, but in our actual
@@ -436,7 +434,6 @@ func TestPallocBitsAlloc(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := makePallocBits(v.before)
for iter, i := range v.hits {
@@ -498,7 +495,6 @@ func TestPallocBitsFree(t *testing.T) {
}
}
for name, v := range tests {
- v := v
t.Run(name, func(t *testing.T) {
b := makePallocBits(v.beforeInv)
invertPallocBits(b)
diff --git a/src/runtime/proc_test.go b/src/runtime/proc_test.go
index 3b606f62e4..d10d4a1fc9 100644
--- a/src/runtime/proc_test.go
+++ b/src/runtime/proc_test.go
@@ -696,7 +696,6 @@ func BenchmarkCreateGoroutinesCapture(b *testing.B) {
var wg sync.WaitGroup
wg.Add(N)
for i := 0; i < N; i++ {
- i := i
go func() {
if i >= N {
b.Logf("bad") // just to capture b