Diffstat (limited to 'internal/queue/inmemqueue')
-rw-r--r--  internal/queue/inmemqueue/queue.go       93
-rw-r--r--  internal/queue/inmemqueue/queue_test.go  118
2 files changed, 211 insertions, 0 deletions
diff --git a/internal/queue/inmemqueue/queue.go b/internal/queue/inmemqueue/queue.go
new file mode 100644
index 00000000..f930f5d5
--- /dev/null
+++ b/internal/queue/inmemqueue/queue.go
@@ -0,0 +1,93 @@
+// Copyright 2026 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package inmemqueue provides an in-memory queue implementation that can be
+// used for scheduling fetch actions.
+package inmemqueue
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "golang.org/x/pkgsite/internal"
+ "golang.org/x/pkgsite/internal/experiment"
+ "golang.org/x/pkgsite/internal/log"
+ "golang.org/x/pkgsite/internal/queue"
+)
+
+// InMemory is a Queue implementation that schedules in-process fetch
+// operations. Unlike the GCP task queue, it will not automatically retry tasks
+// on failure.
+//
+// This should only be used for local development.
+type InMemory struct {
+ queue chan internal.Modver
+ done chan struct{}
+ experiments []string
+}
+
+// InMemoryProcessFunc processes a single module, identified by its path and
+// version, and returns a status code and an error.
+type InMemoryProcessFunc func(context.Context, string, string) (int, error)
+
+// New creates a new InMemory queue that asynchronously processes scheduled
+// modules with processFunc, using workerCount parallelism to execute fetches.
+func New(ctx context.Context, workerCount int, experiments []string, processFunc InMemoryProcessFunc) *InMemory {
+ q := &InMemory{
+ queue: make(chan internal.Modver, 1000),
+ experiments: experiments,
+ done: make(chan struct{}),
+ }
+ sem := make(chan struct{}, workerCount)
+ go func() {
+ for v := range q.queue {
+ select {
+ case <-ctx.Done():
+ return
+ case sem <- struct{}{}:
+ }
+
+ // A worker slot has been acquired above, so process the module in a
+ // goroutine, releasing the slot when processFunc returns.
+ go func(v internal.Modver) {
+ defer func() { <-sem }()
+
+ log.Infof(ctx, "Fetch requested: %s (workerCount = %d)", v, cap(sem))
+
+ fetchCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+ fetchCtx = experiment.NewContext(fetchCtx, experiments...)
+ defer cancel()
+
+ if _, err := processFunc(fetchCtx, v.Path, v.Version); err != nil {
+ log.Error(fetchCtx, err)
+ }
+ }(v)
+ }
+ // The queue channel has been closed; acquire every semaphore slot to
+ for i := 0; i < cap(sem); i++ {
+ select {
+ case <-ctx.Done():
+ panic(fmt.Sprintf("InMemory queue context done: %v", ctx.Err()))
+ case sem <- struct{}{}:
+ }
+ }
+ close(q.done)
+ }()
+ return q
+}
+
+// ScheduleFetch pushes a fetch task into the local queue to be processed
+// asynchronously.
+func (q *InMemory) ScheduleFetch(ctx context.Context, modulePath, version string, _ *queue.Options) (bool, error) {
+ q.queue <- internal.Modver{Path: modulePath, Version: version}
+ return true, nil
+}
+
+// WaitForTesting closes the queue and waits for all scheduled requests to
+// finish; no more fetches may be scheduled after it. Only for use in tests.
+func (q *InMemory) WaitForTesting(ctx context.Context) {
+ close(q.queue)
+ <-q.done
+}
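Typical usage of the new queue, outside the tests below, is to construct it with a process function and schedule modules as they arrive. A minimal sketch, assuming it lives inside the pkgsite module (the package is internal); the stand-in process function and the module chosen are illustrative only and not part of this change:

package main

import (
	"context"
	"fmt"

	"golang.org/x/pkgsite/internal/queue/inmemqueue"
)

func main() {
	ctx := context.Background()

	// Stand-in process function; a real caller would perform the fetch here.
	process := func(ctx context.Context, path, version string) (int, error) {
		fmt.Printf("fetching %s@%s\n", path, version)
		return 200, nil
	}

	q := inmemqueue.New(ctx, 2, nil, process)
	if _, err := q.ScheduleFetch(ctx, "golang.org/x/text", "v0.3.8", nil); err != nil {
		fmt.Println("ScheduleFetch:", err)
	}
	q.WaitForTesting(ctx) // drain all scheduled fetches before exiting
}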
diff --git a/internal/queue/inmemqueue/queue_test.go b/internal/queue/inmemqueue/queue_test.go
new file mode 100644
index 00000000..79847140
--- /dev/null
+++ b/internal/queue/inmemqueue/queue_test.go
@@ -0,0 +1,118 @@
+// Copyright 2026 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inmemqueue
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "testing"
+)
+
+func TestInMemory_ScheduleAndProcess(t *testing.T) {
+ ctx := context.Background()
+
+ var processed int
+ var mu sync.Mutex
+
+ processFunc := func(ctx context.Context, path, version string) (int, error) {
+ mu.Lock()
+ processed++
+ mu.Unlock()
+ return 200, nil
+ }
+
+ q := New(ctx, 2, nil, processFunc)
+
+ tests := []struct {
+ path string
+ version string
+ }{
+ {"example.com/mod1", "v1.0.0"},
+ {"example.com/mod2", "v1.1.0"},
+ {"example.com/mod3", "v2.0.0"},
+ }
+
+ for _, tc := range tests {
+ ok, err := q.ScheduleFetch(ctx, tc.path, tc.version, nil)
+ if err != nil {
+ t.Fatalf("ScheduleFetch(%q, %q) returned error: %v", tc.path, tc.version, err)
+ }
+ if !ok {
+ t.Errorf("ScheduleFetch(%q, %q) = false, want true", tc.path, tc.version)
+ }
+ }
+
+ q.WaitForTesting(ctx)
+
+ if got, want := processed, len(tests); got != want {
+ t.Errorf("processed tasks = %d, want %d", got, want)
+ }
+}
+
+func TestInMemory_ProcessFuncError(t *testing.T) {
+ ctx := context.Background()
+
+ processFunc := func(ctx context.Context, path, version string) (int, error) {
+ return 500, errors.New("simulated fetch error")
+ }
+
+ q := New(ctx, 1, nil, processFunc)
+
+ _, err := q.ScheduleFetch(ctx, "example.com/error-mod", "v1.0.0", nil)
+ if err != nil {
+ t.Fatalf("ScheduleFetch returned error: %v", err)
+ }
+
+ // This should complete without panicking or deadlocking.
+ q.WaitForTesting(ctx)
+}
+
+func TestInMemory_WorkerConcurrency(t *testing.T) {
+ ctx := context.Background()
+ workerCount := 2
+
+ var active, maxActive int
+ var mu sync.Mutex
+ var wg sync.WaitGroup
+
+ blockCh := make(chan struct{})
+
+ processFunc := func(ctx context.Context, path, version string) (int, error) {
+ mu.Lock()
+ active++
+ if active > maxActive {
+ maxActive = active
+ }
+ mu.Unlock()
+
+ <-blockCh
+
+ mu.Lock()
+ active--
+ mu.Unlock()
+ wg.Done()
+ return 200, nil
+ }
+
+ q := New(ctx, workerCount, nil, processFunc)
+
+ taskCount := 5
+ wg.Add(taskCount)
+ for range taskCount {
+ _, err := q.ScheduleFetch(ctx, "example.com/concurrent", "v1.0.0", nil)
+ if err != nil {
+ t.Fatalf("ScheduleFetch returned error: %v", err)
+ }
+ }
+
+ close(blockCh)
+ wg.Wait()
+ q.WaitForTesting(ctx)
+
+ if maxActive > workerCount {
+ t.Errorf("max concurrent workers = %d, want <= %d", maxActive, workerCount)
+ }
+}
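As the InMemory type comment notes, this queue does not retry failed tasks the way the GCP task queue does. A caller who wants best-effort retries during local development could wrap the process function before handing it to New. A minimal sketch, not part of this change; the package name, attempt count, and delay are arbitrary:

package localqueue // hypothetical wrapper location inside the pkgsite module

import (
	"context"
	"time"

	"golang.org/x/pkgsite/internal/queue/inmemqueue"
)

// withRetries retries f a fixed number of times, waiting delay between
// attempts, and returns the last status code and error if every attempt fails.
func withRetries(f inmemqueue.InMemoryProcessFunc, attempts int, delay time.Duration) inmemqueue.InMemoryProcessFunc {
	return func(ctx context.Context, path, version string) (int, error) {
		var code int
		var err error
		for i := 0; i < attempts; i++ {
			code, err = f(ctx, path, version)
			if err == nil {
				return code, nil
			}
			select {
			case <-ctx.Done():
				return code, ctx.Err()
			case <-time.After(delay):
			}
		}
		return code, err
	}
}

The wrapped function is then passed to New in place of the raw one, for example inmemqueue.New(ctx, workers, nil, withRetries(process, 3, time.Second)).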