diff --git a/act/artifactcache/handler.go b/act/artifactcache/handler.go index 7f090799..50ffbd05 100644 --- a/act/artifactcache/handler.go +++ b/act/artifactcache/handler.go @@ -5,17 +5,24 @@ package artifactcache import ( + "context" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/hex" "encoding/json" "errors" "fmt" "io" "net" "net/http" + "net/url" "os" "path/filepath" "regexp" "strconv" "strings" + "sync" "sync/atomic" "time" @@ -28,9 +35,36 @@ import ( ) const ( - urlBase = "/_apis/artifactcache" + apiPath = "/_apis/artifactcache" + internalPath = "/_internal" + + // artifactURLTTL bounds how long a signed artifactLocation URL stays valid. + // Short enough that a leaked URL is near-worthless; long enough to let the + // @actions/cache client download a big blob that was returned from /cache. + artifactURLTTL = 10 * time.Minute ) +type credKey struct{} + +// JobCredential ties a per-job bearer token (ACTIONS_RUNTIME_TOKEN) to the +// repository that owns it. Every cache entry is stamped with Repo on +// reserve/commit and checked on read/write so one repo can never observe or +// poison another repo's cache, even from inside a container that reaches the +// cache server over the docker bridge network. +type JobCredential struct { + Repo string +} + +// credEntry holds a registered job's credential along with an active +// registration count. RegisterJob is reference-counted so that if two tasks +// briefly share an ACTIONS_RUNTIME_TOKEN — e.g. a runner that retries a task +// after a crash before the old registration is revoked — the first task's +// revoker does not cut the second task's auth out from under it. +type credEntry struct { + cred JobCredential + refs int +} + type Handler struct { dir string storage *Storage @@ -43,10 +77,36 @@ type Handler struct { gcAt time.Time outboundIP string + + // internalSecret guards /_internal/{register,revoke}. 
When set, a remote + // runner can use these endpoints to pre-register per-job + // ACTIONS_RUNTIME_TOKENs against this server, enabling the same + // per-job auth and repo scoping as the embedded handler over the + // network. Empty disables the control-plane entirely. + internalSecret string + + // secret signs short-lived artifact download URLs. The @actions/cache + // toolkit does not send Authorization on the download request, so blob + // GETs authenticate via a per-URL HMAC signature with expiry rather than + // via the bearer token used for management endpoints. + secret []byte + + credMu sync.RWMutex + creds map[string]*credEntry } -func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger) (*Handler, error) { - h := &Handler{} +// StartHandler opens the on-disk cache store and starts the HTTP server. +// +// internalSecret, when non-empty, enables a control-plane API at +// /_internal/{register,revoke} that lets a remote runner pre-register the +// per-job ACTIONS_RUNTIME_TOKENs it expects this server to honor. The +// embedded in-process handler leaves it empty and registers tokens via the +// in-process RegisterJob method directly. 
+func StartHandler(dir, outboundIP string, port uint16, internalSecret string, logger logrus.FieldLogger) (*Handler, error) { + h := &Handler{ + creds: make(map[string]*credEntry), + internalSecret: internalSecret, + } if logger == nil { discard := logrus.New() @@ -83,19 +143,37 @@ func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger h.outboundIP = ip.String() } + secret, err := loadOrCreateSecret(dir) + if err != nil { + return nil, err + } + h.secret = secret + router := httprouter.New() - router.GET(urlBase+"/cache", h.middleware(h.find)) - router.POST(urlBase+"/caches", h.middleware(h.reserve)) - router.PATCH(urlBase+"/caches/:id", h.middleware(h.upload)) - router.POST(urlBase+"/caches/:id", h.middleware(h.commit)) - router.GET(urlBase+"/artifacts/:id", h.middleware(h.get)) - router.POST(urlBase+"/clean", h.middleware(h.clean)) + router.GET(apiPath+"/cache", h.bearerAuth(h.find)) + router.POST(apiPath+"/caches", h.bearerAuth(h.reserve)) + router.PATCH(apiPath+"/caches/:id", h.bearerAuth(h.upload)) + router.POST(apiPath+"/caches/:id", h.bearerAuth(h.commit)) + router.POST(apiPath+"/clean", h.bearerAuth(h.clean)) + // Artifact GET is signed via query-string HMAC because @actions/cache + // does not attach Authorization when downloading archiveLocation. + router.GET(apiPath+"/artifacts/:id", h.signedURLAuth(h.get)) + // Control-plane: a remote runner registers/revokes per-job tokens so the + // cache API can authenticate them. Always wired so the routes exist; the + // handlers themselves 401 when internalSecret is unset. + router.POST(internalPath+"/register", h.internalAuth(h.internalRegister)) + router.POST(internalPath+"/revoke", h.internalAuth(h.internalRevoke)) h.router = router h.gcCache() - listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces + // Listen on all interfaces. 
Binding to outboundIP only would give no real + // security benefit (it is the LAN/internet-facing address either way) and + // can break Docker Desktop variants where the host's outbound IP is not + // routable from inside the container network. Authentication is enforced + // by the bearer middleware and per-repo scoping, not by reachability. + listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) if err != nil { return nil, err } @@ -121,6 +199,91 @@ func (h *Handler) ExternalURL() string { h.listener.Addr().(*net.TCPAddr).Port) } +// RegisterJob makes token a valid bearer credential for cache requests from +// the given repository and returns a function that removes it. The runner +// calls this at job start and defers the returned func so that the credential +// is only accepted while the job is running. +// +// Registrations are reference-counted: if a token is already registered, the +// existing repo is kept and the refcount is incremented. The entry is +// removed only when every revoker returned by RegisterJob has been called. +// This keeps a stray re-registration from silently revoking a live job. +func (h *Handler) RegisterJob(token, repo string) func() { + if h == nil || token == "" { + return func() {} + } + h.credMu.Lock() + if existing, ok := h.creds[token]; ok { + existing.refs++ + } else { + h.creds[token] = &credEntry{ + cred: JobCredential{Repo: repo}, + refs: 1, + } + } + h.credMu.Unlock() + return func() { + h.credMu.Lock() + if entry, ok := h.creds[token]; ok { + entry.refs-- + if entry.refs <= 0 { + delete(h.creds, token) + } + } + h.credMu.Unlock() + } +} + +// RevokeJob explicitly revokes one registration of token, mirroring one call +// of the closure returned by RegisterJob. Used by the control-plane endpoint +// so a remote runner can revoke without holding the closure. 
+func (h *Handler) RevokeJob(token string) { + if h == nil || token == "" { + return + } + h.credMu.Lock() + if entry, ok := h.creds[token]; ok { + entry.refs-- + if entry.refs <= 0 { + delete(h.creds, token) + } + } + h.credMu.Unlock() +} + +func (h *Handler) lookupCredential(token string) (JobCredential, bool) { + h.credMu.RLock() + entry, ok := h.creds[token] + h.credMu.RUnlock() + if !ok { + return JobCredential{}, false + } + return entry.cred, true +} + +// loadOrCreateSecret returns the 32-byte HMAC signing key for artifact URLs, +// persisted in dir/.secret so signed URLs handed out before a restart stay +// valid across the restart and so the standalone cache-server can be pointed +// at by config.Cache.ExternalServer without the URL rotating. +func loadOrCreateSecret(dir string) ([]byte, error) { + path := filepath.Join(dir, ".secret") + if data, err := os.ReadFile(path); err == nil { + if secret, err := hex.DecodeString(strings.TrimSpace(string(data))); err == nil && len(secret) >= 32 { + return secret, nil + } + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("read cache secret: %w", err) + } + secret := make([]byte, 32) + if _, err := rand.Read(secret); err != nil { + return nil, fmt.Errorf("generate cache secret: %w", err) + } + if err := os.WriteFile(path, []byte(hex.EncodeToString(secret)), 0o600); err != nil { + return nil, fmt.Errorf("write cache secret: %w", err) + } + return secret, nil +} + func (h *Handler) Close() error { if h == nil { return nil @@ -160,6 +323,7 @@ func (h *Handler) openDB() (*bolthold.Store, error) { // GET /_apis/artifactcache/cache func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + cred := credFromContext(r.Context()) keys := strings.Split(r.URL.Query().Get("keys"), ",") // cache keys are case insensitive for i, key := range keys { @@ -174,7 +338,7 @@ func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Para } defer db.Close() - cache, err := 
findCache(db, keys, version) + cache, err := findCache(db, cred.Repo, keys, version) if err != nil { h.responseJSON(w, r, 500, err) return @@ -194,13 +358,14 @@ func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Para } h.responseJSON(w, r, 200, map[string]any{ "result": "hit", - "archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID), + "archiveLocation": h.signedArtifactURL(cache.ID, time.Now().Add(artifactURLTTL)), "cacheKey": cache.Key, }) } // POST /_apis/artifactcache/caches func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + cred := credFromContext(r.Context()) api := &Request{} if err := json.NewDecoder(r.Body).Decode(api); err != nil { h.responseJSON(w, r, 400, err) @@ -210,6 +375,7 @@ func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.P api.Key = strings.ToLower(api.Key) cache := api.ToCache() + cache.Repo = cred.Repo db, err := h.openDB() if err != nil { h.responseJSON(w, r, 500, err) @@ -231,6 +397,7 @@ func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.P // PATCH /_apis/artifactcache/caches/:id func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) { + cred := credFromContext(r.Context()) id, err := strconv.ParseInt(params.ByName("id"), 10, 64) if err != nil { h.responseJSON(w, r, 400, err) @@ -253,6 +420,11 @@ func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprout return } + if cache.Repo != cred.Repo { + h.responseJSON(w, r, 403, fmt.Errorf("cache %d: forbidden", id)) + return + } + if cache.Complete { h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key)) return @@ -272,6 +444,7 @@ func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprout // POST /_apis/artifactcache/caches/:id func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) { + cred := 
credFromContext(r.Context()) id, err := strconv.ParseInt(params.ByName("id"), 10, 64) if err != nil { h.responseJSON(w, r, 400, err) @@ -294,6 +467,11 @@ func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprout return } + if cache.Repo != cred.Repo { + h.responseJSON(w, r, 403, fmt.Errorf("cache %d: forbidden", id)) + return + } + if cache.Complete { h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key)) return @@ -326,6 +504,10 @@ func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprout } // GET /_apis/artifactcache/artifacts/:id +// Authenticated via signed URL (see signedURLAuth), not bearer, because the +// @actions/cache toolkit downloads archiveLocation without Authorization. +// Repository scoping is already enforced at find() time; the signature binds +// the URL to the specific cache ID and an expiry. func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) { id, err := strconv.ParseInt(params.ByName("id"), 10, 64) if err != nil { @@ -344,21 +526,158 @@ func (h *Handler) clean(w http.ResponseWriter, r *http.Request, _ httprouter.Par h.responseJSON(w, r, 200) } -func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle { +// bearerAuth resolves ACTIONS_RUNTIME_TOKEN against the set of currently +// registered jobs. A match attaches the job's JobCredential to the request +// context; a miss returns 401 before the handler body runs. 
+func (h *Handler) bearerAuth(handler httprouter.Handle) httprouter.Handle { return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) { - h.logger.Debugf("%s %s", r.Method, r.RequestURI) + h.logger.Debugf("%s %s", r.Method, r.URL.Path) + token := bearerToken(r) + if token == "" { + h.responseJSON(w, r, http.StatusUnauthorized, errors.New("missing bearer token")) + return + } + cred, ok := h.lookupCredential(token) + if !ok { + h.responseJSON(w, r, http.StatusUnauthorized, errors.New("unknown bearer token")) + return + } + ctx := context.WithValue(r.Context(), credKey{}, cred) + handler(w, r.WithContext(ctx), params) + go h.gcCache() + } +} + +func (h *Handler) signedURLAuth(handler httprouter.Handle) httprouter.Handle { + return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) { + h.logger.Debugf("%s %s", r.Method, r.URL.Path) + id, err := strconv.ParseInt(params.ByName("id"), 10, 64) + if err != nil { + h.responseJSON(w, r, 400, err) + return + } + expStr := r.URL.Query().Get("exp") + sig := r.URL.Query().Get("sig") + if expStr == "" || sig == "" { + h.responseJSON(w, r, http.StatusUnauthorized, errors.New("missing signature")) + return + } + exp, err := strconv.ParseInt(expStr, 10, 64) + if err != nil { + h.responseJSON(w, r, http.StatusUnauthorized, errors.New("invalid expiry")) + return + } + if time.Now().Unix() > exp { + h.responseJSON(w, r, http.StatusUnauthorized, errors.New("signature expired")) + return + } + expected := h.computeSignature(id, exp) + if !hmac.Equal([]byte(sig), []byte(expected)) { + h.responseJSON(w, r, http.StatusUnauthorized, errors.New("bad signature")) + return + } handler(w, r, params) go h.gcCache() } } +// internalAuth gates the control-plane endpoints. The bearer must +// constant-time-equal the configured internalSecret. If the secret is empty, +// the control-plane is disabled and every request gets 404 — which matches +// the upstream nektos/act behavior of "the route does not exist". 
+func (h *Handler) internalAuth(handler httprouter.Handle) httprouter.Handle { + return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) { + if h.internalSecret == "" { + http.NotFound(w, r) + return + } + token := bearerToken(r) + if token == "" || !hmac.Equal([]byte(token), []byte(h.internalSecret)) { + h.responseJSON(w, r, http.StatusUnauthorized, errors.New("internal: bad secret")) + return + } + handler(w, r, params) + } +} + +type internalRegisterBody struct { + Token string `json:"token"` + Repo string `json:"repo"` +} + +type internalRevokeBody struct { + Token string `json:"token"` +} + +// POST /_internal/register +func (h *Handler) internalRegister(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + var body internalRegisterBody + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + h.responseJSON(w, r, http.StatusBadRequest, err) + return + } + if body.Token == "" { + h.responseJSON(w, r, http.StatusBadRequest, errors.New("token is required")) + return + } + h.RegisterJob(body.Token, body.Repo) + h.responseJSON(w, r, http.StatusOK) +} + +// POST /_internal/revoke +func (h *Handler) internalRevoke(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + var body internalRevokeBody + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + h.responseJSON(w, r, http.StatusBadRequest, err) + return + } + if body.Token == "" { + h.responseJSON(w, r, http.StatusBadRequest, errors.New("token is required")) + return + } + h.RevokeJob(body.Token) + h.responseJSON(w, r, http.StatusOK) +} + +func bearerToken(r *http.Request) string { + auth := r.Header.Get("Authorization") + const prefix = "Bearer " + if len(auth) > len(prefix) && strings.EqualFold(auth[:len(prefix)], prefix) { + return auth[len(prefix):] + } + return "" +} + +func credFromContext(ctx context.Context) JobCredential { + if cred, ok := ctx.Value(credKey{}).(JobCredential); ok { + return cred + } + return JobCredential{} +} + +func (h 
*Handler) computeSignature(cacheID, exp int64) string { + mac := hmac.New(sha256.New, h.secret) + fmt.Fprintf(mac, "%d:%d", cacheID, exp) + return hex.EncodeToString(mac.Sum(nil)) +} + +func (h *Handler) signedArtifactURL(cacheID uint64, exp time.Time) string { + expUnix := exp.Unix() + sig := h.computeSignature(int64(cacheID), expUnix) + q := url.Values{} + q.Set("exp", strconv.FormatInt(expUnix, 10)) + q.Set("sig", sig) + return fmt.Sprintf("%s%s/artifacts/%d?%s", h.ExternalURL(), apiPath, cacheID, q.Encode()) +} + // if not found, return (nil, nil) instead of an error. -func findCache(db *bolthold.Store, keys []string, version string) (*Cache, error) { +func findCache(db *bolthold.Store, repo string, keys []string, version string) (*Cache, error) { cache := &Cache{} for _, prefix := range keys { // if a key in the list matches exactly, don't return partial matches if err := db.FindOne(cache, - bolthold.Where("Key").Eq(prefix). + bolthold.Where("Repo").Eq(repo). + And("Key").Eq(prefix). And("Version").Eq(version). And("Complete").Eq(true). SortBy("CreatedAt").Reverse()); err == nil || !errors.Is(err, bolthold.ErrNotFound) { @@ -373,7 +692,8 @@ func findCache(db *bolthold.Store, keys []string, version string) (*Cache, error continue } if err := db.FindOne(cache, - bolthold.Where("Key").RegExp(re). + bolthold.Where("Repo").Eq(repo). + And("Key").RegExp(re). And("Version").Eq(version). And("Complete").Eq(true). SortBy("CreatedAt").Reverse()); err != nil { @@ -494,12 +814,16 @@ func (h *Handler) gcCache() { } } - // Remove the old caches with the same key and version, keep the latest one. + // Remove the old caches with the same key and version within the same + // repository, keep the latest one. Aggregation must include Repo so two + // repos that happen to share a (key, version) do not evict each other — + // otherwise per-repo scoping holds for reads but one repo can age + // another out after keepOld. 
// Also keep the olds which have been used recently for a while in case of the cache is still in use. if results, err := db.FindAggregate( &Cache{}, bolthold.Where("Complete").Eq(true), - "Key", "Version", + "Repo", "Key", "Version", ); err != nil { h.logger.Warnf("find aggregate caches: %v", err) } else { @@ -533,7 +857,7 @@ func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int, if len(v) == 0 || v[0] == nil { data, _ = json.Marshal(struct{}{}) } else if err, ok := v[0].(error); ok { - h.logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err) + h.logger.Errorf("%v %v: %v", r.Method, r.URL.Path, err) data, _ = json.Marshal(map[string]any{ "error": err.Error(), }) diff --git a/act/artifactcache/handler_test.go b/act/artifactcache/handler_test.go index c8d4ddb7..9c8a8a15 100644 --- a/act/artifactcache/handler_test.go +++ b/act/artifactcache/handler_test.go @@ -22,12 +22,38 @@ import ( "go.etcd.io/bbolt" ) +// testToken is registered with the cache server in every test that needs to +// make authenticated requests; testClient then attaches it as the +// Authorization: Bearer header. testRepo is the repository scope used when +// registering it; cross-repo isolation is exercised in its own test. +const ( + testToken = "test-runtime-token" + testRepo = "owner/repo" +) + +type bearerTransport struct{ token string } + +func (b *bearerTransport) RoundTrip(r *http.Request) (*http.Response, error) { + r.Header.Set("Authorization", "Bearer "+b.token) + return http.DefaultTransport.RoundTrip(r) +} + +var testClient = &http.Client{Transport: &bearerTransport{token: testToken}} + +// signArtifactURL builds a signed download URL the same way the server does; +// tests use it to reach the get handler directly without going through a +// find/cache-hit round trip. 
+func signArtifactURL(h *Handler, id int64) string { + return h.signedArtifactURL(uint64(id), time.Now().Add(artifactURLTTL)) +} + func TestHandler(t *testing.T) { dir := filepath.Join(t.TempDir(), "artifactcache") - handler, err := StartHandler(dir, "", 0, nil) + handler, err := StartHandler(dir, "", 0, "", nil) require.NoError(t, err) + handler.RegisterJob(testToken, testRepo) - base := fmt.Sprintf("%s%s", handler.ExternalURL(), urlBase) + base := fmt.Sprintf("%s%s", handler.ExternalURL(), apiPath) defer func() { t.Run("inpect db", func(t *testing.T) { @@ -45,7 +71,7 @@ func TestHandler(t *testing.T) { require.NoError(t, handler.Close()) assert.Nil(t, handler.server) assert.Nil(t, handler.listener) - _, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + _, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act assert.Error(t, err) }) }() @@ -53,7 +79,7 @@ func TestHandler(t *testing.T) { t.Run("get not exist", func(t *testing.T) { key := strings.ToLower(t.Name()) version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" - resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 204, resp.StatusCode) }) @@ -68,7 +94,7 @@ func TestHandler(t *testing.T) { }) t.Run("clean", func(t *testing.T) { - resp, err := http.Post(base+"/clean", "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/clean", "", nil) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) }) @@ -76,7 +102,7 @@ func TestHandler(t *testing.T) { 
t.Run("reserve with bad request", func(t *testing.T) { body := []byte(`invalid json`) require.NoError(t, err) - resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) }) @@ -94,7 +120,7 @@ func TestHandler(t *testing.T) { Size: 100, }) require.NoError(t, err) - resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -108,7 +134,7 @@ func TestHandler(t *testing.T) { Size: 100, }) require.NoError(t, err) - resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -125,7 +151,7 @@ func TestHandler(t *testing.T) { require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") - resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) }) @@ -136,7 +162,7 @@ func TestHandler(t *testing.T) { require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") - resp, err := 
http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) }) @@ -155,7 +181,7 @@ func TestHandler(t *testing.T) { Size: 100, }) require.NoError(t, err) - resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -171,12 +197,12 @@ func TestHandler(t *testing.T) { require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") - resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { - resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } @@ -186,7 +212,7 @@ func TestHandler(t *testing.T) { require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") - resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } @@ -206,7 +232,7 @@ func TestHandler(t *testing.T) { Size: 100, }) require.NoError(t, err) - resp, err := 
http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -222,7 +248,7 @@ func TestHandler(t *testing.T) { require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes xx-99/*") - resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } @@ -230,7 +256,7 @@ func TestHandler(t *testing.T) { t.Run("commit with bad id", func(t *testing.T) { { - resp, err := http.Post(base+"/caches/invalid_id", "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches/invalid_id", "", nil) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } @@ -238,7 +264,7 @@ func TestHandler(t *testing.T) { t.Run("commit with not exist id", func(t *testing.T) { { - resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } @@ -258,7 +284,7 @@ func TestHandler(t *testing.T) { Size: 100, }) require.NoError(t, err) - resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from 
nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -274,17 +300,17 @@ func TestHandler(t *testing.T) { require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") - resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { - resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { - resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 400, resp.StatusCode) } @@ -304,7 +330,7 @@ func TestHandler(t *testing.T) { Size: 100, }) require.NoError(t, err) - resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -320,31 +346,31 @@ func TestHandler(t *testing.T) { require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-59/*") - resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act 
require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { - resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 500, resp.StatusCode) } }) t.Run("get with bad id", func(t *testing.T) { - resp, err := http.Get(base + "/artifacts/invalid_id") //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(base + "/artifacts/invalid_id") //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 400, resp.StatusCode) }) t.Run("get with not exist id", func(t *testing.T) { - resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(signArtifactURL(handler, 100)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 404, resp.StatusCode) }) t.Run("get with not exist id", func(t *testing.T) { - resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(signArtifactURL(handler, 100)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 404, resp.StatusCode) }) @@ -375,7 +401,7 @@ func TestHandler(t *testing.T) { key + "_a", }, ",") - resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) @@ -395,7 +421,7 @@ func TestHandler(t *testing.T) { assert.Equal(t, "hit", got.Result) 
assert.Equal(t, keys[except], got.CacheKey) - contentResp, err := http.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act + contentResp, err := testClient.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, contentResp.StatusCode) content, err := io.ReadAll(contentResp.Body) @@ -413,7 +439,7 @@ func TestHandler(t *testing.T) { { reqKey := key + "_aBc" - resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) got := struct { @@ -452,7 +478,7 @@ func TestHandler(t *testing.T) { key + "_a_b", }, ",") - resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) @@ -470,7 +496,7 @@ func TestHandler(t *testing.T) { require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) assert.Equal(t, keys[expect], got.CacheKey) - contentResp, err := http.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act + contentResp, err := testClient.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, contentResp.StatusCode) content, err := io.ReadAll(contentResp.Body) @@ -504,7 +530,7 @@ func TestHandler(t *testing.T) { key + "_a_b", }, ",") - resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from 
nektos/act + resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) @@ -523,7 +549,7 @@ func TestHandler(t *testing.T) { require.NoError(t, json.NewDecoder(resp.Body).Decode(&got)) assert.Equal(t, keys[expect], got.CacheKey) - contentResp, err := http.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act + contentResp, err := testClient.Get(got.ArchiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, contentResp.StatusCode) content, err := io.ReadAll(contentResp.Body) @@ -541,7 +567,7 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte Size: int64(len(content)), }) require.NoError(t, err) - resp, err := http.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(base+"/caches", "application/json", bytes.NewReader(body)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) @@ -557,18 +583,18 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte require.NoError(t, err) req.Header.Set("Content-Type", "application/octet-stream") req.Header.Set("Content-Range", "bytes 0-99/*") - resp, err := http.DefaultClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Do(req) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } { - resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil) //nolint:bodyclose // pre-existing issue from nektos/act 
require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) } var archiveLocation string { - resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) got := struct { @@ -582,7 +608,7 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte archiveLocation = got.ArchiveLocation } { - resp, err := http.Get(archiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act + resp, err := testClient.Get(archiveLocation) //nolint:bodyclose // pre-existing issue from nektos/act require.NoError(t, err) require.Equal(t, 200, resp.StatusCode) got, err := io.ReadAll(resp.Body) @@ -593,7 +619,7 @@ func uploadCacheNormally(t *testing.T, base, key, version string, content []byte func TestHandler_gcCache(t *testing.T) { dir := filepath.Join(t.TempDir(), "artifactcache") - handler, err := StartHandler(dir, "", 0, nil) + handler, err := StartHandler(dir, "", 0, "", nil) require.NoError(t, err) defer func() { @@ -699,3 +725,421 @@ func TestHandler_gcCache(t *testing.T) { } require.NoError(t, db.Close()) } + +// TestHandler_RejectsMissingBearer covers the advisory's root cause: +// unauthenticated access to management endpoints is now refused with 401. 
+func TestHandler_RejectsMissingBearer(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + + base := handler.ExternalURL() + apiPath + + for _, tc := range []struct { + name string + method string + path string + body string + }{ + {"find", http.MethodGet, "/cache?keys=x&version=y", ""}, + {"reserve", http.MethodPost, "/caches", "{}"}, + {"upload", http.MethodPatch, "/caches/1", ""}, + {"commit", http.MethodPost, "/caches/1", ""}, + {"clean", http.MethodPost, "/clean", ""}, + } { + t.Run(tc.name, func(t *testing.T) { + req, err := http.NewRequest(tc.method, base+tc.path, strings.NewReader(tc.body)) + require.NoError(t, err) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + }) + } +} + +// TestHandler_RejectsUnknownBearer verifies that a bearer token is only +// accepted after RegisterJob; stale/forged tokens cannot be replayed. +func TestHandler_RejectsUnknownBearer(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + + base := handler.ExternalURL() + apiPath + + req, err := http.NewRequest(http.MethodGet, base+"/cache?keys=x&version=y", nil) + require.NoError(t, err) + req.Header.Set("Authorization", "Bearer not-a-registered-token") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} + +// TestHandler_UnregisterRevokes ensures that the function returned by +// RegisterJob invalidates the credential, so a token leaked at job time stops +// working the moment the job ends instead of living for the runner's lifetime. 
+func TestHandler_UnregisterRevokes(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + + unregister := handler.RegisterJob("tmp-token", testRepo) + + base := handler.ExternalURL() + apiPath + req, err := http.NewRequest(http.MethodGet, base+"/cache?keys=x&version=y", nil) + require.NoError(t, err) + req.Header.Set("Authorization", "Bearer tmp-token") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + assert.NotEqual(t, http.StatusUnauthorized, resp.StatusCode) + + unregister() + + resp, err = http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) +} + +// TestHandler_CrossRepoIsolation addresses the intra-runner poisoning vector +// raised in GHSA-82g9-637c-2fx2: job containers can reach the cache server +// over the docker bridge, so IP allowlisting alone does not stop a malicious +// PR run from another repo. A cache entry created under repoA must be +// invisible to queries scoped to repoB. +func TestHandler_CrossRepoIsolation(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + handler.RegisterJob("token-a", "owner/repoA") + handler.RegisterJob("token-b", "owner/repoB") + + base := handler.ExternalURL() + apiPath + key := "shared-key" + version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" + content := []byte("repoA-payload") + + clientA := &http.Client{Transport: &bearerTransport{token: "token-a"}} + clientB := &http.Client{Transport: &bearerTransport{token: "token-b"}} + + // repoA reserves + uploads + commits. 
+ reserveBody, err := json.Marshal(&Request{Key: key, Version: version, Size: int64(len(content))}) + require.NoError(t, err) + resp, err := clientA.Post(base+"/caches", "application/json", bytes.NewReader(reserveBody)) + require.NoError(t, err) + var reserved struct { + CacheID uint64 `json:"cacheId"` + } + require.NoError(t, json.NewDecoder(resp.Body).Decode(&reserved)) + resp.Body.Close() + require.NotZero(t, reserved.CacheID) + + req, err := http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), bytes.NewReader(content)) + require.NoError(t, err) + req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/*", len(content)-1)) + resp, err = clientA.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + + resp, err = clientA.Post(fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), "", nil) + require.NoError(t, err) + resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + + // repoB with a matching key and version must NOT see repoA's cache. + resp, err = clientB.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusNoContent, resp.StatusCode) + + // repoA still sees its own cache. + resp, err = clientA.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) + + // repoB cannot upload to repoA's reserved id either (forbidden, not 401). 
+ req, err = http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), bytes.NewReader([]byte("poison"))) + require.NoError(t, err) + req.Header.Set("Content-Range", "bytes 0-5/*") + resp, err = clientB.Do(req) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusForbidden, resp.StatusCode) +} + +// TestHandler_ArtifactSignature verifies that archive downloads reject +// missing / tampered / expired signatures, so a leaked archiveLocation stops +// working after artifactURLTTL even if the bearer token is still registered. +func TestHandler_ArtifactSignature(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + handler.RegisterJob(testToken, testRepo) + + base := handler.ExternalURL() + apiPath + + t.Run("missing signature", func(t *testing.T) { + resp, err := testClient.Get(fmt.Sprintf("%s/artifacts/%d", base, 1)) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + }) + + t.Run("tampered signature", func(t *testing.T) { + good := handler.signedArtifactURL(1, time.Now().Add(artifactURLTTL)) + bad := good[:len(good)-4] + "dead" + resp, err := testClient.Get(bad) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + }) + + t.Run("expired signature", func(t *testing.T) { + expired := handler.signedArtifactURL(1, time.Now().Add(-time.Second)) + resp, err := testClient.Get(expired) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + }) + + t.Run("signature from a different server", func(t *testing.T) { + dir2 := filepath.Join(t.TempDir(), "artifactcache2") + other, err := StartHandler(dir2, "", 0, "", nil) + require.NoError(t, err) + defer other.Close() + otherURL := other.signedArtifactURL(1, time.Now().Add(artifactURLTTL)) + // Rewrite the 
host so the request still lands on our handler, but + // the signature was computed with a different secret. + parts := strings.SplitN(otherURL, apiPath, 2) + forged := base + parts[1] + resp, err := testClient.Get(forged) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + }) +} + +// TestHandler_SecretPersistsAcrossRestarts is the property that lets +// act_runner cache-server be pointed at via cfg.Cache.ExternalServer: a +// restart must not invalidate signed URLs the handler has already issued +// (within their expiry window). +func TestHandler_SecretPersistsAcrossRestarts(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + + first, err := StartHandler(dir, "127.0.0.1", 0, "", nil) + require.NoError(t, err) + exp := time.Now().Add(artifactURLTTL).Unix() + sig := first.computeSignature(42, exp) + require.NoError(t, first.Close()) + + second, err := StartHandler(dir, "127.0.0.1", 0, "", nil) + require.NoError(t, err) + defer second.Close() + + assert.Equal(t, sig, second.computeSignature(42, exp)) +} + +// TestHandler_ArtifactSignatureDownload is a happy-path round trip that +// ensures a real reserve/upload/commit/find/download flow still works after +// the auth refactor. 
+func TestHandler_ArtifactSignatureDownload(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + handler.RegisterJob(testToken, testRepo) + + base := handler.ExternalURL() + apiPath + key := "download-key" + version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" + content := []byte("hello") + uploadCacheNormally(t, base, key, version, content) + + resp, err := testClient.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version)) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + var hit struct { + ArchiveLocation string `json:"archiveLocation"` + } + require.NoError(t, json.NewDecoder(resp.Body).Decode(&hit)) + resp.Body.Close() + + require.Contains(t, hit.ArchiveLocation, "sig=") + require.Contains(t, hit.ArchiveLocation, "exp=") + + // Download without any Authorization header — the signature alone must + // be enough, because @actions/cache downloads archiveLocation unauth'd. + dl, err := http.Get(hit.ArchiveLocation) + require.NoError(t, err) + body, err := io.ReadAll(dl.Body) + dl.Body.Close() + require.NoError(t, err) + assert.Equal(t, http.StatusOK, dl.StatusCode) + assert.Equal(t, content, body) +} + +// TestHandler_RegisterJob_RefCounted verifies that a duplicate RegisterJob +// for the same token does not silently revoke the first registration on the +// first revoker call. This matters if a runner ever re-registers a token +// (restart mid-task, retry), which must not kill the live job's auth. 
+func TestHandler_RegisterJob_RefCounted(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + + first := handler.RegisterJob("shared", testRepo) + second := handler.RegisterJob("shared", testRepo) + + base := handler.ExternalURL() + apiPath + probe := func() int { + req, err := http.NewRequest(http.MethodGet, base+"/cache?keys=x&version=v", nil) + require.NoError(t, err) + req.Header.Set("Authorization", "Bearer shared") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + return resp.StatusCode + } + + require.NotEqual(t, http.StatusUnauthorized, probe()) + first() + assert.NotEqual(t, http.StatusUnauthorized, probe(), + "token must stay valid while another registration holds the refcount") + second() + assert.Equal(t, http.StatusUnauthorized, probe(), + "token is revoked only after every revoker has run") +} + +// TestHandler_GC_PerRepoDedup ensures duplicate-pruning does not evict +// another repo's entry. Two repos reserve the same (key, version); after the +// keepOld window, GC must keep the one from each repo. +func TestHandler_GC_PerRepoDedup(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + handler.RegisterJob("tok-a", "owner/repoA") + handler.RegisterJob("tok-b", "owner/repoB") + + key := "shared-dedup-key" + version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" + + // Seed one completed cache per repo directly via the DB, bypassing the + // HTTP round trip so we can precisely control UsedAt. 
+ db, err := handler.openDB() + require.NoError(t, err) + now := time.Now().Unix() + stale := time.Now().Add(-keepOld - time.Minute).Unix() + a := &Cache{Repo: "owner/repoA", Key: key, Version: version, Complete: true, CreatedAt: stale, UsedAt: stale, Size: 1} + b := &Cache{Repo: "owner/repoB", Key: key, Version: version, Complete: true, CreatedAt: now, UsedAt: now, Size: 1} + require.NoError(t, insertCache(db, a)) + require.NoError(t, insertCache(db, b)) + // Write the backing blobs so the dedup deletion has something to remove. + require.NoError(t, handler.storage.Write(a.ID, 0, strings.NewReader("a"))) + _, err = handler.storage.Commit(a.ID, 1) + require.NoError(t, err) + require.NoError(t, handler.storage.Write(b.ID, 0, strings.NewReader("b"))) + _, err = handler.storage.Commit(b.ID, 1) + require.NoError(t, err) + require.NoError(t, db.Close()) + + // Force GC to run regardless of the cooldown. + handler.gcAt = time.Time{} + handler.gcCache() + + db, err = handler.openDB() + require.NoError(t, err) + defer db.Close() + var after []Cache + require.NoError(t, db.Find(&after, bolthold.Where("Key").Eq(key).And("Version").Eq(version))) + + repos := make(map[string]bool) + for _, c := range after { + repos[c.Repo] = true + } + assert.True(t, repos["owner/repoA"], "repoA's cache must survive dedup against repoB") + assert.True(t, repos["owner/repoB"], "repoB's cache must survive dedup against repoA") +} + +// TestHandler_InternalAPI_Disabled verifies that without an internalSecret +// the control-plane routes are 404 — operators can't accidentally hit +// register/revoke when the feature is off. 
+func TestHandler_InternalAPI_Disabled(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := StartHandler(dir, "", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + + for _, ep := range []string{"/_internal/register", "/_internal/revoke"} { + resp, err := http.Post(handler.ExternalURL()+ep, "application/json", strings.NewReader(`{}`)) + require.NoError(t, err) + resp.Body.Close() + assert.Equal(t, http.StatusNotFound, resp.StatusCode, ep) + } +} + +// TestHandler_InternalAPI_AuthAndUsage covers the control-plane: bad/missing +// secret → 401, malformed body → 400, happy path round-trips a token through +// register → cache-API accepts it → revoke → cache-API rejects it. +func TestHandler_InternalAPI_AuthAndUsage(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + const secret = "internal-secret" + handler, err := StartHandler(dir, "", 0, secret, nil) + require.NoError(t, err) + defer handler.Close() + + base := handler.ExternalURL() + + post := func(path, bearer, body string) int { + req, err := http.NewRequest(http.MethodPost, base+path, strings.NewReader(body)) + require.NoError(t, err) + if bearer != "" { + req.Header.Set("Authorization", "Bearer "+bearer) + } + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + return resp.StatusCode + } + + t.Run("missing secret 401", func(t *testing.T) { + assert.Equal(t, http.StatusUnauthorized, post("/_internal/register", "", `{"token":"x","repo":"r"}`)) + }) + t.Run("wrong secret 401", func(t *testing.T) { + assert.Equal(t, http.StatusUnauthorized, post("/_internal/register", "wrong", `{"token":"x","repo":"r"}`)) + }) + t.Run("malformed body 400", func(t *testing.T) { + assert.Equal(t, http.StatusBadRequest, post("/_internal/register", secret, `not json`)) + }) + t.Run("missing token 400", func(t *testing.T) { + assert.Equal(t, http.StatusBadRequest, 
post("/_internal/register", secret, `{"repo":"r"}`)) + }) + + t.Run("register then revoke round-trip", func(t *testing.T) { + probe := func(token string) int { + req, _ := http.NewRequest(http.MethodGet, base+apiPath+"/cache?keys=k&version=v", nil) + req.Header.Set("Authorization", "Bearer "+token) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + return resp.StatusCode + } + + assert.Equal(t, http.StatusUnauthorized, probe("via-internal-api")) + assert.Equal(t, http.StatusOK, post("/_internal/register", secret, `{"token":"via-internal-api","repo":"owner/repo"}`)) + assert.NotEqual(t, http.StatusUnauthorized, probe("via-internal-api")) + assert.Equal(t, http.StatusOK, post("/_internal/revoke", secret, `{"token":"via-internal-api"}`)) + assert.Equal(t, http.StatusUnauthorized, probe("via-internal-api")) + }) +} diff --git a/act/artifactcache/model.go b/act/artifactcache/model.go index 070b4476..c550f63b 100644 --- a/act/artifactcache/model.go +++ b/act/artifactcache/model.go @@ -29,6 +29,7 @@ func (c *Request) ToCache() *Cache { type Cache struct { ID uint64 `json:"id" boltholdKey:"ID"` + Repo string `json:"repo" boltholdIndex:"Repo"` Key string `json:"key" boltholdIndex:"Key"` Version string `json:"version" boltholdIndex:"Version"` Size int64 `json:"cacheSize"` diff --git a/internal/app/cmd/cache-server.go b/internal/app/cmd/cache-server.go index eed57d96..6c88e746 100644 --- a/internal/app/cmd/cache-server.go +++ b/internal/app/cmd/cache-server.go @@ -4,6 +4,7 @@ package cmd import ( + "errors" "fmt" "os" "os/signal" @@ -47,10 +48,15 @@ func runCacheServer(configFile *string, cacheArgs *cacheServerArgs) func(cmd *co port = cacheArgs.Port } + secret := cfg.Cache.ExternalSecret + if secret == "" { + return errors.New("cache.external_secret must be set for cache-server; configure the same value on each runner that points at this server via cache.external_server") + } cacheHandler, err := artifactcache.StartHandler( dir, host, 
port, + secret, log.StandardLogger().WithField("module", "cache_request"), ) if err != nil { diff --git a/internal/app/cmd/exec.go b/internal/app/cmd/exec.go index 750fb3c6..01226c0e 100644 --- a/internal/app/cmd/exec.go +++ b/internal/app/cmd/exec.go @@ -6,6 +6,8 @@ package cmd import ( "context" + "crypto/rand" + "encoding/hex" "errors" "fmt" "maps" @@ -368,7 +370,7 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command } // init a cache server - handler, err := artifactcache.StartHandler("", "", 0, log.StandardLogger().WithField("module", "cache_request")) + handler, err := artifactcache.StartHandler("", "", 0, "", log.StandardLogger().WithField("module", "cache_request")) if err != nil { return err } @@ -393,6 +395,25 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command execArgs.artifactServerPath = tempDir } + // Register ACTIONS_RUNTIME_TOKEN against local cache server + env := execArgs.LoadEnvs() + const actionsRuntimeTokenEnvName = "ACTIONS_RUNTIME_TOKEN" + actionsRuntimeToken := env[actionsRuntimeTokenEnvName] + if actionsRuntimeToken == "" { + actionsRuntimeToken = os.Getenv(actionsRuntimeTokenEnvName) + } + if actionsRuntimeToken == "" { + tmpBranch := make([]byte, 12) + if _, err := rand.Read(tmpBranch); err != nil { + actionsRuntimeToken = "token" + } else { + actionsRuntimeToken = hex.EncodeToString(tmpBranch) + } + env[actionsRuntimeTokenEnvName] = actionsRuntimeToken + os.Setenv(actionsRuntimeTokenEnvName, actionsRuntimeToken) + } + handler.RegisterJob(actionsRuntimeToken, "__local/__exec") + // run the plan config := &runner.Config{ Workdir: execArgs.Workdir(), @@ -402,7 +423,7 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command ForceRebuild: execArgs.forceRebuild, LogOutput: true, JSONLogger: execArgs.jsonLogger, - Env: execArgs.LoadEnvs(), + Env: env, Vars: execArgs.LoadVars(), Secrets: execArgs.LoadSecrets(), InsecureSecrets: execArgs.insecureSecrets, 
diff --git a/internal/app/run/runner.go b/internal/app/run/runner.go index 64101eba..8b3976b0 100644 --- a/internal/app/run/runner.go +++ b/internal/app/run/runner.go @@ -4,10 +4,12 @@ package run import ( + "bytes" "context" "encoding/json" "fmt" "maps" + "net/http" "os" "path/filepath" "strings" @@ -38,9 +40,10 @@ type Runner struct { cfg *config.Config - client client.Client - labels labels.Labels - envs map[string]string + client client.Client + labels labels.Labels + envs map[string]string + cacheHandler *artifactcache.Handler runningTasks sync.Map runningCount atomic.Int64 @@ -55,21 +58,24 @@ func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) } envs := make(map[string]string, len(cfg.Runner.Envs)) maps.Copy(envs, cfg.Runner.Envs) + var cacheHandler *artifactcache.Handler if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled { if cfg.Cache.ExternalServer != "" { envs["ACTIONS_CACHE_URL"] = cfg.Cache.ExternalServer } else { - cacheHandler, err := artifactcache.StartHandler( + handler, err := artifactcache.StartHandler( cfg.Cache.Dir, cfg.Cache.Host, cfg.Cache.Port, + "", log.StandardLogger().WithField("module", "cache_request"), ) if err != nil { log.Errorf("cannot init cache server, it will be disabled: %v", err) // go on } else { - envs["ACTIONS_CACHE_URL"] = cacheHandler.ExternalURL() + "/" + cacheHandler = handler + envs["ACTIONS_CACHE_URL"] = handler.ExternalURL() + "/" } } } @@ -84,11 +90,12 @@ func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) envs["GITEA_ACTIONS_RUNNER_VERSION"] = ver.Version() return &Runner{ - name: reg.Name, - cfg: cfg, - client: cli, - labels: ls, - envs: envs, + name: reg.Name, + cfg: cfg, + client: cli, + labels: ls, + envs: envs, + cacheHandler: cacheHandler, } } @@ -199,6 +206,21 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report. 
giteaRuntimeToken = preset.Token } r.envs["ACTIONS_RUNTIME_TOKEN"] = giteaRuntimeToken + // Mask the runtime token so it cannot be echoed in user step output; it is + // now also the cache server's bearer credential and leaking it would let + // any reader of the log impersonate this job against the cache. + if giteaRuntimeToken != "" { + task.Secrets["ACTIONS_RUNTIME_TOKEN"] = giteaRuntimeToken + } + + // Register this job's runtime token with the local cache server so that + // cache requests from the job container can authenticate. The credential + // is removed when the task finishes, so a leaked token stops working as + // soon as the job ends rather than remaining valid for the runner's + // lifetime. Only applies to the embedded cache server; when the operator + // points the runner at an external cache via cfg.Cache.ExternalServer, it + // is that server's responsibility to authenticate requests. + defer r.registerCacheForTask(giteaRuntimeToken, preset.Repository, reporter)() eventJSON, err := json.Marshal(preset.Event) if err != nil { @@ -278,6 +300,82 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report. return execErr } +// registerCacheForTask tells the cache server to accept requests authenticated +// with the given runtime token for the duration of this task. Returns a +// function the caller must invoke (typically via defer) to revoke the +// credential when the task finishes. +// +// Three modes: +// - Embedded handler: register in-process via RegisterJob. +// - external_server + external_secret: POST to the remote server's +// /_internal/register, defer a POST to /_internal/revoke. This is what +// enables full per-job auth and repo scoping over the network. +// - external_server alone (no secret): no-op revoker. The remote server is +// in legacy openMode and ignores the runtime token; trust is at the +// network layer. +// +// Safe with an empty token (older Gitea did not issue one). 
+func (r *Runner) registerCacheForTask(token, repo string, reporter *report.Reporter) func() { + if token == "" { + return func() {} + } + if r.cacheHandler != nil { + return r.cacheHandler.RegisterJob(token, repo) + } + if r.cfg.Cache.ExternalServer != "" && r.cfg.Cache.ExternalSecret != "" { + return r.registerExternalCacheJob(token, repo, reporter) + } + return func() {} +} + +// registerExternalCacheJob POSTs to the remote cache-server's control-plane. +// Failures are logged but not fatal: if registration fails, the cache will +// 401 the job's requests — better than failing the whole task for a cache +// outage. The warning is mirrored to the job log so users can see why their +// cache calls 401, instead of having to read the runner daemon's stderr. +func (r *Runner) registerExternalCacheJob(token, repo string, reporter *report.Reporter) func() { + base := strings.TrimRight(r.cfg.Cache.ExternalServer, "/") + if err := postInternalCache(base+"/_internal/register", r.cfg.Cache.ExternalSecret, + map[string]string{"token": token, "repo": repo}); err != nil { + log.Warnf("cache external_server register failed (%s): %v", base, err) + if reporter != nil { + reporter.Logf("::warning::cache external_server register failed (%s): %v — cache requests from this job will be unauthenticated and likely return 401", base, err) + } + } + return func() { + if err := postInternalCache(base+"/_internal/revoke", r.cfg.Cache.ExternalSecret, + map[string]string{"token": token}); err != nil { + log.Warnf("cache external_server revoke failed (%s): %v", base, err) + if reporter != nil { + reporter.Logf("::warning::cache external_server revoke failed (%s): %v", base, err) + } + } + } +} + +func postInternalCache(url, secret string, body map[string]string) error { + buf, err := json.Marshal(body) + if err != nil { + return err + } + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(buf)) + if err != nil { + return err + } + req.Header.Set("Authorization", "Bearer 
"+secret) + req.Header.Set("Content-Type", "application/json") + client := &http.Client{Timeout: 5 * time.Second} + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode/100 != 2 { + return fmt.Errorf("status %d", resp.StatusCode) + } + return nil +} + func (r *Runner) RunningCount() int64 { return r.runningCount.Load() } diff --git a/internal/app/run/runner_cache_test.go b/internal/app/run/runner_cache_test.go new file mode 100644 index 00000000..74d6bef6 --- /dev/null +++ b/internal/app/run/runner_cache_test.go @@ -0,0 +1,239 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package run + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "path/filepath" + "strings" + "testing" + + "gitea.com/gitea/act_runner/act/artifactcache" + "gitea.com/gitea/act_runner/internal/pkg/config" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func emptyCfg() *config.Config { return &config.Config{} } + +func TestRunner_registerCacheForTask(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := artifactcache.StartHandler(dir, "127.0.0.1", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + + r := &Runner{cfg: emptyCfg(), cacheHandler: handler} + token := "run-token-123" + unregister := r.registerCacheForTask(token, "owner/repo", nil) + + base := handler.ExternalURL() + "/_apis/artifactcache" + probe := func() int { + req, err := http.NewRequest(http.MethodGet, base+"/cache?keys=x&version=v", nil) + require.NoError(t, err) + req.Header.Set("Authorization", "Bearer "+token) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + return resp.StatusCode + } + + assert.NotEqual(t, http.StatusUnauthorized, probe(), + "token should be accepted while task is registered") + + unregister() + assert.Equal(t, http.StatusUnauthorized, probe(), + "token must be 
rejected after the revoker runs") +} + +func TestRunner_registerCacheForTask_NoOps(t *testing.T) { + t.Run("nil cacheHandler", func(t *testing.T) { + r := &Runner{cfg: emptyCfg()} + unregister := r.registerCacheForTask("tok", "owner/repo", nil) + require.NotNil(t, unregister) + unregister() + }) + + t.Run("empty token", func(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := artifactcache.StartHandler(dir, "127.0.0.1", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + + r := &Runner{cfg: emptyCfg(), cacheHandler: handler} + unregister := r.registerCacheForTask("", "owner/repo", nil) + require.NotNil(t, unregister) + unregister() + }) +} + +// Locks in @actions/cache's wire protocol: bearer on reserve/upload/commit +// /find, no auth on the signed archiveLocation download. +func TestRunner_CacheFullFlow_MatchesToolkit(t *testing.T) { + dir := filepath.Join(t.TempDir(), "artifactcache") + handler, err := artifactcache.StartHandler(dir, "127.0.0.1", 0, "", nil) + require.NoError(t, err) + defer handler.Close() + + r := &Runner{cfg: emptyCfg(), cacheHandler: handler} + token := "full-flow-token" + unregister := r.registerCacheForTask(token, "owner/repo", nil) + defer unregister() + + base := handler.ExternalURL() + "/_apis/artifactcache" + do := func(method, url, contentType, contentRange, body string) *http.Response { + req, err := http.NewRequest(method, url, strings.NewReader(body)) + require.NoError(t, err) + req.Header.Set("Authorization", "Bearer "+token) + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + if contentRange != "" { + req.Header.Set("Content-Range", contentRange) + } + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + return resp + } + + key := "toolkit-flow" + version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20" + body := `hello-cache-body` + + // reserve + resp := do(http.MethodPost, base+"/caches", "application/json", "", + 
fmt.Sprintf(`{"key":"%s","version":"%s","cacheSize":%d}`, key, version, len(body))) + require.Equal(t, http.StatusOK, resp.StatusCode) + var reserved struct { + CacheID uint64 `json:"cacheId"` + } + require.NoError(t, decodeJSON(resp, &reserved)) + require.NotZero(t, reserved.CacheID) + + // upload + resp = do(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), + "application/octet-stream", fmt.Sprintf("bytes 0-%d/*", len(body)-1), body) + require.Equal(t, http.StatusOK, resp.StatusCode) + resp.Body.Close() + + // commit + resp = do(http.MethodPost, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), "", "", "") + require.Equal(t, http.StatusOK, resp.StatusCode) + resp.Body.Close() + + // find — @actions/cache always sends comma-separated keys here + resp = do(http.MethodGet, + fmt.Sprintf("%s/cache?keys=%s,fallback&version=%s", base, key, version), "", "", "") + require.Equal(t, http.StatusOK, resp.StatusCode) + var hit struct { + ArchiveLocation string `json:"archiveLocation"` + CacheKey string `json:"cacheKey"` + } + require.NoError(t, decodeJSON(resp, &hit)) + require.Equal(t, key, hit.CacheKey) + require.NotEmpty(t, hit.ArchiveLocation) + + // download — toolkit does NOT attach Authorization here; the signature + // in the URL must be enough. + dl, err := http.Get(hit.ArchiveLocation) + require.NoError(t, err) + defer dl.Body.Close() + require.Equal(t, http.StatusOK, dl.StatusCode) + got := make([]byte, 64) + n, _ := dl.Body.Read(got) + assert.Equal(t, body, string(got[:n])) +} + +func decodeJSON(resp *http.Response, v any) error { + defer resp.Body.Close() + return json.NewDecoder(resp.Body).Decode(v) +} + +// End-to-end against a remote cache-server: token unknown → 401, register → +// reserve/upload/commit/find/download all OK, revoke → 401 again. 
+func TestRunner_ExternalCacheServer_RegisterRevoke(t *testing.T) { + dir := filepath.Join(t.TempDir(), "remote-cache") + const secret = "shared-secret-for-tests" + remote, err := artifactcache.StartHandler(dir, "127.0.0.1", 0, secret, nil) + require.NoError(t, err) + defer remote.Close() + + r := &Runner{cfg: &config.Config{Cache: config.Cache{ + ExternalServer: remote.ExternalURL(), + ExternalSecret: secret, + }}} + + token := "external-task-token" + repo := "owner/repoX" + base := remote.ExternalURL() + "/_apis/artifactcache" + probe := func() int { + req, _ := http.NewRequest(http.MethodGet, base+"/cache?keys=k&version=v", nil) + req.Header.Set("Authorization", "Bearer "+token) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + return resp.StatusCode + } + + require.Equal(t, http.StatusUnauthorized, probe(), + "token must be unknown to the remote server before registration") + + unregister := r.registerCacheForTask(token, repo, nil) + require.NotEqual(t, http.StatusUnauthorized, probe(), + "token must be accepted after registerCacheForTask") + + // Full reserve→upload→commit→find→download cycle, identical to what + // @actions/cache does, against the remote (external) server.
+ body := []byte("payload-from-task") + reserveBody, _ := json.Marshal(&artifactcache.Request{Key: "ext-key", Version: "v", Size: int64(len(body))}) + req, _ := http.NewRequest(http.MethodPost, base+"/caches", bytes.NewReader(reserveBody)) + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + var reserved struct { + CacheID uint64 `json:"cacheId"` + } + require.NoError(t, decodeJSON(resp, &reserved)) + require.NotZero(t, reserved.CacheID) + + req, _ = http.NewRequest(http.MethodPatch, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), bytes.NewReader(body)) + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/*", len(body)-1)) + resp, err = http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + + req, _ = http.NewRequest(http.MethodPost, fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), nil) + req.Header.Set("Authorization", "Bearer "+token) + resp, err = http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + + req, _ = http.NewRequest(http.MethodGet, base+"/cache?keys=ext-key&version=v", nil) + req.Header.Set("Authorization", "Bearer "+token) + resp, err = http.DefaultClient.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + var hit struct { + ArchiveLocation string `json:"archiveLocation"` + } + require.NoError(t, decodeJSON(resp, &hit)) + require.NotEmpty(t, hit.ArchiveLocation) + + dl, err := http.Get(hit.ArchiveLocation) + require.NoError(t, err) + defer dl.Body.Close() + require.Equal(t, http.StatusOK, dl.StatusCode) + + unregister() + assert.Equal(t, http.StatusUnauthorized, probe(), + "token must be rejected after the revoker runs") +} diff --git
a/internal/pkg/config/config.example.yaml b/internal/pkg/config/config.example.yaml index 14da4c73..5e0232be 100644 --- a/internal/pkg/config/config.example.yaml +++ b/internal/pkg/config/config.example.yaml @@ -81,7 +81,12 @@ cache: # The external cache server URL. Valid only when enable is true. # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself. # The URL should generally end with "/". + # Requires external_secret below to be set to the same value on both this runner and the cache-server. external_server: "" + # Shared secret between this runner and the external `act_runner cache-server`. Required when external_server + # (or `act_runner cache-server`) is in use: the runner pre-registers each job's ACTIONS_RUNTIME_TOKEN with the + # cache-server, and the cache-server enforces bearer auth + per-repo cache isolation. + external_secret: "" container: # Specifies the network to which the container will connect. diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 248b79bc..08e1c748 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -4,6 +4,7 @@ package config import ( + "errors" "fmt" "maps" "os" @@ -47,6 +48,7 @@ type Cache struct { Host string `yaml:"host"` // Host specifies the caching host. Port uint16 `yaml:"port"` // Port specifies the caching port. ExternalServer string `yaml:"external_server"` // ExternalServer specifies the URL of external cache server + ExternalSecret string `yaml:"external_secret"` // ExternalSecret is a shared secret between this runner and an external act_runner cache-server, enabling per-job ACTIONS_RUNTIME_TOKEN authentication and repo scoping over the network. Leave empty to keep the legacy unauthenticated behavior. } // Container represents the configuration for the container.
@@ -135,6 +137,9 @@ func LoadDefault(file string) (*Config, error) { home, _ := os.UserHomeDir() cfg.Cache.Dir = filepath.Join(home, ".cache", "actcache") } + if cfg.Cache.ExternalServer != "" && cfg.Cache.ExternalSecret == "" { + return nil, errors.New("cache.external_server is set but cache.external_secret is empty; configure the same external_secret on this runner and the act_runner cache-server") + } } if cfg.Container.WorkdirParent == "" { cfg.Container.WorkdirParent = "workspace" diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go new file mode 100644 index 00000000..da0e414b --- /dev/null +++ b/internal/pkg/config/config_test.go @@ -0,0 +1,41 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package config + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLoadDefault_RejectsExternalServerWithoutSecret(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "config.yaml") + require.NoError(t, os.WriteFile(path, []byte(` +cache: + enabled: true + external_server: "http://cache.invalid/" +`), 0o600)) + + _, err := LoadDefault(path) + require.Error(t, err) + assert.Contains(t, err.Error(), "external_secret") +} + +func TestLoadDefault_AcceptsExternalServerWithSecret(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "config.yaml") + require.NoError(t, os.WriteFile(path, []byte(` +cache: + enabled: true + external_server: "http://cache.invalid/" + external_secret: "shh" +`), 0o600)) + + _, err := LoadDefault(path) + require.NoError(t, err) +}