act_runner/act/runner/job_executor.go
Lunny Xiao 13dc9386fe Rename act_runner to runner (#850)
## Consumer-facing breaking changes

- **Go module path**: `gitea.com/gitea/act_runner` → `gitea.com/gitea/runner`. Anything importing `act/...` or `internal/...` packages (notably Gitea itself) must update its imports; a minimal Go sketch follows this list.
- **Binary name**: `act_runner` → `gitea-runner`. Wrapper scripts, systemd units, init scripts, and documentation referencing the binary by `act_runner` will break.
- **Docker image**: `gitea/act_runner` → `gitea/runner` (incl. `*-dind-rootless` variants). Users pulling `gitea/act_runner:nightly` etc. will get stale images. Note: the image name is `gitea/runner`, not `gitea/gitea-runner`.
- **Release artifact paths**: S3 directory `act_runner/{{.Version}}` → `gitea-runner/{{.Version}}`, and artifact filenames change with the new project name. Existing download URLs break.
- **Metrics namespace**: changed from `act_runner` to `gitea_runner` (e.g. `act_runner_jobs_total` → `gitea_runner_jobs_total`); existing monitors/dashboards must be updated.
- **ldflags version path**: `gitea.com/gitea/act_runner/internal/pkg/ver.version` → `gitea.com/gitea/runner/internal/pkg/ver.version`. Affects anyone building with custom ldflags (see the `ver` package sketch after the "Unchanged" note below).
- **Kubernetes example resource names**: `act-runner` / `act-runner-vol` → `runner` / `runner-vol`. Users who copied the manifests verbatim will see resource churn on apply.
- **s6 service name**: `scripts/s6/act_runner/` → `scripts/s6/gitea-runner/` (image-internal; only matters for downstream image overrides).
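
For the module-path change, here is a minimal downstream sketch (hypothetical `main` package; `common.NewDebugExecutor` is an existing helper, also used in `job_executor.go` below) showing the one-line import update:

```go
// Hypothetical consumer of the renamed module; only the import path changes.
package main

import (
	"context"

	// before the rename: "gitea.com/gitea/act_runner/act/common"
	"gitea.com/gitea/runner/act/common"
)

func main() {
	// NewDebugExecutor comes from act/common (used by job_executor.go below).
	check := common.NewDebugExecutor("import path check")
	_ = check(context.Background())
}
```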

Unchanged: YAML config field names, env vars (`GITEA_*`), CLI flags/subcommands, registration file format.
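
The ldflags item above follows Go's standard `-X importpath.name=value` injection into an unexported string. A sketch of the `ver` package pattern, assuming an accessor named `Version()` (only the package path and the `version` variable name are confirmed by the ldflags path):

```go
// Sketch of internal/pkg/ver — the package targeted by -X. Build with:
//   go build -ldflags "-X gitea.com/gitea/runner/internal/pkg/ver.version=v1.2.3"
package ver

// version is overwritten at link time; the "dev" default is illustrative.
var version = "dev"

// Version exposes the injected value (accessor name is an assumption).
func Version() string { return version }
```
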
---------

Co-authored-by: silverwind <me@silverwind.io>
Reviewed-on: https://gitea.com/gitea/runner/pulls/850
Reviewed-by: Zettat123 <39446+zettat123@noreply.gitea.com>
Reviewed-by: silverwind <2021+silverwind@noreply.gitea.com>
Reviewed-by: Nicolas <bircni@icloud.com>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-committed-by: Lunny Xiao <xiaolunwen@gmail.com>
2026-04-30 20:12:51 +00:00


// Copyright 2022 The Gitea Authors. All rights reserved.
// Copyright 2022 The nektos/act Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package runner

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"gitea.com/gitea/runner/act/common"
	"gitea.com/gitea/runner/act/model"
)

type jobInfo interface {
	matrix() map[string]any
	steps() []*model.Step
	startContainer() common.Executor
	stopContainer() common.Executor
	closeContainer() common.Executor
	interpolateOutputs() common.Executor
	result(result string)
}

func newJobExecutor(info jobInfo, sf stepFactory, rc *RunContext) common.Executor {
	steps := make([]common.Executor, 0)
	preSteps := make([]common.Executor, 0)
	var postExecutor common.Executor

	steps = append(steps, func(ctx context.Context) error {
		logger := common.Logger(ctx)
		if len(info.matrix()) > 0 {
			logger.Infof("\U0001F9EA Matrix: %v", info.matrix())
		}
		return nil
	})

	infoSteps := info.steps()

	if len(infoSteps) == 0 {
		return common.NewDebugExecutor("No steps found")
	}

	preSteps = append(preSteps, func(ctx context.Context) error {
		// Has to be skipped for some tests (rc.Run is nil there)
		if rc.Run == nil {
			return nil
		}
		rc.ExprEval = rc.NewExpressionEvaluator(ctx)
		// evaluate environment variables since they can contain
		// GitHub's special environment variables.
		for k, v := range rc.GetEnv() {
			rc.Env[k] = rc.ExprEval.Interpolate(ctx, v)
		}
		return nil
	})

	for i, stepModel := range infoSteps {
		if stepModel == nil {
			return func(ctx context.Context) error {
				return fmt.Errorf("invalid Step %v: missing run or uses key", i)
			}
		}
		if stepModel.ID == "" {
			stepModel.ID = strconv.Itoa(i)
		}
		stepModel.Number = i

		step, err := sf.newStep(stepModel, rc)
		if err != nil {
			return common.NewErrorExecutor(err)
		}

		preExec := step.pre()
		preSteps = append(preSteps, useStepLogger(rc, stepModel, stepStagePre, func(ctx context.Context) error {
			logger := common.Logger(ctx)
			preErr := preExec(ctx)
			if preErr != nil {
				logger.Errorf("%v", preErr)
				common.SetJobError(ctx, preErr)
			} else if ctx.Err() != nil {
				logger.Errorf("%v", ctx.Err())
				common.SetJobError(ctx, ctx.Err())
			}
			return preErr
		}))

		stepExec := step.main()
		steps = append(steps, useStepLogger(rc, stepModel, stepStageMain, func(ctx context.Context) error {
			logger := common.Logger(ctx)
			err := stepExec(ctx)
			if err != nil {
				logger.Errorf("%v", err)
				common.SetJobError(ctx, err)
			} else if ctx.Err() != nil {
				logger.Errorf("%v", ctx.Err())
				common.SetJobError(ctx, ctx.Err())
			}
			// the failure is recorded via SetJobError above; returning nil keeps
			// the pipeline running so the remaining steps can still evaluate
			// their own `if` conditions (e.g. failure()) against the job status.
			return nil
		}))

		postExec := useStepLogger(rc, stepModel, stepStagePost, step.post())
		if postExecutor != nil {
			// run the post executor in reverse order
			postExecutor = postExec.Finally(postExecutor)
		} else {
			postExecutor = postExec
		}
	}

	postExecutor = postExecutor.Finally(func(ctx context.Context) error {
		jobError := common.JobError(ctx)
		var err error
		if rc.Config.AutoRemove || jobError == nil {
			// always allow 1 min for stopping and removing the runner, even if we were cancelled
			ctx, cancel := context.WithTimeout(common.WithLogger(context.Background(), common.Logger(ctx)), time.Minute)
			defer cancel()

			logger := common.Logger(ctx)

			// For Gitea
			// We don't need to call `stopServiceContainers` here since it will be called by the following `info.stopContainer`
			// logger.Infof("Cleaning up services for job %s", rc.JobName)
			// if err := rc.stopServiceContainers()(ctx); err != nil {
			// 	logger.Errorf("Error while cleaning services: %v", err)
			// }

			logger.Infof("Cleaning up container for job %s", rc.JobName)
			if err = info.stopContainer()(ctx); err != nil {
				logger.Errorf("Error while stopping job container: %v", err)
			}

			// For Gitea
			// We don't need to call `NewDockerNetworkRemoveExecutor` here since it is called by the above `info.stopContainer`
			// if !rc.IsHostEnv(ctx) && rc.Config.ContainerNetworkMode == "" {
			// 	// clean network in docker mode only
			// 	// if the value of `ContainerNetworkMode` is empty string,
			// 	// it means that the network to which containers are connecting is created by `runner`,
			// 	// so, we should remove the network at last.
			// 	networkName, _ := rc.networkName()
			// 	logger.Infof("Cleaning up network for job %s, and network name is: %s", rc.JobName, networkName)
			// 	if err := container.NewDockerNetworkRemoveExecutor(networkName)(ctx); err != nil {
			// 		logger.Errorf("Error while cleaning network: %v", err)
			// 	}
			// }
		}
		setJobResult(ctx, info, rc, jobError == nil)
		setJobOutputs(ctx, rc)

		return err
	})

	pipeline := make([]common.Executor, 0)
	pipeline = append(pipeline, preSteps...)
	pipeline = append(pipeline, steps...)

	return common.NewPipelineExecutor(info.startContainer(), common.NewPipelineExecutor(pipeline...).
		Finally(func(ctx context.Context) error {
			var cancel context.CancelFunc
			if ctx.Err() == context.Canceled {
				// in case of an aborted run, we still should execute the
				// post steps to allow cleanup.
				ctx, cancel = context.WithTimeout(common.WithLogger(context.Background(), common.Logger(ctx)), 5*time.Minute)
				defer cancel()
			}
			return postExecutor(ctx)
		}).
		Finally(info.interpolateOutputs()).
		Finally(info.closeContainer()))
}

func setJobResult(ctx context.Context, info jobInfo, rc *RunContext, success bool) {
	logger := common.Logger(ctx)

	jobResult := "success"
	// we have only one result for a whole matrix build, so we need
	// to keep an existing result state if we run a matrix
	if len(info.matrix()) > 0 && rc.Run.Job().Result != "" {
		jobResult = rc.Run.Job().Result
	}
	if !success {
		jobResult = "failure"
	}

	info.result(jobResult)
	if rc.caller != nil {
		// set reusable workflow job result
		rc.caller.setReusedWorkflowJobResult(rc.JobName, jobResult) // For Gitea
		return
	}

	jobResultMessage := "succeeded"
	if jobResult != "success" {
		jobResultMessage = "failed"
	}

	logger.WithField("jobResult", jobResult).Infof("\U0001F3C1 Job %s", jobResultMessage)
}

func setJobOutputs(ctx context.Context, rc *RunContext) {
	if rc.caller != nil {
		// map outputs for reusable workflows
		callerOutputs := make(map[string]string)

		ee := rc.NewExpressionEvaluator(ctx)
		for k, v := range rc.Run.Workflow.WorkflowCallConfig().Outputs {
			callerOutputs[k] = ee.Interpolate(ctx, ee.Interpolate(ctx, v.Value))
		}

		rc.caller.runContext.Run.Job().Outputs = callerOutputs
	}
}

func useStepLogger(rc *RunContext, stepModel *model.Step, stage stepStage, executor common.Executor) common.Executor {
	return func(ctx context.Context) error {
		ctx = withStepLogger(ctx, stepModel.Number, stepModel.ID, rc.ExprEval.Interpolate(ctx, stepModel.String()), stage.String())

		rawLogger := common.Logger(ctx).WithField("raw_output", true)
		logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
			if rc.Config.LogOutput {
				rawLogger.Infof("%s", s)
			} else {
				rawLogger.Debugf("%s", s)
			}
			return true
		})

		oldout, olderr := rc.JobContainer.ReplaceLogWriter(logWriter, logWriter)
		defer rc.JobContainer.ReplaceLogWriter(oldout, olderr)

		return executor(ctx)
	}
}
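
One detail worth calling out from `newJobExecutor` above: post steps are chained with `postExec.Finally(postExecutor)`, so they run in reverse order of their main steps. Below is a standalone sketch of that pattern, with a simplified stand-in for `common.Executor` (not the real `act/common` implementation):

```go
package main

import (
	"context"
	"fmt"
)

// Executor mirrors the shape of common.Executor.
type Executor func(ctx context.Context) error

// Finally runs `then` after `ex` regardless of outcome, keeping the first
// error — a simplified stand-in for common.Executor.Finally.
func (ex Executor) Finally(then Executor) Executor {
	return func(ctx context.Context) error {
		err := ex(ctx)
		if thenErr := then(ctx); err == nil {
			err = thenErr
		}
		return err
	}
}

func main() {
	var post Executor
	for _, name := range []string{"step1", "step2", "step3"} {
		name := name // capture per iteration (needed before Go 1.22)
		p := Executor(func(ctx context.Context) error {
			fmt.Println("post", name)
			return nil
		})
		if post == nil {
			post = p
		} else {
			post = p.Finally(post) // newest post wraps the existing chain
		}
	}
	// Prints "post step3", "post step2", "post step1": reverse of main order.
	_ = post(context.Background())
}
```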