mirror of
https://gitea.com/gitea/act_runner.git
synced 2026-04-24 04:40:22 +08:00
## Summary Many teams self-host Gitea + Act Runner at scale. The current runner design causes excessive HTTP requests to the Gitea server, leading to high server load. This PR addresses three root causes: aggressive fixed-interval polling, per-task status reporting every 1 second regardless of activity, and unoptimized HTTP client configuration. ## Problem The original architecture has these issues: **1. Fixed 1-second reporting interval (RunDaemon)** - Every running task calls ReportLog + ReportState every 1 second (2 HTTP requests/sec/task) - These requests are sent even when there are no new log rows or state changes - With 200 runners × 3 tasks each = **1,200 req/sec just for status reporting** **2. Fixed 2-second polling interval (no backoff)** - Idle runners poll FetchTask every 2 seconds forever, even when no jobs are queued - No exponential backoff or jitter — all runners can synchronize after network recovery (thundering herd) - 200 idle runners = **100 req/sec doing nothing useful** **3. HTTP client not tuned** - Uses http.DefaultClient with MaxIdleConnsPerHost=2, causing frequent TCP/TLS reconnects - Creates two separate http.Client instances (one for Ping, one for Runner service) instead of sharing **Total: ~1,300 req/sec for 200 runners with 3 tasks each** ## Solution ### Adaptive Event-Driven Log Reporting Replace the recursive `time.AfterFunc(1s)` pattern in RunDaemon with a goroutine-based select event loop using three trigger mechanisms: | Trigger | Default | Purpose | |---------|---------|---------| | `log_report_max_latency` | 3s | Guarantee even a single log line is delivered within this time | | `log_report_interval` | 5s | Periodic sweep — steady-state cadence | | `log_report_batch_size` | 100 rows | Immediate flush during bursty output (e.g., npm install) | **Key design**: `log_report_max_latency` (3s) must be less than `log_report_interval` (5s) so the max-latency timer fires before the periodic ticker for single-line scenarios. 
State reporting is decoupled to its own `state_report_interval` (default 5s), with immediate flush on step transitions (start/stop) via a stateNotify channel for responsive frontend UX. Additionally: - Skip ReportLog when `len(rows) == 0` (no pending log rows) - Skip ReportState when `stateChanged == false && len(outputs) == 0` (nothing changed) - Move expensive `proto.Clone` after the early-return check to avoid deep copies on no-op paths ### Polling Backoff with Jitter Replace fixed `rate.Limiter` with adaptive exponential backoff: - Track `consecutiveEmpty` and `consecutiveErrors` counters - Interval doubles with each empty/error response: `base × 2^(n-1)`, capped at `fetch_interval_max` (default 60s) - Add ±20% random jitter to prevent thundering herd - Fetch first, sleep after — preserves burst=1 behavior for immediate first fetch on startup and after task completion ### HTTP Client Tuning - Configure custom `http.Transport` with `MaxIdleConnsPerHost=10` (was 2) - Share a single `http.Client` between PingService and RunnerService - Add `IdleConnTimeout=90s` for clean connection lifecycle ## Load Reduction For 200 runners × 3 tasks (70% with active log output): | Component | Before | After | Reduction | |-----------|--------|-------|-----------| | Polling (idle) | 100 req/s | ~3.4 req/s | 97% | | Log reporting | 420 req/s | ~84 req/s | 80% | | State reporting | 126 req/s | ~25 req/s | 80% | | **Total** | **~1,300 req/s** | **~113 req/s** | **~91%** | ## Frontend UX Impact | Scenario | Before | After | Notes | |----------|--------|-------|-------| | Continuous output (npm install) | ~1s | ~5s | Periodic ticker sweep | | Single line then silence | ~1s | ≤3s | maxLatencyTimer guarantee | | Bursty output (100+ lines) | ~1s | <1s | Batch size immediate flush | | Step start/stop | ~1s | <1s | stateNotify immediate flush | | Job completion | ~1s | ~1s | Close() retry unchanged | ## New Configuration Options All have safe defaults — existing config files need no
changes: ```yaml runner: fetch_interval_max: 60s # Max backoff interval when idle log_report_interval: 5s # Periodic log flush interval log_report_max_latency: 3s # Max time a log row waits (must be < log_report_interval) log_report_batch_size: 100 # Immediate flush threshold state_report_interval: 5s # State flush interval (step transitions are always immediate) ``` Config validation warns on invalid combinations: - `fetch_interval_max < fetch_interval` → auto-corrected - `log_report_max_latency >= log_report_interval` → warning (timer would be redundant) ## Test Plan - [x] `go build ./...` passes - [x] `go test ./...` passes (all existing + 3 new tests) - [x] `golangci-lint run` — 0 issues - [x] TestReporter_MaxLatencyTimer — verifies single log line flushed by maxLatencyTimer before logTicker - [x] TestReporter_BatchSizeFlush — verifies batch size threshold triggers immediate flush - [x] TestReporter_StateNotifyFlush — verifies step transition triggers immediate state flush - [x] TestReporter_EphemeralRunnerDeletion — verifies Close/RunDaemon race safety - [x] TestReporter_RunDaemonClose_Race — verifies concurrent Close safety Reviewed-on: https://gitea.com/gitea/act_runner/pulls/819 Reviewed-by: Nicolas <173651+bircni@noreply.gitea.com> Co-authored-by: Bo-Yi Wu <appleboy.tw@gmail.com> Co-committed-by: Bo-Yi Wu <appleboy.tw@gmail.com>
612 lines
15 KiB
Go
612 lines
15 KiB
Go
// Copyright 2022 The Gitea Authors. All rights reserved.
|
|
// SPDX-License-Identifier: MIT
|
|
|
|
package report
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"fmt"
|
|
"regexp"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
|
|
"connectrpc.com/connect"
|
|
"github.com/avast/retry-go/v4"
|
|
log "github.com/sirupsen/logrus"
|
|
"google.golang.org/protobuf/proto"
|
|
"google.golang.org/protobuf/types/known/timestamppb"
|
|
|
|
"gitea.com/gitea/act_runner/internal/pkg/client"
|
|
"gitea.com/gitea/act_runner/internal/pkg/config"
|
|
)
|
|
|
|
// Reporter buffers task logs and state produced by the job runner (via the
// logrus Hook interface) and uploads them to the Gitea server in adaptive
// batches driven by the daemon loop (see runDaemonLoop).
type Reporter struct {
	// ctx/cancel govern the reporter's lifetime; cancel is invoked when the
	// server reports the task as cancelled (see ReportState).
	ctx    context.Context
	cancel context.CancelFunc

	closed  bool          // set by Close; checked by the daemon loop to know when to exit
	client  client.Client // RPC client used for UpdateLog / UpdateTask
	clientM sync.Mutex    // serializes ReportLog / ReportState calls

	logOffset   int                // absolute index of logRows[0] within the full task log
	logRows     []*runnerv1.LogRow // rows buffered but not yet acknowledged by the server
	logReplacer *strings.Replacer  // masks secret values in log content
	oldnew      []string           // old/new replacement pairs backing logReplacer

	state        *runnerv1.TaskState
	stateChanged bool         // true when state differs from the last successful report
	stateMu      sync.RWMutex // guards state, stateChanged, logRows, logOffset, oldnew, closed
	outputs      sync.Map     // key -> string (pending) or struct{} (acknowledged by server)
	daemon       chan struct{} // closed when the daemon loop has exited

	// Adaptive batching control
	logReportInterval   time.Duration // periodic log sweep cadence
	logReportMaxLatency time.Duration // max time a lone buffered row may wait
	logBatchSize        int           // buffered-row count that triggers an immediate flush
	stateReportInterval time.Duration // periodic state sweep cadence

	// Event notification channels (non-blocking, buffered 1)
	logNotify   chan struct{} // signal: new log rows arrived
	stateNotify chan struct{} // signal: step transition (start/stop)

	debugOutputEnabled  bool   // true when secret ACTIONS_STEP_DEBUG == "true"
	stopCommandEndToken string // non-empty while ::stop-commands:: is in effect
}
|
|
|
|
func NewReporter(ctx context.Context, cancel context.CancelFunc, client client.Client, task *runnerv1.Task, cfg *config.Config) *Reporter {
|
|
var oldnew []string
|
|
if v := task.Context.Fields["token"].GetStringValue(); v != "" {
|
|
oldnew = append(oldnew, v, "***")
|
|
}
|
|
if v := task.Context.Fields["gitea_runtime_token"].GetStringValue(); v != "" {
|
|
oldnew = append(oldnew, v, "***")
|
|
}
|
|
for _, v := range task.Secrets {
|
|
oldnew = append(oldnew, v, "***")
|
|
}
|
|
|
|
rv := &Reporter{
|
|
ctx: ctx,
|
|
cancel: cancel,
|
|
client: client,
|
|
oldnew: oldnew,
|
|
logReplacer: strings.NewReplacer(oldnew...),
|
|
logReportInterval: cfg.Runner.LogReportInterval,
|
|
logReportMaxLatency: cfg.Runner.LogReportMaxLatency,
|
|
logBatchSize: cfg.Runner.LogReportBatchSize,
|
|
stateReportInterval: cfg.Runner.StateReportInterval,
|
|
logNotify: make(chan struct{}, 1),
|
|
stateNotify: make(chan struct{}, 1),
|
|
state: &runnerv1.TaskState{
|
|
Id: task.Id,
|
|
},
|
|
daemon: make(chan struct{}),
|
|
}
|
|
|
|
if task.Secrets["ACTIONS_STEP_DEBUG"] == "true" {
|
|
rv.debugOutputEnabled = true
|
|
}
|
|
|
|
return rv
|
|
}
|
|
|
|
func (r *Reporter) ResetSteps(l int) {
|
|
r.stateMu.Lock()
|
|
defer r.stateMu.Unlock()
|
|
for i := range l {
|
|
r.state.Steps = append(r.state.Steps, &runnerv1.StepState{
|
|
Id: int64(i),
|
|
})
|
|
}
|
|
}
|
|
|
|
// Levels implements the logrus.Hook interface. The reporter subscribes to
// entries of every severity so Fire sees all job output.
func (r *Reporter) Levels() []log.Level {
	return log.AllLevels
}
|
|
|
|
// appendIfNotNil appends v to s only when v is non-nil and returns the
// (possibly unchanged) slice.
func appendIfNotNil[T any](s []*T, v *T) []*T {
	if v == nil {
		return s
	}
	return append(s, v)
}
|
|
|
|
// isJobStepEntry is used to not report composite step results incorrectly as step result
|
|
// returns true if the logentry is on job level
|
|
// returns false for composite action step messages
|
|
func isJobStepEntry(entry *log.Entry) bool {
|
|
if v, ok := entry.Data["stepID"]; ok {
|
|
if v, ok := v.([]string); ok && len(v) > 1 {
|
|
return false
|
|
}
|
|
}
|
|
return true
|
|
}
|
|
|
|
// notifyLog sends a non-blocking signal that new log rows are available.
// logNotify is buffered with capacity 1, so multiple signals coalesce into
// one pending wake-up; the default branch guarantees the send never blocks
// even while the daemon loop is busy.
func (r *Reporter) notifyLog() {
	select {
	case r.logNotify <- struct{}{}:
	default:
	}
}
|
|
|
|
// notifyState sends a non-blocking signal that a UX-critical state change
// occurred (step start/stop, job result). stateNotify is buffered with
// capacity 1 so signals coalesce; the default branch makes the send
// never block.
func (r *Reporter) notifyState() {
	select {
	case r.stateNotify <- struct{}{}:
	default:
	}
}
|
|
|
|
// unlockAndNotify releases stateMu and sends channel notifications.
// Must be called with stateMu held. The notifications are sent after the
// unlock so the daemon loop can acquire the lock as soon as it wakes;
// urgentState additionally triggers an immediate state flush.
func (r *Reporter) unlockAndNotify(urgentState bool) {
	r.stateMu.Unlock()
	r.notifyLog()
	if urgentState {
		r.notifyState()
	}
}
|
|
|
|
// Fire implements the logrus.Hook interface and is the single ingestion
// point for job output. It updates task/step state derived from the entry's
// fields (stage, jobResult, stepNumber, stepResult, raw_output), buffers
// displayable rows into logRows, and wakes the daemon loop. Step start/stop
// and the final job result are flagged urgent so state is flushed
// immediately for the frontend. It always returns nil.
func (r *Reporter) Fire(entry *log.Entry) error {
	urgentState := false

	r.stateMu.Lock()

	r.stateChanged = true

	if log.IsLevelEnabled(log.TraceLevel) {
		log.WithFields(entry.Data).Trace(entry.Message)
	}

	timestamp := entry.Time
	// The first entry of the task marks its start time.
	if r.state.StartedAt == nil {
		r.state.StartedAt = timestamppb.New(timestamp)
	}

	stage := entry.Data["stage"]

	if stage != "Main" {
		if v, ok := entry.Data["jobResult"]; ok {
			if jobResult, ok := r.parseResult(v); ok {
				// We need to ensure log upload before this upload
				r.state.Result = jobResult
				r.state.StoppedAt = timestamppb.New(timestamp)
				// Steps that never reported a result are marked cancelled
				// (or skipped when the whole job was skipped).
				for _, s := range r.state.Steps {
					if s.Result == runnerv1.Result_RESULT_UNSPECIFIED {
						s.Result = runnerv1.Result_RESULT_CANCELLED
						if jobResult == runnerv1.Result_RESULT_SKIPPED {
							s.Result = runnerv1.Result_RESULT_SKIPPED
						}
					}
				}
				urgentState = true
			}
		}
		// Outside the Main stage only job-level output is recorded, and only
		// while no step owns the log stream.
		if !r.duringSteps() {
			r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
		}
		r.unlockAndNotify(urgentState)
		return nil
	}

	// Main stage: resolve which step this entry belongs to.
	var step *runnerv1.StepState
	if v, ok := entry.Data["stepNumber"]; ok {
		if v, ok := v.(int); ok && len(r.state.Steps) > v {
			step = r.state.Steps[v]
		}
	}
	if step == nil {
		// No identifiable step — treat as job-level output.
		if !r.duringSteps() {
			r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
		}
		r.unlockAndNotify(false)
		return nil
	}

	// First entry of a step marks its start — flush promptly for the UI.
	if step.StartedAt == nil {
		step.StartedAt = timestamppb.New(timestamp)
		urgentState = true
	}

	// Force reporting log errors as raw output to prevent silent failures
	if entry.Level == log.ErrorLevel {
		entry.Data["raw_output"] = true
	}

	if v, ok := entry.Data["raw_output"]; ok {
		if rawOutput, ok := v.(bool); ok && rawOutput {
			if row := r.parseLogRow(entry); row != nil {
				if step.LogLength == 0 {
					// First row of this step: record its absolute index.
					step.LogIndex = int64(r.logOffset + len(r.logRows))
				}
				step.LogLength++
				r.logRows = append(r.logRows, row)
			}
		}
	} else if !r.duringSteps() {
		r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
	}
	if v, ok := entry.Data["stepResult"]; ok && isJobStepEntry(entry) {
		if stepResult, ok := r.parseResult(v); ok {
			if step.LogLength == 0 {
				step.LogIndex = int64(r.logOffset + len(r.logRows))
			}
			step.Result = stepResult
			step.StoppedAt = timestamppb.New(timestamp)
			urgentState = true
		}
	}

	r.unlockAndNotify(urgentState)
	return nil
}
|
|
|
|
// RunDaemon starts the background reporting loop and returns immediately.
// The loop runs until the context is cancelled or Close marks the reporter
// closed; it signals its exit by closing r.daemon (see runDaemonLoop).
func (r *Reporter) RunDaemon() {
	go r.runDaemonLoop()
}
|
|
|
|
func (r *Reporter) stopLatencyTimer(active *bool, timer *time.Timer) {
|
|
if *active {
|
|
if !timer.Stop() {
|
|
select {
|
|
case <-timer.C:
|
|
default:
|
|
}
|
|
}
|
|
*active = false
|
|
}
|
|
}
|
|
|
|
// runDaemonLoop is the reporter's background event loop. It replaces fixed
// 1-second polling with four triggers:
//   - logTicker: periodic log sweep every logReportInterval
//   - stateTicker: periodic state sweep every stateReportInterval
//   - logNotify: flush immediately when the buffer reaches logBatchSize,
//     otherwise arm maxLatencyTimer so even a lone row is sent within
//     logReportMaxLatency
//   - stateNotify: step transition / job result — flush logs and state now
//
// The loop exits, closing r.daemon, when the context is cancelled or when
// Close has marked the reporter closed.
func (r *Reporter) runDaemonLoop() {
	logTicker := time.NewTicker(r.logReportInterval)
	stateTicker := time.NewTicker(r.stateReportInterval)

	// maxLatencyTimer ensures the first buffered log row is sent within logReportMaxLatency.
	// Start inactive — it is armed when the first log row arrives in an empty buffer.
	maxLatencyTimer := time.NewTimer(0)
	if !maxLatencyTimer.Stop() {
		<-maxLatencyTimer.C
	}
	maxLatencyActive := false

	defer logTicker.Stop()
	defer stateTicker.Stop()
	defer maxLatencyTimer.Stop()

	for {
		select {
		case <-logTicker.C:
			// Periodic sweep; errors are retried implicitly on the next tick.
			_ = r.ReportLog(false)
			r.stopLatencyTimer(&maxLatencyActive, maxLatencyTimer)

		case <-stateTicker.C:
			_ = r.ReportState(false)

		case <-r.logNotify:
			r.stateMu.RLock()
			n := len(r.logRows)
			r.stateMu.RUnlock()

			if n >= r.logBatchSize {
				// Bursty output: flush now instead of waiting for the ticker.
				_ = r.ReportLog(false)
				r.stopLatencyTimer(&maxLatencyActive, maxLatencyTimer)
			} else if !maxLatencyActive && n > 0 {
				// First row in an empty buffer — guarantee delivery within
				// logReportMaxLatency even if no more output follows.
				maxLatencyTimer.Reset(r.logReportMaxLatency)
				maxLatencyActive = true
			}

		case <-r.stateNotify:
			// Step transition or job result — flush both immediately for frontend UX.
			_ = r.ReportLog(false)
			_ = r.ReportState(false)
			r.stopLatencyTimer(&maxLatencyActive, maxLatencyTimer)

		case <-maxLatencyTimer.C:
			maxLatencyActive = false
			_ = r.ReportLog(false)

		case <-r.ctx.Done():
			close(r.daemon)
			return
		}

		// Close() sets closed and nudges logNotify; exit so Close can take
		// over the final retried upload.
		r.stateMu.RLock()
		closed := r.closed
		r.stateMu.RUnlock()
		if closed {
			close(r.daemon)
			return
		}
	}
}
|
|
|
|
// Logf records a formatted job-level log line. It is the exported,
// lock-acquiring wrapper around logf and is safe for concurrent use.
func (r *Reporter) Logf(format string, a ...any) {
	r.stateMu.Lock()
	defer r.stateMu.Unlock()

	r.logf(format, a...)
}
|
|
|
|
func (r *Reporter) logf(format string, a ...any) {
|
|
if !r.duringSteps() {
|
|
r.logRows = append(r.logRows, &runnerv1.LogRow{
|
|
Time: timestamppb.Now(),
|
|
Content: fmt.Sprintf(format, a...),
|
|
})
|
|
}
|
|
}
|
|
|
|
func (r *Reporter) SetOutputs(outputs map[string]string) {
|
|
r.stateMu.Lock()
|
|
defer r.stateMu.Unlock()
|
|
|
|
for k, v := range outputs {
|
|
if len(k) > 255 {
|
|
r.logf("ignore output because the key is too long: %q", k)
|
|
continue
|
|
}
|
|
if l := len(v); l > 1024*1024 {
|
|
log.Println("ignore output because the value is too long:", k, l)
|
|
r.logf("ignore output because the value %q is too long: %d", k, l)
|
|
}
|
|
if _, ok := r.outputs.Load(k); ok {
|
|
continue
|
|
}
|
|
r.outputs.Store(k, v)
|
|
}
|
|
}
|
|
|
|
// Close finalizes the reporter. If the job never recorded a result it is
// marked failed, unfinished steps are cancelled, and lastWords (defaulting
// to "Early termination") is appended as a final log row; otherwise a
// non-empty lastWords is still appended. Close then waits (up to 60s) for
// the daemon loop to exit and performs a last, retried upload of logs and
// state. The returned error joins both upload results.
func (r *Reporter) Close(lastWords string) error {
	r.stateMu.Lock()
	r.closed = true
	if r.state.Result == runnerv1.Result_RESULT_UNSPECIFIED {
		// The job never reported a result — treat it as a failure and
		// cancel every step that is still unfinished.
		if lastWords == "" {
			lastWords = "Early termination"
		}
		for _, v := range r.state.Steps {
			if v.Result == runnerv1.Result_RESULT_UNSPECIFIED {
				v.Result = runnerv1.Result_RESULT_CANCELLED
			}
		}
		r.state.Result = runnerv1.Result_RESULT_FAILURE
		r.logRows = append(r.logRows, &runnerv1.LogRow{
			Time:    timestamppb.Now(),
			Content: lastWords,
		})
		r.state.StoppedAt = timestamppb.Now()
	} else if lastWords != "" {
		r.logRows = append(r.logRows, &runnerv1.LogRow{
			Time:    timestamppb.Now(),
			Content: lastWords,
		})
	}
	r.stateMu.Unlock()

	// Wake up the daemon loop so it detects closed promptly.
	r.notifyLog()

	// Wait for Acknowledge
	select {
	case <-r.daemon:
	case <-time.After(60 * time.Second):
		close(r.daemon)
		log.Error("No Response from RunDaemon for 60s, continue best effort")
	}

	// Report the job outcome even when all log upload retry attempts have been exhausted
	return errors.Join(
		retry.Do(func() error {
			return r.ReportLog(true)
		}, retry.Context(r.ctx)),
		retry.Do(func() error {
			return r.ReportState(true)
		}, retry.Context(r.ctx)),
	)
}
|
|
|
|
// ReportLog uploads the buffered log rows starting at logOffset. When noMore
// is true (final upload from Close) the server is told no further rows will
// follow; otherwise the call is a no-op while the buffer is empty, which is
// what keeps idle tasks from issuing HTTP requests. On success the buffer is
// trimmed to the server-acknowledged index.
func (r *Reporter) ReportLog(noMore bool) error {
	r.clientM.Lock()
	defer r.clientM.Unlock()

	// Snapshot the current buffer under the read lock.
	r.stateMu.RLock()
	rows := r.logRows
	r.stateMu.RUnlock()

	// Nothing pending and not the final call — skip the request entirely.
	if !noMore && len(rows) == 0 {
		return nil
	}

	resp, err := r.client.UpdateLog(r.ctx, connect.NewRequest(&runnerv1.UpdateLogRequest{
		TaskId: r.state.Id,
		Index:  int64(r.logOffset),
		Rows:   rows,
		NoMore: noMore,
	}))
	if err != nil {
		return err
	}

	ack := int(resp.Msg.AckIndex)
	if ack < r.logOffset {
		// The server acknowledged less than it already had — rows vanished.
		return errors.New("submitted logs are lost")
	}

	// Drop the acknowledged prefix and advance the offset.
	r.stateMu.Lock()
	r.logRows = r.logRows[ack-r.logOffset:]
	submitted := r.logOffset + len(rows)
	r.logOffset = ack
	r.stateMu.Unlock()

	// On the final upload every submitted row must have been acknowledged.
	if noMore && ack < submitted {
		return errors.New("not all logs are submitted")
	}

	return nil
}
|
|
|
|
// ReportState only reports the job result if reportResult is true
// RunDaemon never reports results even if result is set
//
// The request is skipped entirely when nothing changed (no state mutation
// since the last successful report and no pending outputs), which is what
// reduces steady-state HTTP traffic. On server-side cancellation it cancels
// the reporter's context; it returns an error while outputs remain unsent.
func (r *Reporter) ReportState(reportResult bool) error {
	r.clientM.Lock()
	defer r.clientM.Unlock()

	// Build the outputs map first (single Range pass instead of two).
	// Entries still holding a string are pending; acknowledged entries have
	// been overwritten with struct{} (see below) and are skipped here.
	outputs := make(map[string]string)
	r.outputs.Range(func(k, v any) bool {
		if val, ok := v.(string); ok {
			outputs[k.(string)] = val
		}
		return true
	})

	// Consume stateChanged atomically with the snapshot; restored on error
	// below so a concurrent Fire() during UpdateTask isn't silently lost.
	r.stateMu.Lock()
	if !reportResult && !r.stateChanged && len(outputs) == 0 {
		r.stateMu.Unlock()
		return nil
	}
	// Deep-copy the state so UpdateTask works on a consistent snapshot after
	// the lock is released. Clone runs only past the early return above.
	state := proto.Clone(r.state).(*runnerv1.TaskState)
	r.stateChanged = false
	r.stateMu.Unlock()

	if !reportResult {
		// Daemon-driven reports must not leak the final result early.
		state.Result = runnerv1.Result_RESULT_UNSPECIFIED
	}

	resp, err := r.client.UpdateTask(r.ctx, connect.NewRequest(&runnerv1.UpdateTaskRequest{
		State:   state,
		Outputs: outputs,
	}))
	if err != nil {
		r.stateMu.Lock()
		r.stateChanged = true
		r.stateMu.Unlock()
		return err
	}

	// Mark outputs the server confirmed as sent (value becomes struct{}).
	for _, k := range resp.Msg.SentOutputs {
		r.outputs.Store(k, struct{}{})
	}

	if resp.Msg.State != nil && resp.Msg.State.Result == runnerv1.Result_RESULT_CANCELLED {
		r.cancel()
	}

	// Any entry still holding a string was not acknowledged by the server.
	var noSent []string
	r.outputs.Range(func(k, v any) bool {
		if _, ok := v.(string); ok {
			noSent = append(noSent, k.(string))
		}
		return true
	})
	if len(noSent) > 0 {
		return fmt.Errorf("there are still outputs that have not been sent: %v", noSent)
	}

	return nil
}
|
|
|
|
func (r *Reporter) duringSteps() bool {
|
|
if steps := r.state.Steps; len(steps) == 0 {
|
|
return false
|
|
} else if first := steps[0]; first.Result == runnerv1.Result_RESULT_UNSPECIFIED && first.LogLength == 0 {
|
|
return false
|
|
} else if last := steps[len(steps)-1]; last.Result != runnerv1.Result_RESULT_UNSPECIFIED {
|
|
return false
|
|
}
|
|
return true
|
|
}
|
|
|
|
// stringToResult maps the textual result names carried in log entry fields
// (jobResult / stepResult) onto their protobuf Result values.
var stringToResult = map[string]runnerv1.Result{
	"success":   runnerv1.Result_RESULT_SUCCESS,
	"failure":   runnerv1.Result_RESULT_FAILURE,
	"skipped":   runnerv1.Result_RESULT_SKIPPED,
	"cancelled": runnerv1.Result_RESULT_CANCELLED,
}
|
|
|
|
func (r *Reporter) parseResult(result any) (runnerv1.Result, bool) {
|
|
str := ""
|
|
if v, ok := result.(string); ok { // for jobResult
|
|
str = v
|
|
} else if v, ok := result.(fmt.Stringer); ok { // for stepResult
|
|
str = v.String()
|
|
}
|
|
|
|
ret, ok := stringToResult[str]
|
|
return ret, ok
|
|
}
|
|
|
|
// cmdRegex matches workflow commands of the form "::command props::value",
// capturing the command name, optional properties, and the value.
var cmdRegex = regexp.MustCompile(`^::([^ :]+)( .*)?::(.*)$`)
|
|
|
|
func (r *Reporter) handleCommand(originalContent, command, value string) *string {
|
|
if r.stopCommandEndToken != "" && command != r.stopCommandEndToken {
|
|
return &originalContent
|
|
}
|
|
|
|
switch command {
|
|
case "add-mask":
|
|
r.addMask(value)
|
|
return nil
|
|
case "debug":
|
|
if r.debugOutputEnabled {
|
|
return &value
|
|
}
|
|
return nil
|
|
|
|
case "notice":
|
|
// Not implemented yet, so just return the original content.
|
|
return &originalContent
|
|
case "warning":
|
|
// Not implemented yet, so just return the original content.
|
|
return &originalContent
|
|
case "error":
|
|
// Not implemented yet, so just return the original content.
|
|
return &originalContent
|
|
case "group":
|
|
// Returning the original content, because I think the frontend
|
|
// will use it when rendering the output.
|
|
return &originalContent
|
|
case "endgroup":
|
|
// Ditto
|
|
return &originalContent
|
|
case "stop-commands":
|
|
r.stopCommandEndToken = value
|
|
return nil
|
|
case r.stopCommandEndToken:
|
|
r.stopCommandEndToken = ""
|
|
return nil
|
|
}
|
|
return &originalContent
|
|
}
|
|
|
|
func (r *Reporter) parseLogRow(entry *log.Entry) *runnerv1.LogRow {
|
|
content := strings.TrimRightFunc(entry.Message, func(r rune) bool { return r == '\r' || r == '\n' })
|
|
|
|
matches := cmdRegex.FindStringSubmatch(content)
|
|
if matches != nil {
|
|
if output := r.handleCommand(content, matches[1], matches[3]); output != nil {
|
|
content = *output
|
|
} else {
|
|
return nil
|
|
}
|
|
}
|
|
|
|
content = r.logReplacer.Replace(content)
|
|
|
|
return &runnerv1.LogRow{
|
|
Time: timestamppb.New(entry.Time),
|
|
Content: strings.ToValidUTF8(content, "?"),
|
|
}
|
|
}
|
|
|
|
// addMask registers msg as a secret so every later occurrence in log content
// is replaced with "***". Reached via Fire -> parseLogRow -> handleCommand,
// which holds stateMu, so the replacer swap is not racy on that path.
func (r *Reporter) addMask(msg string) {
	r.oldnew = append(r.oldnew, msg, "***")
	r.logReplacer = strings.NewReplacer(r.oldnew...)
}
|