Christopher Homberger
2026-02-22 20:38:07 +01:00
1913 changed files with 526913 additions and 57 deletions

6
.codespellrc Normal file

@@ -0,0 +1,6 @@
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,go.sum,package-lock.json,*.min.*,.codespellrc,testdata,./pkg/runner/hashfiles/index.js
check-hidden = true
ignore-regex = .*Te\{0\}st.*
# ignore-words-list =

156
.gitea/workflows/checks.yml Normal file

@@ -0,0 +1,156 @@
name: checks
on: [pull_request, workflow_dispatch]
concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.ref }}
env:
ACT_OWNER: ${{ github.repository_owner }}
ACT_REPOSITORY: ${{ github.repository }}
CGO_ENABLED: 0
NO_QEMU: 1
NO_EXTERNAL_IP: 1
DOOD: 1
jobs:
lint:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: golangci/golangci-lint-action@v8.0.0
with:
version: v2.1.6
- uses: megalinter/megalinter/flavors/go@v9.1.0
env:
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_ALL_CODEBASE: false
GITHUB_STATUS_REPORTER: ${{ !env.ACT }}
GITHUB_COMMENT_REPORTER: ${{ !env.ACT }}
test-linux:
name: test-linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 2
- name: Cleanup Docker Engine
run: |
docker ps -a --format '{{ if eq (truncate .Names 4) "act-" }}
{{ .ID }}
{{end}}' | xargs -r docker rm -f || :
docker volume ls --format '{{ if eq (truncate .Name 4) "act-" }}
{{ .Name }}
{{ end }}' | xargs -r docker volume rm -f || :
docker images --format '{{ if eq (truncate .Repository 4) "act-" }}
{{ .ID }}
{{ end }}' | xargs -r docker rmi -f || :
docker images -q | xargs -r docker rmi || :
- name: Set up QEMU
if: '!env.NO_QEMU'
uses: docker/setup-qemu-action@v3
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: actions/cache@v4
if: ${{ !env.ACT }}
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install gotestfmt
run: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@v2.5.0
# Regressions from the Gitea Actions CI migration:
# GITHUB_REPOSITORY contains the server URL
# ACTIONS_RUNTIME_URL is provided to every step; act does not override it
- name: Run Tests
run: |
unset ACTIONS_RUNTIME_URL
unset ACTIONS_RESULTS_URL
unset ACTIONS_RUNTIME_TOKEN
export GITHUB_REPOSITORY="${GITHUB_REPOSITORY#${SERVER_URL%/}/}"
export ACT_REPOSITORY="${GITHUB_REPOSITORY#${SERVER_URL%/}/}"
export ACT_OWNER="${ACT_OWNER#${SERVER_URL%/}/}"
env
go test -json -v -cover -coverpkg=./... -coverprofile=coverage.txt -covermode=atomic -timeout 20m ./... | gotestfmt -hide successful-packages,empty-packages 2>&1
env:
SERVER_URL: ${{ github.server_url }}
- name: Run act from cli
run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
- name: Run act from cli without docker support
run: go run -tags WITHOUT_DOCKER main.go -P ubuntu-latest=-self-hosted -C ./pkg/runner/testdata/ -W ./local-action-js/push.yml
snapshot:
name: snapshot
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: actions/cache@v4
if: ${{ !env.ACT }}
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: GoReleaser
id: goreleaser
uses: goreleaser/goreleaser-action@v6
with:
version: v2
args: release --snapshot --clean
- name: Setup Node
continue-on-error: true
uses: actions/setup-node@v6
with:
node-version: 20
- name: Install @actions/artifact@2.1.0
continue-on-error: true
run: npm install @actions/artifact@2.1.0
- name: Upload All
uses: actions/github-script@v8
continue-on-error: true
with:
script: |
// We do not use features that depend on GITHUB_API_URL, so we can hardcode it to avoid the GHES "not supported" error
process.env["GITHUB_SERVER_URL"] = "https://github.com";
const {DefaultArtifactClient} = require('@actions/artifact');
const aartifact = new DefaultArtifactClient();
var artifacts = JSON.parse(process.env.ARTIFACTS);
for(var artifact of artifacts) {
if(artifact.type === "Binary") {
const {id, size} = await aartifact.uploadArtifact(
// name of the artifact
`${artifact.name}-${artifact.target}`,
// files to include (supports absolute and relative paths)
[artifact.path],
process.cwd(),
{
// optional: how long to retain the artifact
// if unspecified, defaults to repository/org retention settings (the limit of this value)
retentionDays: 10
}
);
console.log(`Created artifact with id: ${id} (bytes: ${size})`);
}
}
env:
ARTIFACTS: ${{ steps.goreleaser.outputs.artifacts }}
- name: Chocolatey
uses: ./.github/actions/choco
with:
version: v0.0.0-pr


@@ -0,0 +1,72 @@
name: release
on:
push:
tags:
- v*
jobs:
release:
# TODO use environment to scope secrets
name: release
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: actions/cache@v4
if: ${{ !env.ACT }}
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
version: latest
args: release --clean -f ./.goreleaser.yml -f ./.goreleaser.gitea.yml
env:
GITEA_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || github.token }}
- name: Winget
uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: nektos.act
installers-regex: '_Windows_\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}
if: env.ENABLED
env:
ENABLED: ${{ secrets.WINGET_TOKEN && '1' || '' }}
- name: Chocolatey
uses: ./.github/actions/choco
with:
version: ${{ github.ref }}
apiKey: ${{ secrets.CHOCO_APIKEY }}
push: true
if: env.ENABLED
env:
ENABLED: ${{ secrets.CHOCO_APIKEY && '1' || '' }}
# TODO use ssh deployment key
- name: GitHub CLI extension
uses: actions/github-script@v8
with:
github-token: ${{ secrets.CLI_GITHUB_TOKEN || secrets.GORELEASER_GITHUB_TOKEN }}
script: |
const mainRef = (await github.rest.git.getRef({
owner: context.repo.owner,
repo: 'gh-act',
ref: 'heads/main',
})).data;
console.log(mainRef);
github.rest.git.createRef({
owner: 'nektos',
repo: 'gh-act',
ref: context.ref,
sha: mainRef.object.sha,
});
if: env.ENABLED
env:
ENABLED: ${{ (secrets.CLI_GITHUB_TOKEN || secrets.GORELEASER_GITHUB_TOKEN) && '1' || '' }}

88
.github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file

@@ -0,0 +1,88 @@
name: Bug report
description: Use this template for reporting bugs/issues.
labels:
- 'kind/bug'
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
- type: textarea
id: act-debug
attributes:
label: Bug report info
render: plain text
description: |
Output of `act --bug-report`
placeholder: |
act --bug-report
validations:
required: true
- type: textarea
id: act-command
attributes:
label: Command used with act
description: |
Please paste your whole command
placeholder: |
act -P ubuntu-latest=node:12 -v -d ...
render: sh
validations:
required: true
- type: textarea
id: what-happened
attributes:
label: Describe issue
description: |
Also tell us what you expected to happen.
placeholder: |
Describe issue
validations:
required: true
- type: input
id: repo
attributes:
label: Link to GitHub repository
description: |
Provide a link to the GitHub repository. You can skip this if the repository is private or not hosted on GitHub; otherwise, please include it, as it may help us troubleshoot the problem.
placeholder: |
https://github.com/nektos/act
validations:
required: false
- type: textarea
id: workflow
attributes:
label: Workflow content
description: |
Please paste your **whole** workflow here
placeholder: |
name: My workflow
on: ['push', 'schedule']
jobs:
test:
runs-on: ubuntu-latest
env:
KEY: VAL
[...]
render: yml
validations:
required: true
- type: textarea
id: logs
attributes:
label: Relevant log output
description: |
Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. Please verify that the log output doesn't contain any sensitive data.
render: sh
placeholder: |
Use `act -v` for verbose output
validations:
required: true
- type: textarea
id: additional-info
attributes:
label: Additional information
placeholder: |
Additional information that doesn't fit elsewhere
validations:
required: false

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file

@@ -0,0 +1,8 @@
blank_issues_enabled: true
contact_links:
- name: Start a discussion
url: https://github.com/actions-oss/act-cli/discussions/new
about: You can ask for help here!
- name: Want to contribute to act?
url: https://github.com/actions-oss/act-cli/blob/main/CONTRIBUTING.md
about: Be sure to read contributing guidelines!


@@ -0,0 +1,28 @@
name: Feature request
description: Use this template for requesting a feature/enhancement.
labels:
- 'kind/feature-request'
body:
- type: markdown
attributes:
value: |
Please note that incompatibility with GitHub Actions should be reported as a bug, not requested as a new feature.
- type: input
id: act-version
attributes:
label: Act version
description: |
What version of `act` are you using? The version can be obtained via `act --version`.
If you've built it from source, please provide the commit hash.
placeholder: |
act --version
validations:
required: true
- type: textarea
id: feature
attributes:
label: Feature description
description: Describe the feature that you would like to see
placeholder: ...
validations:
required: true

20
.github/actions/choco/Dockerfile vendored Normal file

@@ -0,0 +1,20 @@
FROM alpine:3.21
ARG CHOCOVERSION=1.1.0
RUN apk add --no-cache bash ca-certificates git \
&& apk --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/edge/community add mono mono-dev \
&& cert-sync /etc/ssl/certs/ca-certificates.crt \
&& wget "https://github.com/chocolatey/choco/archive/${CHOCOVERSION}.tar.gz" -O- | tar -xzf - \
&& cd choco-"${CHOCOVERSION}" \
&& chmod +x build.sh zip.sh \
&& ./build.sh -v \
&& mv ./code_drop/chocolatey/console /opt/chocolatey \
&& mkdir -p /opt/chocolatey/lib \
&& rm -rf /choco-"${CHOCOVERSION}" \
&& apk del mono-dev \
&& rm -rf /var/cache/apk/*
ENV ChocolateyInstall=/opt/chocolatey
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

16
.github/actions/choco/action.yml vendored Normal file

@@ -0,0 +1,16 @@
name: 'Chocolatey Packager'
description: 'Create the choco package and push it'
inputs:
version:
description: 'Version of package'
required: false
apiKey:
description: 'API key for Chocolatey'
required: false
push:
description: 'Whether the package should be pushed'
required: false
default: 'false'
runs:
using: 'docker'
image: 'Dockerfile'

31
.github/actions/choco/entrypoint.sh vendored Executable file

@@ -0,0 +1,31 @@
#!/bin/bash
set -e
function choco {
mono /opt/chocolatey/choco.exe "$@" --allow-unofficial --nocolor
}
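# get_version derives a Chocolatey-compatible version from INPUT_VERSION (or
# `git describe --tags` as a fallback): everything except digits, dots and
# dashes is stripped, and a pre-release suffix such as `0.2.63-5-gabc1234`
# becomes a fourth version segment (`0.2.63.5`).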
function get_version {
local version=${INPUT_VERSION:-$(git describe --tags)}
version=(${version//[!0-9.-]/})
local version_parts=(${version//-/ })
version=${version_parts[0]}
if [ ${#version_parts[@]} -gt 1 ]; then
version=${version_parts}.${version_parts[1]}
fi
echo "$version"
}
## Determine the version to pack
VERSION=$(get_version)
echo "Packing version ${VERSION} of act"
rm -f act-cli.*.nupkg
mkdir -p tools
cp LICENSE tools/LICENSE.txt
cp VERIFICATION tools/VERIFICATION.txt
cp dist/act-cli_windows_amd64*/act.exe tools/
choco pack act-cli.nuspec --version ${VERSION}
if [[ "$INPUT_PUSH" == "true" ]]; then
choco push act-cli.${VERSION}.nupkg --api-key ${INPUT_APIKEY} -s https://push.chocolatey.org/ --timeout 180
fi

23
.github/dependabot.yml vendored Normal file

@@ -0,0 +1,23 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: 'github-actions'
directory: '/'
schedule:
interval: 'monthly'
groups:
dependencies:
patterns:
- '*'
- package-ecosystem: 'gomod'
directory: '/'
schedule:
interval: 'monthly'
groups:
dependencies:
patterns:
- '*'

1
.github/workflows/.gitignore vendored Normal file

@@ -0,0 +1 @@
test-*.yml

151
.github/workflows/checks.yml vendored Normal file

@@ -0,0 +1,151 @@
name: checks
on: [pull_request, workflow_dispatch]
concurrency:
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.ref }}
env:
ACT_OWNER: ${{ github.repository_owner }}
ACT_REPOSITORY: ${{ github.repository }}
CGO_ENABLED: 0
jobs:
lint:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: golangci/golangci-lint-action@v8.0.0
with:
version: v2.1.6
- uses: megalinter/megalinter/flavors/go@v9.1.0
env:
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_ALL_CODEBASE: false
GITHUB_STATUS_REPORTER: ${{ !env.ACT }}
GITHUB_COMMENT_REPORTER: ${{ !env.ACT }}
test-linux:
name: test-linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 2
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: actions/cache@v4
if: ${{ !env.ACT }}
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install gotestfmt
run: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@v2.5.0
- name: Run Tests
run: go test -json -v -cover -coverpkg=./... -coverprofile=coverage.txt -covermode=atomic -timeout 20m ./... | gotestfmt -hide successful-packages,empty-packages 2>&1
- name: Run act from cli
run: go run main.go -P ubuntu-latest=node:16-buster-slim -C ./pkg/runner/testdata/ -W ./basic/push.yml
- name: Run act from cli without docker support
run: go run -tags WITHOUT_DOCKER main.go -P ubuntu-latest=-self-hosted -C ./pkg/runner/testdata/ -W ./local-action-js/push.yml
- name: Upload Codecov report
uses: codecov/codecov-action@v5
with:
files: coverage.txt
fail_ci_if_error: true # optional (default = false)
token: ${{ secrets.CODECOV_TOKEN }}
test-host:
strategy:
matrix:
os:
- windows-latest
- macos-latest
name: test-host-${{matrix.os}}
runs-on: ${{matrix.os}}
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 2
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- name: Install gotestfmt
run: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@v2.5.0
- name: Run Tests
run: go test -v -cover -coverpkg=./... -coverprofile=coverage.txt -covermode=atomic -timeout 20m -run ^TestRunEventHostEnvironment$ ./...
shell: bash
snapshot:
name: snapshot
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: actions/cache@v4
if: ${{ !env.ACT }}
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: GoReleaser
id: goreleaser
uses: goreleaser/goreleaser-action@v6
with:
version: v2
args: release --snapshot --clean
- name: Setup Node
uses: actions/setup-node@v6
with:
node-version: 20
- name: Install @actions/artifact
run: npm install @actions/artifact
- name: Upload All
uses: actions/github-script@v8
with:
script: |
const {DefaultArtifactClient} = require('@actions/artifact');
const aartifact = new DefaultArtifactClient();
var artifacts = JSON.parse(process.env.ARTIFACTS);
for(var artifact of artifacts) {
if(artifact.type === "Binary") {
const {id, size} = await aartifact.uploadArtifact(
// name of the artifact
`${artifact.name}-${artifact.target}`,
// files to include (supports absolute and relative paths)
[artifact.path],
process.cwd(),
{
// optional: how long to retain the artifact
// if unspecified, defaults to repository/org retention settings (the limit of this value)
retentionDays: 10
}
);
console.log(`Created artifact with id: ${id} (bytes: ${size})`);
}
}
env:
ARTIFACTS: ${{ steps.goreleaser.outputs.artifacts }}
- name: Chocolatey
uses: ./.github/actions/choco
with:
version: v0.0.0-pr

23
.github/workflows/codespell.yml vendored Normal file

@@ -0,0 +1,23 @@
# Codespell configuration is within .codespellrc
---
name: Codespell
on:
push:
branches: [master]
pull_request:
branches: [master]
permissions:
contents: read
jobs:
codespell:
name: Check for spelling errors
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Codespell
uses: codespell-project/actions-codespell@v2

30
.github/workflows/promote.yml vendored Normal file

@@ -0,0 +1,30 @@
name: promote
on:
schedule:
- cron: '0 2 1 * *'
workflow_dispatch: {}
jobs:
release:
if: vars.ENABLE_PROMOTE || github.event_name != 'schedule'
name: promote
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
ref: master
token: ${{ secrets.GORELEASER_GITHUB_TOKEN }}
- uses: fregante/setup-git-user@v2
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: actions/cache@v4
if: ${{ !env.ACT }}
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- run: make promote

72
.github/workflows/release.yml vendored Normal file

@@ -0,0 +1,72 @@
name: release
on:
push:
tags:
- v*
jobs:
release:
# TODO use environment to scope secrets
name: release
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- uses: actions/setup-go@v6
with:
go-version-file: go.mod
check-latest: true
- uses: actions/cache@v4
if: ${{ !env.ACT }}
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
version: latest
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || github.token }}
- name: Winget
uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: nektos.act
installers-regex: '_Windows_\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}
if: env.ENABLED
env:
ENABLED: ${{ secrets.WINGET_TOKEN && '1' || '' }}
- name: Chocolatey
uses: ./.github/actions/choco
with:
version: ${{ github.ref }}
apiKey: ${{ secrets.CHOCO_APIKEY }}
push: true
if: env.ENABLED
env:
ENABLED: ${{ secrets.CHOCO_APIKEY && '1' || '' }}
# TODO use ssh deployment key
- name: GitHub CLI extension
uses: actions/github-script@v8
with:
github-token: ${{ secrets.CLI_GITHUB_TOKEN || secrets.GORELEASER_GITHUB_TOKEN }}
script: |
const mainRef = (await github.rest.git.getRef({
owner: context.repo.owner,
repo: 'gh-act',
ref: 'heads/main',
})).data;
console.log(mainRef);
github.rest.git.createRef({
owner: 'nektos',
repo: 'gh-act',
ref: context.ref,
sha: mainRef.object.sha,
});
if: env.ENABLED
env:
ENABLED: ${{ (secrets.CLI_GITHUB_TOKEN || secrets.GORELEASER_GITHUB_TOKEN) && '1' || '' }}

23
.github/workflows/stale.yml vendored Normal file

@@ -0,0 +1,23 @@
name: 'Close stale issues'
on:
schedule:
- cron: '0 0 * * *'
jobs:
stale:
name: Stale
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v10
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'Issue is stale and will be closed in 14 days unless there is new activity'
stale-pr-message: 'PR is stale and will be closed in 14 days unless there is new activity'
stale-issue-label: 'stale'
exempt-issue-labels: 'stale-exempt,kind/feature-request'
stale-pr-label: 'stale'
exempt-pr-labels: 'stale-exempt'
remove-stale-when-updated: 'True'
operations-per-run: 500
days-before-stale: 180
days-before-close: 14

2
.gitleaksignore Normal file

@@ -0,0 +1,2 @@
b910a42edfab7a02b08a52ecef203fd419725642:pkg/container/testdata/docker-pull-options/config.json:generic-api-key:4
710a3ac94c3dc0eaf680d417c87f37f92b4887f4:pkg/container/docker_pull_test.go:generic-api-key:45


@@ -35,56 +35,23 @@ linters:
rules:
main:
deny:
- pkg: io/ioutil
desc: use os or io instead
- pkg: golang.org/x/exp
desc: it's experimental and unreliable
- pkg: github.com/pkg/errors
desc: use builtin errors package instead
nolintlint:
allow-unused: false
require-explanation: true
require-specific: true
desc: Please use "errors" package from standard library
- pkg: gotest.tools/v3
desc: Please keep tests unified using only github.com/stretchr/testify
- pkg: log
desc: Please keep logging unified using only github.com/sirupsen/logrus
gocritic:
enabled-checks:
- equalFold
disabled-checks:
- ifElseChain
revive:
severity: error
rules:
- name: blank-imports
- name: constant-logical-expr
- name: context-as-argument
- name: context-keys-type
- name: dot-imports
- name: empty-lines
- name: error-return
- name: error-strings
- name: exported
- name: identical-branches
- name: if-return
- name: increment-decrement
- name: modifies-value-receiver
- name: package-comments
- name: redefines-builtin-id
- name: superfluous-else
- name: time-naming
- name: unexported-return
- name: var-declaration
- name: var-naming
staticcheck:
checks:
- all
- -ST1005
usetesting:
os-temp-dir: true
perfsprint:
concat-loop: false
govet:
enable:
- nilness
- unusedwrite
gocyclo:
min-complexity: 20
importas:
alias:
- pkg: github.com/sirupsen/logrus
alias: log
- pkg: github.com/stretchr/testify/assert
alias: assert
exclusions:
generated: lax
presets:
@@ -92,21 +59,21 @@ linters:
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- forbidigo
path: cmd
paths:
- report
- third_party$
- builtin$
- examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
formatters:
enable:
- gofmt
- gofumpt
settings:
gofumpt:
extra-rules: true
- goimports
exclusions:
generated: lax
run:
timeout: 10m
paths:
- report
- third_party$
- builtin$
- examples$

3
.goreleaser.gitea.yml Normal file

@@ -0,0 +1,3 @@
gitea_urls:
api: https://gitea.com/api/v1/
download: https://gitea.com/

54
.goreleaser.yml Normal file

@@ -0,0 +1,54 @@
version: 2
before:
hooks:
- go mod tidy
builds:
- env:
- CGO_ENABLED=0
goos:
- darwin
- linux
- windows
goarch:
- amd64
- '386'
- arm64
- arm
- riscv64
goarm:
- '6'
- '7'
ignore:
- goos: windows
goarm: '6'
binary: act
checksum:
name_template: 'checksums.txt'
archives:
- name_template: >-
{{ .ProjectName }}_
{{- title .Os }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
format_overrides:
- goos: windows
formats:
- zip
changelog:
groups:
- title: 'New Features'
regexp: "^.*feat[(\\w)]*:+.*$"
order: 0
- title: 'Bug fixes'
regexp: "^.*fix[(\\w)]*:+.*$"
order: 1
- title: 'Documentation updates'
regexp: "^.*docs[(\\w)]*:+.*$"
order: 2
- title: 'Other'
order: 999
release:
prerelease: auto
mode: append

12
.markdownlint.yml Normal file

@@ -0,0 +1,12 @@
# Default state for all rules
default: true
# MD013/line-length - Line length
MD013:
line_length: 1024
# MD033/no-inline-html - Inline HTML
MD033: false
# MD041/first-line-heading/first-line-h1 - First line in a file should be a top-level heading
MD041: false

20
.mega-linter.yml Normal file

@@ -0,0 +1,20 @@
---
APPLY_FIXES: none
DISABLE:
- ACTION
- BASH
- COPYPASTE
- DOCKERFILE
- GO
- JAVASCRIPT
- SPELL
DISABLE_LINTERS:
- YAML_YAMLLINT
- MARKDOWN_MARKDOWN_TABLE_FORMATTER
- MARKDOWN_MARKDOWN_LINK_CHECK
- REPOSITORY_CHECKOV
- REPOSITORY_TRIVY
FILTER_REGEX_EXCLUDE: (.*testdata/*|install.sh|pkg/container/docker_cli.go|pkg/container/DOCKER_LICENSE|VERSION)
MARKDOWN_MARKDOWNLINT_CONFIG_FILE: .markdownlint.yml
PARALLEL: false
PRINT_ALPACA: false

98
.mergify.yml Normal file

@@ -0,0 +1,98 @@
pull_request_rules:
- name: warn on conflicts
conditions:
- -draft
- -closed
- -merged
- conflict
actions:
comment:
message: '@{{author}} this pull request is now in conflict 😩'
label:
add:
- conflict
- name: remove conflict label if not needed
conditions:
- -conflict
actions:
label:
remove:
- conflict
- name: warn on needs-work
conditions:
- -draft
- -closed
- -merged
- or:
- check-failure=lint
- check-failure=test-linux
- check-failure=codecov/patch
- check-failure=codecov/project
- check-failure=snapshot
actions:
comment:
message: '@{{author}} this pull request has failed checks 🛠'
label:
add:
- needs-work
- name: remove needs-work label if not needed
conditions:
- check-success=lint
- check-success=test-linux
- check-success=codecov/patch
- check-success=codecov/project
- check-success=snapshot
actions:
label:
remove:
- needs-work
- name: Automatic maintainer assignment
conditions:
- '-approved-reviews-by=@nektos/act-maintainers'
- -draft
- -merged
- -closed
- -conflict
- check-success=lint
- check-success=test-linux
- check-success=codecov/patch
- check-success=codecov/project
- check-success=snapshot
actions:
request_reviews:
teams:
- '@nektos/act-maintainers'
- name: Automatic merge on approval
conditions: []
actions:
queue:
queue_rules:
- name: default
queue_conditions:
- '#changes-requested-reviews-by=0'
- or:
- 'approved-reviews-by=@nektos/act-committers'
- 'author~=^dependabot(|-preview)\[bot\]$'
- and:
- 'approved-reviews-by=@nektos/act-maintainers'
- '#approved-reviews-by>=2'
- and:
- 'author=@nektos/act-maintainers'
- 'approved-reviews-by=@nektos/act-maintainers'
- '#approved-reviews-by>=1'
- -draft
- -merged
- -closed
- check-success=lint
- check-success=test-linux
- check-success=codecov/patch
- check-success=codecov/project
- check-success=snapshot
merge_conditions:
- check-success=lint
- check-success=test-linux
- check-success=codecov/patch
- check-success=codecov/project
- check-success=snapshot
merge_method: squash

2
.prettierignore Normal file

@@ -0,0 +1,2 @@
**/testdata
pkg/runner/res

7
.prettierrc.yml Normal file

@@ -0,0 +1,7 @@
overrides:
- files: '*.yml'
options:
singleQuote: true
- files: '*.json'
options:
singleQuote: false

9
.vscode/extensions.json vendored Normal file

@@ -0,0 +1,9 @@
{
"recommendations": [
"editorconfig.editorconfig",
"golang.go",
"davidanson.vscode-markdownlint",
"esbenp.prettier-vscode",
"redhat.vscode-yaml"
]
}

14
.vscode/settings.json vendored Normal file

@@ -0,0 +1,14 @@
{
"go.lintTool": "golangci-lint",
"go.lintFlags": ["--fix"],
"go.testTimeout": "300s",
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[markdown]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[yaml]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
}
}

1
CODEOWNERS Normal file

@@ -0,0 +1 @@
* @nektos/act-maintainers

69
CONTRIBUTING.md Normal file

@@ -0,0 +1,69 @@
# Contributing to Act
Help wanted! We'd love your contributions to Act. Please review the following guidelines before contributing. Also, feel free to propose changes to these guidelines by updating this file and submitting a pull request.
- [I have a question...](#questions)
- [I found a bug...](#bugs)
- [I have a feature request...](#features)
- [I have a contribution to share...](#process)
## <a id="questions"></a> Have a Question?
Please don't open a GitHub issue for questions about how to use `act`; the goal is to use issues for managing bugs and feature requests. Issues related to general support will be closed and redirected to the discussions board.
For all support-related questions, please ask in discussions: [actions-oss/act-cli](https://github.com/actions-oss/act-cli/discussions).
## <a id="bugs"></a> Found a Bug?
If you've identified a bug in `act`, please [submit an issue](#issue) to our GitHub repo: [actions-oss/act-cli](https://github.com/actions-oss/act-cli/issues/new). Please also feel free to submit a [Pull Request](#pr) with a fix for the bug!
## <a id="features"></a> Have a Feature Request?
All feature requests should start with [submitting an issue](#issue) documenting the user story and acceptance criteria. Again, feel free to submit a [Pull Request](#pr) with a proposed implementation of the feature.
## <a id="process"></a> Ready to Contribute
### <a id="issue"></a> Create an issue
Before submitting a new issue, please search the existing issues to make sure a similar one doesn't already exist.
If none exists, please include the required information when submitting the issue so that we can quickly reproduce it.
We may have additional questions and will communicate through the GitHub issue, so please respond to our questions to help reproduce and resolve the issue as quickly as possible.
New issues can be created in our [GitHub repo](https://github.com/actions-oss/act-cli/issues/new).
### <a id="pr"></a>Pull Requests
Pull requests should target the `master` branch. Please also reference the issue from the description of the pull request using [special keyword syntax](https://help.github.com/articles/closing-issues-via-commit-messages/) to auto-close the issue when the PR is merged. For example, include the phrase `fixes #14` in the PR description to have issue #14 closed automatically. Please send documentation updates for the [act user guide](https://actions-oss.github.io/act-docs/) to [actions-oss/act-docs](https://github.com/actions-oss/act-docs).
### <a id="style"></a> Styleguide
When submitting code, please make every effort to follow existing conventions and style in order to keep the code as readable as possible. Here are a few points to keep in mind:
- Please run `go fmt ./...` before committing to ensure code aligns with Go standards.
- We use [`golangci-lint`](https://golangci-lint.run/) for linting Go code; run `golangci-lint run --fix` before submitting a PR. Editors such as Visual Studio Code or JetBrains IntelliJ with the Go plugin will run `golangci-lint` automatically.
- There are additional linters and formatters for files such as Markdown documents or YAML/JSON:
- Please refer to the [Makefile](Makefile) or the [`lint` job in our workflow](.github/workflows/checks.yml) to see how those linters/formatters work.
- You can lint the codebase by running `go run main.go -j lint --env RUN_LOCAL=true` or `act -j lint --env RUN_LOCAL=true`
- In the `Makefile`, there are tools that require `npx`, which ships with `nodejs`.
- Our `Makefile` exports `GITHUB_TOKEN` from `~/.config/github/token`; you have been warned.
- You can run `make pr` to clean up dependencies, format/lint code and run tests.
- All dependencies must be defined in the `go.mod` file.
- Advanced IDEs and code editors (like VSCode) will take care of that, but to be sure, run `go mod tidy` to validate dependencies.
- For details on the approved style, check out [Effective Go](https://golang.org/doc/effective_go.html).
- Before running tests, please be aware that they are multi-architecture; to keep them from failing, first run `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` ([more info available in #765](https://github.com/nektos/act/issues/765)).
Also, consider the original design principles:
- **Polyglot** - There will be no prescribed language or framework for developing the microservices. The only requirement will be that the service will be run inside a container and exposed via an HTTP endpoint.
- **Cloud Provider** - At this point, the tool will assume AWS for the cloud provider and will not be written in a cloud agnostic manner. However, this does not preclude refactoring to add support for other providers at a later time.
- **Declarative** - All resource administration will be handled in a declarative vs. imperative manner. A file will be used to declare the desired state of the resources, and the tool will simply assert that the actual state matches the desired state. The tool will accomplish this by generating CloudFormation templates.
- **Stateless** - The tool will not maintain its own state. Rather, it will rely on the CloudFormation stacks to determine the state of the platform.
- **Secure** - All security will be managed by AWS IAM credentials. No additional authentication or authorization mechanisms will be introduced.
### License
By contributing your code, you agree to license your contribution under the terms of the [MIT License](LICENSE).
All files are released with the MIT license.

5
VERIFICATION Normal file

@@ -0,0 +1,5 @@
VERIFICATION
Verification is intended to assist the Chocolatey moderators and community
in verifying that this package's contents are trustworthy.
Checksums: https://github.com/nektos/act/releases, in the checksums.txt file

1
VERSION Normal file

@@ -0,0 +1 @@
0.4.0

26
act-cli.nuspec Normal file

@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Do not remove this test for UTF-8: if “Ω” doesn't appear as a greek uppercase omega letter enclosed in quotation marks, you should use an editor that supports UTF-8, not this one. -->
<package xmlns="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd">
<metadata>
<id>act-cli</id>
<version>0.0.0</version>
<packageSourceUrl>https://github.com/nektos/act</packageSourceUrl>
<owners>nektos</owners>
<title>act (GitHub Actions CLI)</title>
<authors>nektos</authors>
<projectUrl>https://github.com/nektos/act</projectUrl>
<iconUrl>https://raw.githubusercontent.com/wiki/nektos/act/img/logo-150.png</iconUrl>
<copyright>Nektos</copyright>
<licenseUrl>https://raw.githubusercontent.com/nektos/act/master/LICENSE</licenseUrl>
<requireLicenseAcceptance>true</requireLicenseAcceptance>
<projectSourceUrl>https://github.com/nektos/act</projectSourceUrl>
<docsUrl>https://raw.githubusercontent.com/nektos/act/master/README.md</docsUrl>
<bugTrackerUrl>https://github.com/nektos/act/issues</bugTrackerUrl>
<tags>act github-actions actions golang ci devops</tags>
<summary>Run your GitHub Actions locally 🚀</summary>
<description>Run your GitHub Actions locally 🚀</description>
</metadata>
<files>
<file src="tools/**" target="tools" />
</files>
</package>

27
cmd/dir.go Normal file

@@ -0,0 +1,27 @@
package cmd
import (
"os"
"path/filepath"
log "github.com/sirupsen/logrus"
)
var (
UserHomeDir string
CacheHomeDir string
)
func init() {
home, err := os.UserHomeDir()
if err != nil {
log.Fatal(err)
}
UserHomeDir = home
if v := os.Getenv("XDG_CACHE_HOME"); v != "" {
CacheHomeDir = v
} else {
CacheHomeDir = filepath.Join(UserHomeDir, ".cache")
}
}

45
cmd/execute_test.go Normal file

@@ -0,0 +1,45 @@
package cmd
import (
"context"
"os"
"testing"
)
// Helper function to test main with different os.Args
func testMain(args []string) (exitCode int) {
// Save original os.Args and defer restoring it
origArgs := os.Args
defer func() { os.Args = origArgs }()
// Reset exitFunc back to os.Exit when this helper returns
defer func() { exitFunc = os.Exit }()
// Mock os.Exit
fakeExit := func(code int) {
exitCode = code
}
exitFunc = fakeExit
// Mock os.Args
os.Args = args
// Run the main function
Execute(context.Background(), "")
return exitCode
}
func TestMainHelp(t *testing.T) {
exitCode := testMain([]string{"cmd", "--help"})
if exitCode != 0 {
t.Errorf("expected exit code 0, got %d", exitCode)
}
}
func TestMainNoArgsError(t *testing.T) {
exitCode := testMain([]string{"cmd"})
if exitCode != 1 {
t.Errorf("expected exit code 1, got %d", exitCode)
}
}

38
cmd/graph.go Normal file

@@ -0,0 +1,38 @@
package cmd
import (
"os"
"github.com/actions-oss/act-cli/pkg/common"
"github.com/actions-oss/act-cli/pkg/model"
)
func drawGraph(plan *model.Plan) error {
drawings := make([]*common.Drawing, 0)
jobPen := common.NewPen(common.StyleSingleLine, 96)
arrowPen := common.NewPen(common.StyleNoLine, 97)
for i, stage := range plan.Stages {
if i > 0 {
drawings = append(drawings, arrowPen.DrawArrow())
}
ids := make([]string, 0)
for _, r := range stage.Runs {
ids = append(ids, r.String())
}
drawings = append(drawings, jobPen.DrawBoxes(ids...))
}
maxWidth := 0
for _, d := range drawings {
if d.GetWidth() > maxWidth {
maxWidth = d.GetWidth()
}
}
for _, d := range drawings {
d.Draw(os.Stdout, maxWidth)
}
return nil
}

118
cmd/input.go Normal file

@@ -0,0 +1,118 @@
package cmd
import (
"path/filepath"
log "github.com/sirupsen/logrus"
)
// Input contains the input for the root command
type Input struct {
actor string
workdir string
workflowsPath string
autodetectEvent bool
eventPath string
reuseContainers bool
bindWorkdir bool
secrets []string
vars []string
envs []string
inputs []string
platforms []string
dryrun bool
pullIfNeeded bool
noRebuild bool
noOutput bool
envfile string
inputfile string
secretfile string
varfile string
insecureSecrets bool
defaultBranch string
privileged bool
usernsMode string
containerArchitecture string
containerDaemonSocket string
containerOptions string
workflowRecurse bool
useGitIgnore bool
githubInstance string
gitHubServerURL string
gitHubAPIServerURL string
gitHubGraphQlAPIServerURL string
containerCapAdd []string
containerCapDrop []string
autoRemove bool
artifactServerPath string
artifactServerAddr string
artifactServerPort string
noCacheServer bool
cacheServerPath string
cacheServerAddr string
cacheServerPort uint16
jsonLogger bool
noSkipCheckout bool
remoteName string
replaceGheActionWithGithubCom []string
replaceGheActionTokenWithGithubCom string
matrix []string
actionCachePath string
actionOfflineMode bool
logPrefixJobID bool
networkName string
localRepository []string
listOptions bool
validate bool
strict bool
parallel int
gitea bool
}
func (i *Input) resolve(path string) string {
basedir, err := filepath.Abs(i.workdir)
if err != nil {
log.Fatal(err)
}
if path == "" {
return path
}
if !filepath.IsAbs(path) {
path = filepath.Join(basedir, path)
}
return path
}
// Envfile returns path to .env
func (i *Input) Envfile() string {
return i.resolve(i.envfile)
}
// Secretfile returns path to secrets
func (i *Input) Secretfile() string {
return i.resolve(i.secretfile)
}
func (i *Input) Varfile() string {
return i.resolve(i.varfile)
}
// Workdir returns path to workdir
func (i *Input) Workdir() string {
return i.resolve(".")
}
// WorkflowsPath returns path to workflow file(s)
func (i *Input) WorkflowsPath() string {
return i.resolve(i.workflowsPath)
}
// EventPath returns the path to events file
func (i *Input) EventPath() string {
return i.resolve(i.eventPath)
}
// Inputfile returns the path to the input file
func (i *Input) Inputfile() string {
return i.resolve(i.inputfile)
}
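A rough usage sketch of how resolve behaves (not part of this commit; assumes package cmd, a Unix-like filesystem, and a hypothetical test file): relative paths such as the default .env are joined onto the absolute working directory, while absolute paths and empty values pass through unchanged.

package cmd

import "testing"

// Hypothetical sketch of a test exercising Input.resolve via its accessors.
func TestResolveSketch(t *testing.T) {
	i := &Input{workdir: "/home/user/project", envfile: ".env"}
	if got := i.Envfile(); got != "/home/user/project/.env" {
		t.Errorf("expected workdir-relative path, got %q", got)
	}
	// Absolute paths are returned unchanged.
	i.envfile = "/etc/act/.env"
	if got := i.Envfile(); got != "/etc/act/.env" {
		t.Errorf("expected absolute path to pass through, got %q", got)
	}
}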

107
cmd/list.go Normal file

@@ -0,0 +1,107 @@
package cmd
import (
"fmt"
"strconv"
"strings"
"github.com/actions-oss/act-cli/pkg/model"
)
func printList(plan *model.Plan) error {
type lineInfoDef struct {
jobID string
jobName string
stage string
wfName string
wfFile string
events string
}
lineInfos := []lineInfoDef{}
header := lineInfoDef{
jobID: "Job ID",
jobName: "Job name",
stage: "Stage",
wfName: "Workflow name",
wfFile: "Workflow file",
events: "Events",
}
jobs := map[string]bool{}
duplicateJobIDs := false
jobIDMaxWidth := len(header.jobID)
jobNameMaxWidth := len(header.jobName)
stageMaxWidth := len(header.stage)
wfNameMaxWidth := len(header.wfName)
wfFileMaxWidth := len(header.wfFile)
eventsMaxWidth := len(header.events)
for i, stage := range plan.Stages {
for _, r := range stage.Runs {
jobID := r.JobID
line := lineInfoDef{
jobID: jobID,
jobName: r.String(),
stage: strconv.Itoa(i),
wfName: r.Workflow.Name,
wfFile: r.Workflow.File,
events: strings.Join(r.Workflow.On(), `,`),
}
if _, ok := jobs[jobID]; ok {
duplicateJobIDs = true
} else {
jobs[jobID] = true
}
lineInfos = append(lineInfos, line)
if jobIDMaxWidth < len(line.jobID) {
jobIDMaxWidth = len(line.jobID)
}
if jobNameMaxWidth < len(line.jobName) {
jobNameMaxWidth = len(line.jobName)
}
if stageMaxWidth < len(line.stage) {
stageMaxWidth = len(line.stage)
}
if wfNameMaxWidth < len(line.wfName) {
wfNameMaxWidth = len(line.wfName)
}
if wfFileMaxWidth < len(line.wfFile) {
wfFileMaxWidth = len(line.wfFile)
}
if eventsMaxWidth < len(line.events) {
eventsMaxWidth = len(line.events)
}
}
}
jobIDMaxWidth += 2
jobNameMaxWidth += 2
stageMaxWidth += 2
wfNameMaxWidth += 2
wfFileMaxWidth += 2
fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
-stageMaxWidth, header.stage,
-jobIDMaxWidth, header.jobID,
-jobNameMaxWidth, header.jobName,
-wfNameMaxWidth, header.wfName,
-wfFileMaxWidth, header.wfFile,
-eventsMaxWidth, header.events,
)
for _, line := range lineInfos {
fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
-stageMaxWidth, line.stage,
-jobIDMaxWidth, line.jobID,
-jobNameMaxWidth, line.jobName,
-wfNameMaxWidth, line.wfName,
-wfFileMaxWidth, line.wfFile,
-eventsMaxWidth, line.events,
)
}
if duplicateJobIDs {
fmt.Print("\nDetected multiple jobs with the same job name, use `-W` to specify the path to the specific workflow.\n")
}
return nil
}

22
cmd/platforms.go Normal file

@@ -0,0 +1,22 @@
package cmd
import (
"strings"
)
func (i *Input) newPlatforms() map[string]string {
platforms := map[string]string{
"ubuntu-latest": "node:16-buster-slim",
"ubuntu-22.04": "node:16-bullseye-slim",
"ubuntu-20.04": "node:16-buster-slim",
"ubuntu-18.04": "node:16-buster-slim",
}
for _, p := range i.platforms {
pParts := strings.SplitN(p, "=", 2)
if len(pParts) == 2 {
platforms[strings.ToLower(pParts[0])] = pParts[1]
}
}
return platforms
}
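A small illustration (not part of this commit) of how a -P/--platform mapping overrides the built-in runner-label-to-image table built by newPlatforms; the image name below is an arbitrary example:

package cmd

import "fmt"

// Hypothetical example test for newPlatforms.
func ExampleInput_newPlatforms() {
	i := &Input{platforms: []string{"ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest"}}
	fmt.Println(i.newPlatforms()["ubuntu-latest"])
	// Output: ghcr.io/catthehacker/ubuntu:act-latest
}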

860
cmd/root.go Normal file

@@ -0,0 +1,860 @@
package cmd
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"strings"
"github.com/AlecAivazis/survey/v2"
"github.com/adrg/xdg"
"github.com/andreaskoch/go-fswatch"
docker_container "github.com/docker/docker/api/types/container"
"github.com/joho/godotenv"
gitignore "github.com/sabhiram/go-gitignore"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"github.com/spf13/pflag"
"gopkg.in/yaml.v3"
"github.com/actions-oss/act-cli/pkg/artifactcache"
"github.com/actions-oss/act-cli/pkg/artifacts"
"github.com/actions-oss/act-cli/pkg/common"
"github.com/actions-oss/act-cli/pkg/container"
"github.com/actions-oss/act-cli/pkg/gh"
"github.com/actions-oss/act-cli/pkg/model"
"github.com/actions-oss/act-cli/pkg/runner"
"github.com/actions-oss/act-cli/pkg/schema"
)
type Flag struct {
Name string `json:"name"`
Default string `json:"default"`
Type string `json:"type"`
Description string `json:"description"`
}
var exitFunc = os.Exit
// Execute is the entry point to running the CLI
func Execute(ctx context.Context, version string) {
input := new(Input)
rootCmd := createRootCommand(ctx, input, version)
if err := rootCmd.Execute(); err != nil {
exitFunc(1)
}
}
func createRootCommand(ctx context.Context, input *Input, version string) *cobra.Command {
rootCmd := &cobra.Command{
Use: "act [event name to run] [flags]\n\nIf no event name is passed, it will default to \"on: push\"\nIf the workflow handles only one event, that event will be used as the default instead of \"on: push\"\nSee documentation at: https://gitea.com/actions-oss/act-cli or https://github.com/actions-oss/act-cli",
Short: "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
Args: cobra.MaximumNArgs(1),
RunE: newRunCommand(ctx, input),
PersistentPreRun: setup(input),
Version: version,
SilenceUsage: true,
}
rootCmd.Flags().BoolP("watch", "w", false, "watch the contents of the local repo and run when files change")
rootCmd.Flags().BoolVar(&input.validate, "validate", false, "validate workflows")
rootCmd.Flags().BoolVar(&input.strict, "strict", false, "use strict workflow schema")
rootCmd.Flags().BoolP("list", "l", false, "list workflows")
rootCmd.Flags().BoolP("graph", "g", false, "draw workflows")
rootCmd.Flags().StringP("job", "j", "", "run a specific job ID")
rootCmd.Flags().BoolP("bug-report", "", false, "Display system information for bug report")
rootCmd.Flags().BoolP("man-page", "", false, "Print a generated manual page to stdout")
rootCmd.Flags().StringVar(&input.remoteName, "remote-name", "origin", "git remote name that will be used to retrieve url of git repo")
rootCmd.Flags().StringArrayVarP(&input.secrets, "secret", "s", []string{}, "secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)")
rootCmd.Flags().StringArrayVar(&input.vars, "var", []string{}, "variable to make available to actions with optional value (e.g. --var myvar=foo or --var myvar)")
rootCmd.Flags().StringArrayVarP(&input.envs, "env", "", []string{}, "env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)")
rootCmd.Flags().StringArrayVarP(&input.inputs, "input", "", []string{}, "action input to make available to actions (e.g. --input myinput=foo)")
rootCmd.Flags().StringArrayVarP(&input.platforms, "platform", "P", []string{}, "custom image to use per platform (e.g. -P ubuntu-18.04=nektos/act-environments-ubuntu:18.04)")
rootCmd.Flags().BoolVarP(&input.reuseContainers, "reuse", "r", false, "don't remove container(s) on successfully completed workflow(s) to maintain state between runs")
rootCmd.Flags().BoolVarP(&input.bindWorkdir, "bind", "b", false, "bind working directory to container, rather than copy")
rootCmd.Flags().BoolVarP(&input.pullIfNeeded, "pull-if-needed", "", false, "only pull docker image(s) if not present")
rootCmd.Flags().BoolVarP(&input.noRebuild, "no-rebuild", "", false, "don't rebuild local action docker image(s) if already present for the correct platform")
rootCmd.Flags().BoolVarP(&input.autodetectEvent, "detect-event", "", false, "Use first event type from workflow as event that triggered the workflow")
rootCmd.Flags().StringVarP(&input.eventPath, "eventpath", "e", "", "path to event JSON file")
rootCmd.Flags().StringVar(&input.defaultBranch, "defaultbranch", "", "the name of the main branch")
rootCmd.Flags().BoolVar(&input.privileged, "privileged", false, "use privileged mode")
rootCmd.Flags().StringVar(&input.usernsMode, "userns", "", "user namespace to use")
rootCmd.Flags().BoolVar(&input.useGitIgnore, "use-gitignore", true, "Controls whether paths specified in .gitignore should be copied into container")
rootCmd.Flags().StringArrayVarP(&input.containerCapAdd, "container-cap-add", "", []string{}, "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)")
rootCmd.Flags().StringArrayVarP(&input.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)")
rootCmd.Flags().BoolVar(&input.autoRemove, "rm", false, "automatically remove container(s)/volume(s) after a workflow(s) failure")
rootCmd.Flags().StringArrayVarP(&input.replaceGheActionWithGithubCom, "replace-ghe-action-with-github-com", "", []string{}, "If you are using GitHub Enterprise Server and allow specified actions from GitHub (github.com), you can list those actions here. (e.g. --replace-ghe-action-with-github-com=github/super-linter)")
rootCmd.Flags().StringVar(&input.replaceGheActionTokenWithGithubCom, "replace-ghe-action-token-with-github-com", "", "If you are using replace-ghe-action-with-github-com and you want to use private actions on GitHub, you have to set a personal access token")
rootCmd.Flags().StringArrayVarP(&input.matrix, "matrix", "", []string{}, "specify which matrix configuration to include (e.g. --matrix java:13)")
rootCmd.Flags().IntVarP(&input.parallel, "parallel", "", 0, "number of jobs to run in parallel")
rootCmd.Flags().IntVarP(&input.parallel, "concurrent-jobs", "", 0, "number of jobs to run in parallel")
rootCmd.PersistentFlags().StringVarP(&input.actor, "actor", "a", "nektos/act", "user that triggered the event")
rootCmd.PersistentFlags().StringVarP(&input.workflowsPath, "workflows", "W", "./.github/workflows/", "path to workflow file(s)")
rootCmd.PersistentFlags().BoolVarP(&input.workflowRecurse, "recurse", "", false, "Flag to enable running workflows from subdirectories of the path specified in the '--workflows'/'-W' flag; this feature doesn't exist on GitHub Actions as of 2024/11")
rootCmd.PersistentFlags().StringVarP(&input.workdir, "directory", "C", ".", "working directory")
rootCmd.PersistentFlags().BoolP("verbose", "v", false, "verbose output")
rootCmd.PersistentFlags().BoolVar(&input.jsonLogger, "json", false, "Output logs in json format")
rootCmd.PersistentFlags().BoolVar(&input.logPrefixJobID, "log-prefix-job-id", false, "Output the job id within non-json logs instead of the entire name")
rootCmd.PersistentFlags().BoolVarP(&input.noOutput, "quiet", "q", false, "disable logging of output from steps")
rootCmd.PersistentFlags().BoolVarP(&input.dryrun, "dryrun", "n", false, "disable container creation, validates only workflow correctness")
rootCmd.PersistentFlags().StringVarP(&input.secretfile, "secret-file", "", ".secrets", "file with list of secrets to read from (e.g. --secret-file .secrets)")
rootCmd.PersistentFlags().StringVarP(&input.varfile, "var-file", "", ".vars", "file with list of vars to read from (e.g. --var-file .vars)")
rootCmd.PersistentFlags().BoolVarP(&input.insecureSecrets, "insecure-secrets", "", false, "NOT RECOMMENDED! Doesn't hide secrets while printing logs.")
rootCmd.PersistentFlags().StringVarP(&input.envfile, "env-file", "", ".env", "environment file to read and use as env in the containers")
rootCmd.PersistentFlags().StringVarP(&input.inputfile, "input-file", "", ".input", "input file to read and use as action input")
rootCmd.PersistentFlags().StringVarP(&input.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
rootCmd.PersistentFlags().StringVarP(&input.containerDaemonSocket, "container-daemon-socket", "", "", "URI to Docker Engine socket (e.g.: unix://~/.docker/run/docker.sock or - to disable bind mounting the socket)")
rootCmd.PersistentFlags().StringVarP(&input.containerOptions, "container-options", "", "", "Custom docker container options for the job container without an options property in the job definition")
rootCmd.PersistentFlags().StringVarP(&input.githubInstance, "github-instance", "", "github.com", "GitHub instance to use. Only use this when using GitHub Enterprise Server.")
rootCmd.PersistentFlags().StringVarP(&input.gitHubServerURL, "github-server-url", "", "", "Fully qualified URL to the GitHub instance to use with http/https protocol. Only use this when using GitHub Enterprise Server or Gitea.")
rootCmd.PersistentFlags().StringVarP(&input.gitHubAPIServerURL, "github-api-server-url", "", "", "Fully qualified URL to the GitHub instance api url to use with http/https protocol. Only use this when using GitHub Enterprise Server or Gitea.")
rootCmd.PersistentFlags().StringVarP(&input.gitHubGraphQlAPIServerURL, "github-graph-ql-api-server-url", "", "", "Fully qualified URL to the GitHub instance graphql api to use with http/https protocol. Only use this when using GitHub Enterprise Server or Gitea.")
rootCmd.PersistentFlags().StringVarP(&input.artifactServerPath, "artifact-server-path", "", "", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
rootCmd.PersistentFlags().StringVarP(&input.artifactServerAddr, "artifact-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the artifact server binds.")
rootCmd.PersistentFlags().StringVarP(&input.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens.")
rootCmd.PersistentFlags().BoolVarP(&input.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout")
rootCmd.PersistentFlags().BoolVarP(&input.noCacheServer, "no-cache-server", "", false, "Disable cache server")
rootCmd.PersistentFlags().StringVarP(&input.cacheServerPath, "cache-server-path", "", filepath.Join(CacheHomeDir, "actcache"), "Defines the path where the cache server stores caches.")
rootCmd.PersistentFlags().StringVarP(&input.cacheServerAddr, "cache-server-addr", "", common.GetOutboundIP().String(), "Defines the address to which the cache server binds.")
rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the cache server listens. 0 means a randomly available port.")
rootCmd.PersistentFlags().StringVarP(&input.actionCachePath, "action-cache-path", "", filepath.Join(CacheHomeDir, "act"), "Defines the path where the actions get cached and host workspaces created.")
rootCmd.PersistentFlags().BoolVarP(&input.actionOfflineMode, "action-offline-mode", "", false, "If the action contents already exist, they will not be fetched and pulled again. Turning this on disables force pull.")
rootCmd.PersistentFlags().StringVarP(&input.networkName, "network", "", "host", "Sets a docker network name. Defaults to host.")
rootCmd.PersistentFlags().StringArrayVarP(&input.localRepository, "local-repository", "", []string{}, "Replaces the specified repository and ref with a local folder (e.g. https://github.com/test/test@v0=/home/act/test or test/test@v0=/home/act/test, the latter matches any hosts or protocols)")
rootCmd.PersistentFlags().BoolVar(&input.listOptions, "list-options", false, "Print a json structure of compatible options")
rootCmd.PersistentFlags().BoolVar(&input.gitea, "gitea", false, "Use Gitea instead of GitHub")
rootCmd.SetArgs(args())
return rootCmd
}
// Return locations where Act's config can be found in order: XDG spec, .actrc in HOME directory, .actrc in invocation directory
func configLocations() []string {
configFileName := ".actrc"
homePath := filepath.Join(UserHomeDir, configFileName)
invocationPath := filepath.Join(".", configFileName)
// Though named xdg, adrg's lib supports macOS and Windows config paths as well
// It also takes care of creating the parent folder so we don't need to bother later
specPath, err := xdg.ConfigFile("act/actrc")
if err != nil {
specPath = homePath
}
// This order should be enforced since the survey part relies on it
return []string{specPath, homePath, invocationPath}
}
func args() []string {
actrc := configLocations()
args := make([]string, 0)
for _, f := range actrc {
args = append(args, readArgsFile(f, true)...)
}
args = append(args, os.Args[1:]...)
return args
}
func bugReport(ctx context.Context, version string) error {
sprintf := func(key, val string) string {
return fmt.Sprintf("%-24s%s\n", key, val)
}
report := sprintf("act version:", version)
report += sprintf("Variant:", "https://gitea.com/actions-oss/act-cli / https://github.com/actions-oss/act-cli")
report += sprintf("GOOS:", runtime.GOOS)
report += sprintf("GOARCH:", runtime.GOARCH)
report += sprintf("NumCPU:", fmt.Sprint(runtime.NumCPU()))
var dockerHost string
var exists bool
if dockerHost, exists = os.LookupEnv("DOCKER_HOST"); !exists {
dockerHost = "DOCKER_HOST environment variable is not set"
} else if dockerHost == "" {
dockerHost = "DOCKER_HOST environment variable is empty."
}
report += sprintf("Docker host:", dockerHost)
report += fmt.Sprintln("Sockets found:")
for _, p := range container.CommonSocketLocations {
if _, err := os.Lstat(os.ExpandEnv(p)); err != nil {
continue
} else if _, err := os.Stat(os.ExpandEnv(p)); err != nil {
report += fmt.Sprintf("\t%s(broken)\n", p)
} else {
report += fmt.Sprintf("\t%s\n", p)
}
}
report += sprintf("Config files:", "")
for _, c := range configLocations() {
args := readArgsFile(c, false)
if len(args) > 0 {
report += fmt.Sprintf("\t%s:\n", c)
for _, l := range args {
report += fmt.Sprintf("\t\t%s\n", l)
}
}
}
vcs, ok := debug.ReadBuildInfo()
if ok && vcs != nil {
report += fmt.Sprintln("Build info:")
vcs := *vcs
report += sprintf("\tGo version:", vcs.GoVersion)
report += sprintf("\tModule path:", vcs.Path)
report += sprintf("\tMain version:", vcs.Main.Version)
report += sprintf("\tMain path:", vcs.Main.Path)
report += sprintf("\tMain checksum:", vcs.Main.Sum)
report += fmt.Sprintln("\tBuild settings:")
for _, set := range vcs.Settings {
report += sprintf(fmt.Sprintf("\t\t%s:", set.Key), set.Value)
}
}
info, err := container.GetHostInfo(ctx)
if err != nil {
fmt.Println(report)
return err
}
report += fmt.Sprintln("Docker Engine:")
report += sprintf("\tEngine version:", info.ServerVersion)
report += sprintf("\tEngine runtime:", info.DefaultRuntime)
report += sprintf("\tCgroup version:", info.CgroupVersion)
report += sprintf("\tCgroup driver:", info.CgroupDriver)
report += sprintf("\tStorage driver:", info.Driver)
report += sprintf("\tRegistry URI:", info.IndexServerAddress)
report += sprintf("\tOS:", info.OperatingSystem)
report += sprintf("\tOS type:", info.OSType)
report += sprintf("\tOS version:", info.OSVersion)
report += sprintf("\tOS arch:", info.Architecture)
report += sprintf("\tOS kernel:", info.KernelVersion)
report += sprintf("\tOS CPU:", fmt.Sprint(info.NCPU))
report += sprintf("\tOS memory:", fmt.Sprintf("%d MB", info.MemTotal/1024/1024))
report += fmt.Sprintln("\tSecurity options:")
for _, secopt := range info.SecurityOptions {
report += fmt.Sprintf("\t\t%s\n", secopt)
}
fmt.Println(report)
return nil
}
func generateManPage(cmd *cobra.Command) error {
header := &doc.GenManHeader{
Title: "act",
Section: "1",
Source: fmt.Sprintf("act %s", cmd.Version),
}
buf := new(bytes.Buffer)
cobra.CheckErr(doc.GenMan(cmd, header, buf))
fmt.Print(buf.String())
return nil
}
func listOptions(cmd *cobra.Command) error {
flags := []Flag{}
cmd.LocalFlags().VisitAll(func(f *pflag.Flag) {
flags = append(flags, Flag{Name: f.Name, Default: f.DefValue, Description: f.Usage, Type: f.Value.Type()})
})
a, err := json.Marshal(flags)
fmt.Println(string(a))
return err
}
func readArgsFile(file string, split bool) []string {
args := make([]string, 0)
f, err := os.Open(file)
if err != nil {
return args
}
defer func() {
err := f.Close()
if err != nil {
log.Errorf("failed to close args file: %v", err)
}
}()
scanner := bufio.NewScanner(f)
scanner.Buffer(nil, 1024*1024*1024) // increase buffer to 1GB to avoid scanner buffer overflow
for scanner.Scan() {
arg := os.ExpandEnv(strings.TrimSpace(scanner.Text()))
if strings.HasPrefix(arg, "-") && split {
args = append(args, regexp.MustCompile(`\s`).Split(arg, 2)...)
} else if !split {
args = append(args, arg)
}
}
return args
}
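// exampleReadArgsFile is a hypothetical helper, added purely as an illustration of the
// split modes above: with split=true an .actrc line such as
// "-P ubuntu-latest=node:16-buster-slim" becomes the two arguments
// "-P" and "ubuntu-latest=node:16-buster-slim", and lines not starting with "-" are dropped;
// with split=false every line is returned verbatim.
func exampleReadArgsFile(path string) {
fmt.Println(readArgsFile(path, true))
fmt.Println(readArgsFile(path, false))
}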
func setup(_ *Input) func(*cobra.Command, []string) {
return func(cmd *cobra.Command, _ []string) {
verbose, _ := cmd.Flags().GetBool("verbose")
if verbose {
log.SetLevel(log.DebugLevel)
}
}
}
func parseEnvs(env []string) map[string]string {
envs := make(map[string]string, len(env))
for _, envVar := range env {
e := strings.SplitN(envVar, `=`, 2)
if len(e) == 2 {
envs[e[0]] = e[1]
} else {
envs[e[0]] = ""
}
}
return envs
}
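// exampleParseEnvs is a hypothetical helper, added purely as an illustration:
// "KEY=value" pairs keep their value, while a bare "KEY" maps to the empty string,
// so parseEnvs([]string{"FOO=bar", "EMPTY"}) yields {"FOO": "bar", "EMPTY": ""}.
func exampleParseEnvs() {
fmt.Println(parseEnvs([]string{"FOO=bar", "EMPTY"}))
}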
func readYamlFile(file string) (map[string]string, error) {
content, err := os.ReadFile(file)
if err != nil {
return nil, err
}
ret := map[string]string{}
if err = yaml.Unmarshal(content, &ret); err != nil {
return nil, err
}
return ret, nil
}
func readEnvs(path string, envs map[string]string) bool {
return readEnvsEx(path, envs, false)
}
func readEnvsEx(path string, envs map[string]string, caseInsensitive bool) bool {
if _, err := os.Stat(path); err == nil {
var env map[string]string
if ext := filepath.Ext(path); ext == ".yml" || ext == ".yaml" {
env, err = readYamlFile(path)
} else {
env, err = godotenv.Read(path)
}
if err != nil {
log.Fatalf("Error loading from %s: %v", path, err)
}
for k, v := range env {
if caseInsensitive {
k = strings.ToUpper(k)
}
if _, ok := envs[k]; !ok {
envs[k] = v
}
}
return true
}
return false
}
func parseMatrix(matrix []string) map[string]map[string]bool {
// each matrix entry should be of the form - string:string
r := regexp.MustCompile(":")
matrixes := make(map[string]map[string]bool)
for _, m := range matrix {
matrix := r.Split(m, 2)
if len(matrix) < 2 {
log.Fatalf("Invalid matrix format. Failed to parse %s", m)
}
if _, ok := matrixes[matrix[0]]; !ok {
matrixes[matrix[0]] = make(map[string]bool)
}
matrixes[matrix[0]][matrix[1]] = true
}
return matrixes
}
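// exampleParseMatrix is a hypothetical helper, added purely as an illustration:
// repeated --matrix values of the form "key:value" are grouped into a set per key.
func exampleParseMatrix() {
m := parseMatrix([]string{"os:ubuntu-latest", "node:16", "node:18"})
fmt.Println(m["node"]["18"]) // true
fmt.Println(m["os"])         // map[ubuntu-latest:true]
}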
//nolint:gocyclo
func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
if input.jsonLogger {
log.SetFormatter(&log.JSONFormatter{})
}
if ok, _ := cmd.Flags().GetBool("bug-report"); ok {
ctx, cancel := common.EarlyCancelContext(ctx)
defer cancel()
return bugReport(ctx, cmd.Version)
}
if ok, _ := cmd.Flags().GetBool("man-page"); ok {
return generateManPage(cmd)
}
if input.listOptions {
return listOptions(cmd)
}
if ret, err := container.GetSocketAndHost(input.containerDaemonSocket); err != nil {
log.Warnf("Couldn't get a valid docker connection: %+v", err)
} else {
os.Setenv("DOCKER_HOST", ret.Host)
input.containerDaemonSocket = ret.Socket
log.Infof("Using docker host '%s', and daemon socket '%s'", ret.Host, ret.Socket)
}
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" && input.containerArchitecture == "" {
l := log.New()
l.SetFormatter(&log.TextFormatter{
DisableQuote: true,
DisableTimestamp: true,
})
l.Warnf(" \U000026A0 You are using Apple M-series chip and you have not specified container architecture, you might encounter issues while running act. If so, try running it with '--container-architecture linux/amd64'. \U000026A0 \n")
}
log.Debugf("Loading environment from %s", input.Envfile())
envs := parseEnvs(input.envs)
_ = readEnvs(input.Envfile(), envs)
log.Debugf("Loading action inputs from %s", input.Inputfile())
inputs := parseEnvs(input.inputs)
_ = readEnvs(input.Inputfile(), inputs)
log.Debugf("Loading secrets from %s", input.Secretfile())
secrets := newSecrets(input.secrets)
_ = readEnvsEx(input.Secretfile(), secrets, true)
if _, hasGitHubToken := secrets["GITHUB_TOKEN"]; !hasGitHubToken {
ctx, cancel := common.EarlyCancelContext(ctx)
defer cancel()
secrets["GITHUB_TOKEN"], _ = gh.GetToken(ctx, "")
}
log.Debugf("Loading vars from %s", input.Varfile())
vars := newSecrets(input.vars)
_ = readEnvs(input.Varfile(), vars)
log.Debugf("Cleaning up %s old action cache format", input.actionCachePath)
entries, _ := os.ReadDir(input.actionCachePath)
for _, entry := range entries {
if strings.Contains(entry.Name(), "@") {
fullPath := filepath.Join(input.actionCachePath, entry.Name())
log.Debugf("Removing %s", fullPath)
_ = os.RemoveAll(fullPath)
}
}
matrixes := parseMatrix(input.matrix)
log.Debugf("Evaluated matrix inclusions: %v", matrixes)
// TODO switch to Gitea Schema when supported
plannerConfig := model.PlannerConfig{
Recursive: input.workflowRecurse,
Workflow: model.WorkflowConfig{
Strict: input.strict,
},
}
if input.gitea {
plannerConfig.Workflow.Schema = schema.GetGiteaWorkflowSchema()
}
planner, err := model.NewWorkflowPlanner(input.WorkflowsPath(), plannerConfig)
if err != nil {
return err
}
jobID, err := cmd.Flags().GetString("job")
if err != nil {
return err
}
// check if we should just list the workflows
list, err := cmd.Flags().GetBool("list")
if err != nil {
return err
}
// check if we should just validate the workflows
if input.validate {
return err
}
// check if we should just draw the graph
graph, err := cmd.Flags().GetBool("graph")
if err != nil {
return err
}
// collect all events from loaded workflows
events := planner.GetEvents()
// plan with filtered jobs - to be used for filtering only
var filterPlan *model.Plan
// Determine the event name to be filtered
var filterEventName string
if len(args) > 0 {
log.Debugf("Using first passed in arguments event for filtering: %s", args[0])
filterEventName = args[0]
} else if input.autodetectEvent && len(events) > 0 && len(events[0]) > 0 {
// set default event type to first event from many available
// this way the user doesn't have to specify the event.
log.Debugf("Using first detected workflow event for filtering: %s", events[0])
filterEventName = events[0]
}
var plannerErr error
if jobID != "" {
log.Debugf("Preparing plan with a job: %s", jobID)
filterPlan, plannerErr = planner.PlanJob(jobID)
} else if filterEventName != "" {
log.Debugf("Preparing plan for a event: %s", filterEventName)
filterPlan, plannerErr = planner.PlanEvent(filterEventName)
} else {
log.Debugf("Preparing plan with all jobs")
filterPlan, plannerErr = planner.PlanAll()
}
if filterPlan == nil && plannerErr != nil {
return plannerErr
}
if list {
err = printList(filterPlan)
if err != nil {
return err
}
return plannerErr
}
if graph {
err = drawGraph(filterPlan)
if err != nil {
return err
}
return plannerErr
}
// plan with triggered jobs
var plan *model.Plan
// Determine the event name to be triggered
var eventName string
if len(args) > 0 {
log.Debugf("Using first passed in arguments event: %s", args[0])
eventName = args[0]
} else if len(events) == 1 && len(events[0]) > 0 {
log.Debugf("Using the only detected workflow event: %s", events[0])
eventName = events[0]
} else if input.autodetectEvent && len(events) > 0 && len(events[0]) > 0 {
// set default event type to first event from many available
// this way the user doesn't have to specify the event.
log.Debugf("Using first detected workflow event: %s", events[0])
eventName = events[0]
} else {
log.Debugf("Using default workflow event: push")
eventName = "push"
}
// build the plan for this run
if jobID != "" {
log.Debugf("Planning job: %s", jobID)
plan, plannerErr = planner.PlanJob(jobID)
} else {
log.Debugf("Planning jobs for event: %s", eventName)
plan, plannerErr = planner.PlanEvent(eventName)
}
if plan != nil {
if len(plan.Stages) == 0 {
plannerErr = fmt.Errorf("could not find any stages to run. View the valid jobs with `act --list`. Use `act --help` to find how to filter by Job ID/Workflow/Event Name")
}
}
if plan == nil && plannerErr != nil {
return plannerErr
}
// check to see if the main branch was defined
defaultbranch, err := cmd.Flags().GetString("defaultbranch")
if err != nil {
return err
}
// Check if platforms flag is set, if not, run default image survey
if len(input.platforms) == 0 {
cfgFound := false
cfgLocations := configLocations()
for _, v := range cfgLocations {
_, err := os.Stat(v)
if err == nil {
cfgFound = true
}
}
if !cfgFound && len(cfgLocations) > 0 {
// The first config location refers to the global config folder
if err := defaultImageSurvey(cfgLocations[0]); err != nil {
log.Fatal(err)
}
input.platforms = readArgsFile(cfgLocations[0], true)
}
}
deprecationWarning := "--%s is deprecated and will be removed soon, please switch to cli: `--container-options \"%[2]s\"` or `.actrc`: `--container-options %[2]s`."
if input.privileged {
log.Warnf(deprecationWarning, "privileged", "--privileged")
}
if len(input.usernsMode) > 0 {
log.Warnf(deprecationWarning, "userns", fmt.Sprintf("--userns=%s", input.usernsMode))
}
if len(input.containerCapAdd) > 0 {
log.Warnf(deprecationWarning, "container-cap-add", fmt.Sprintf("--cap-add=%s", input.containerCapAdd))
}
if len(input.containerCapDrop) > 0 {
log.Warnf(deprecationWarning, "container-cap-drop", fmt.Sprintf("--cap-drop=%s", input.containerCapDrop))
}
// run the plan
config := &runner.Config{
Actor: input.actor,
EventName: eventName,
EventPath: input.EventPath(),
DefaultBranch: defaultbranch,
ForcePull: !input.actionOfflineMode && !input.pullIfNeeded,
ForceRebuild: !input.noRebuild,
ReuseContainers: input.reuseContainers,
Workdir: input.Workdir(),
ActionCacheDir: input.actionCachePath,
ActionOfflineMode: input.actionOfflineMode,
BindWorkdir: input.bindWorkdir,
LogOutput: !input.noOutput,
JSONLogger: input.jsonLogger,
LogPrefixJobID: input.logPrefixJobID,
Env: envs,
Secrets: secrets,
Vars: vars,
Inputs: inputs,
Token: secrets["GITHUB_TOKEN"],
InsecureSecrets: input.insecureSecrets,
Platforms: input.newPlatforms(),
Privileged: input.privileged,
UsernsMode: input.usernsMode,
ContainerArchitecture: input.containerArchitecture,
ContainerDaemonSocket: input.containerDaemonSocket,
ContainerOptions: input.containerOptions,
UseGitIgnore: input.useGitIgnore,
GitHubInstance: input.githubInstance,
GitHubServerURL: input.gitHubServerURL,
GitHubAPIServerURL: input.gitHubAPIServerURL,
GitHubGraphQlAPIServerURL: input.gitHubGraphQlAPIServerURL,
ContainerCapAdd: input.containerCapAdd,
ContainerCapDrop: input.containerCapDrop,
AutoRemove: input.autoRemove,
ArtifactServerPath: input.artifactServerPath,
ArtifactServerAddr: input.artifactServerAddr,
ArtifactServerPort: input.artifactServerPort,
NoSkipCheckout: input.noSkipCheckout,
RemoteName: input.remoteName,
ReplaceGheActionWithGithubCom: input.replaceGheActionWithGithubCom,
ReplaceGheActionTokenWithGithubCom: input.replaceGheActionTokenWithGithubCom,
Matrix: matrixes,
ContainerNetworkMode: docker_container.NetworkMode(input.networkName),
Parallel: input.parallel,
Planner: plannerConfig,
Action: model.ActionConfig{}, // TODO Gitea Action Schema
MainContextNames: []string{"github"},
}
if input.gitea {
config.Action.Schema = schema.GetGiteaActionSchema()
config.MainContextNames = append(config.MainContextNames, "gitea")
}
actionCache := runner.GoGitActionCache{
Path: config.ActionCacheDir,
}
config.ActionCache = &actionCache
if input.actionOfflineMode {
config.ActionCache = &runner.GoGitActionCacheOfflineMode{
Parent: actionCache,
}
}
if len(input.localRepository) > 0 {
localRepositories := map[string]string{}
for _, l := range input.localRepository {
k, v, _ := strings.Cut(l, "=")
localRepositories[k] = v
}
config.ActionCache = &runner.LocalRepositoryCache{
Parent: config.ActionCache,
LocalRepositories: localRepositories,
CacheDirCache: map[string]string{},
}
}
var r runner.Runner
if eventName == "workflow_call" {
// The direct workflow_call invocation is broken, so craft a fake caller job instead
convertedInputs := make(map[string]interface{})
for k, v := range inputs {
var raw interface{}
if err := yaml.Unmarshal([]byte(v), &raw); err != nil {
return fmt.Errorf("failed to unmarshal input %s: %w", k, err)
}
convertedInputs[k] = raw
}
r, err = runner.NewReusableWorkflowRunner(&runner.RunContext{
Config: config,
Name: "_",
JobName: "_",
Run: &model.Run{
JobID: "_",
Workflow: &model.Workflow{
Jobs: map[string]*model.Job{
"_": {
Name: "_",
With: convertedInputs,
},
},
},
},
})
} else {
r, err = runner.New(config)
}
if err != nil {
return err
}
cancel := artifacts.Serve(ctx, input.artifactServerPath, input.artifactServerAddr, input.artifactServerPort)
const cacheURLKey = "ACTIONS_CACHE_URL"
var cacheHandler *artifactcache.Handler
if !input.noCacheServer && envs[cacheURLKey] == "" {
var err error
cacheHandler, err = artifactcache.StartHandler(input.cacheServerPath, input.cacheServerAddr, input.cacheServerPort, common.Logger(ctx))
if err != nil {
return err
}
envs[cacheURLKey] = cacheHandler.ExternalURL() + "/"
}
ctx = common.WithDryrun(ctx, input.dryrun)
if watch, err := cmd.Flags().GetBool("watch"); err != nil {
return err
} else if watch {
err = watchAndRun(ctx, r.NewPlanExecutor(plan))
if err != nil {
return err
}
return plannerErr
}
executor := r.NewPlanExecutor(plan).Finally(func(_ context.Context) error {
cancel()
_ = cacheHandler.Close()
return nil
})
err = executor(ctx)
if err != nil {
return err
}
return plannerErr
}
}
func defaultImageSurvey(actrc string) error {
var answer string
confirmation := &survey.Select{
Message: "Please choose the default image you want to use with act:\n - Large size image: ca. 17GB download + 53.1GB storage, you will need 75GB of free disk space, snapshots of GitHub Hosted Runners without snap and pulled docker images\n - Medium size image: ~500MB, includes only necessary tools to bootstrap actions and aims to be compatible with most actions\n - Micro size image: <200MB, contains only NodeJS required to bootstrap actions, doesn't work with all actions\n\nDefault image and other options can be changed manually in " + configLocations()[0] + " (please refer to https://github.com/nektos/act#configuration for additional information about file structure)",
Help: "If you want to know why act asks you that, please go to https://github.com/actions-oss/act-cli/issues/107",
Default: "Medium",
Options: []string{"Large", "Medium", "Micro"},
}
err := survey.AskOne(confirmation, &answer)
if err != nil {
return err
}
var option string
switch answer {
case "Large":
option = "-P ubuntu-latest=catthehacker/ubuntu:full-latest\n-P ubuntu-22.04=catthehacker/ubuntu:full-22.04\n-P ubuntu-20.04=catthehacker/ubuntu:full-20.04\n-P ubuntu-18.04=catthehacker/ubuntu:full-18.04\n"
case "Medium":
option = "-P ubuntu-latest=catthehacker/ubuntu:act-latest\n-P ubuntu-22.04=catthehacker/ubuntu:act-22.04\n-P ubuntu-20.04=catthehacker/ubuntu:act-20.04\n-P ubuntu-18.04=catthehacker/ubuntu:act-18.04\n"
case "Micro":
option = "-P ubuntu-latest=node:16-buster-slim\n-P ubuntu-22.04=node:16-bullseye-slim\n-P ubuntu-20.04=node:16-buster-slim\n-P ubuntu-18.04=node:16-buster-slim\n"
}
f, err := os.Create(actrc)
if err != nil {
return err
}
_, err = f.WriteString(option)
if err != nil {
_ = f.Close()
return err
}
err = f.Close()
if err != nil {
return err
}
return nil
}
func watchAndRun(ctx context.Context, fn common.Executor) error {
dir, err := os.Getwd()
if err != nil {
return err
}
ignoreFile := filepath.Join(dir, ".gitignore")
ignore := &gitignore.GitIgnore{}
if info, err := os.Stat(ignoreFile); err == nil && !info.IsDir() {
ignore, err = gitignore.CompileIgnoreFile(ignoreFile)
if err != nil {
return fmt.Errorf("compile %q: %w", ignoreFile, err)
}
}
folderWatcher := fswatch.NewFolderWatcher(
dir,
true,
ignore.MatchesPath,
2, // 2 seconds
)
folderWatcher.Start()
defer folderWatcher.Stop()
// run once before watching
if err := fn(ctx); err != nil {
return err
}
earlyCancelCtx, cancel := common.EarlyCancelContext(ctx)
defer cancel()
for folderWatcher.IsRunning() {
log.Debugf("Watching %s for changes", dir)
select {
case <-earlyCancelCtx.Done():
return nil
case changes := <-folderWatcher.ChangeDetails():
log.Debugf("%s", changes.String())
if err := fn(ctx); err != nil {
return err
}
}
}
return nil
}

109
cmd/root_test.go Normal file
View File

@@ -0,0 +1,109 @@
package cmd
import (
"context"
"path"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
func TestReadSecrets(t *testing.T) {
secrets := map[string]string{}
ret := readEnvsEx(path.Join("testdata", "secrets.yml"), secrets, true)
assert.True(t, ret)
assert.Equal(t, `line1
line2
line3
`, secrets["MYSECRET"])
}
func TestReadEnv(t *testing.T) {
secrets := map[string]string{}
ret := readEnvs(path.Join("testdata", "secrets.yml"), secrets)
assert.True(t, ret)
assert.Equal(t, `line1
line2
line3
`, secrets["mysecret"])
}
func TestListOptions(t *testing.T) {
rootCmd := createRootCommand(context.Background(), &Input{}, "")
err := newRunCommand(context.Background(), &Input{
listOptions: true,
})(rootCmd, []string{})
assert.NoError(t, err)
}
func TestRun(t *testing.T) {
rootCmd := createRootCommand(context.Background(), &Input{}, "")
err := newRunCommand(context.Background(), &Input{
platforms: []string{"ubuntu-latest=node:16-buster-slim"},
workdir: "../pkg/runner/testdata/",
workflowsPath: "./basic/push.yml",
})(rootCmd, []string{})
assert.NoError(t, err)
}
func TestRunPush(t *testing.T) {
rootCmd := createRootCommand(context.Background(), &Input{}, "")
err := newRunCommand(context.Background(), &Input{
platforms: []string{"ubuntu-latest=node:16-buster-slim"},
workdir: "../pkg/runner/testdata/",
workflowsPath: "./basic/push.yml",
})(rootCmd, []string{"push"})
assert.NoError(t, err)
}
func TestRunPushJsonLogger(t *testing.T) {
rootCmd := createRootCommand(context.Background(), &Input{}, "")
err := newRunCommand(context.Background(), &Input{
platforms: []string{"ubuntu-latest=node:16-buster-slim"},
workdir: "../pkg/runner/testdata/",
workflowsPath: "./basic/push.yml",
jsonLogger: true,
})(rootCmd, []string{"push"})
assert.NoError(t, err)
}
func TestFlags(t *testing.T) {
for _, f := range []string{"graph", "list", "bug-report", "man-page"} {
t.Run("TestFlag-"+f, func(t *testing.T) {
rootCmd := createRootCommand(context.Background(), &Input{}, "")
err := rootCmd.Flags().Set(f, "true")
assert.NoError(t, err)
err = newRunCommand(context.Background(), &Input{
platforms: []string{"ubuntu-latest=node:16-buster-slim"},
workdir: "../pkg/runner/testdata/",
workflowsPath: "./basic/push.yml",
})(rootCmd, []string{})
assert.NoError(t, err)
})
}
}
func TestWorkflowCall(t *testing.T) {
rootCmd := createRootCommand(context.Background(), &Input{}, "")
err := newRunCommand(context.Background(), &Input{
platforms: []string{"ubuntu-latest=node:16-buster-slim"},
workdir: "../pkg/runner/testdata/",
workflowsPath: "./workflow_call_inputs/workflow_call_inputs.yml",
inputs: []string{"required=required input", "boolean=true"},
})(rootCmd, []string{"workflow_call"})
assert.NoError(t, err)
}
func TestLocalRepositories(t *testing.T) {
wd, _ := filepath.Abs("../pkg/runner/testdata/")
rootCmd := createRootCommand(context.Background(), &Input{}, "")
err := newRunCommand(context.Background(), &Input{
githubInstance: "github.com",
platforms: []string{"ubuntu-latest=node:16-buster-slim"},
workdir: wd,
workflowsPath: "./remote-action-composite-action-ref-partial-override/push.yml",
localRepository: []string{"needs/override@main=" + wd + "/actions-environment-and-context-tests"},
})(rootCmd, []string{"push"})
assert.NoError(t, err)
}

42
cmd/secrets.go Normal file
View File

@@ -0,0 +1,42 @@
package cmd
import (
"fmt"
"os"
"strings"
log "github.com/sirupsen/logrus"
"golang.org/x/term"
)
type secrets map[string]string
func newSecrets(secretList []string) secrets {
s := make(map[string]string)
for _, secretPair := range secretList {
secretPairParts := strings.SplitN(secretPair, "=", 2)
secretPairParts[0] = strings.ToUpper(secretPairParts[0])
if _, ok := s[secretPairParts[0]]; ok {
log.Errorf("secret %s is already defined (secrets are case insensitive)", secretPairParts[0])
}
if len(secretPairParts) == 2 {
s[secretPairParts[0]] = secretPairParts[1]
} else if env, ok := os.LookupEnv(secretPairParts[0]); ok && env != "" {
s[secretPairParts[0]] = env
} else {
fmt.Printf("Provide value for '%s': ", secretPairParts[0])
val, err := term.ReadPassword(int(os.Stdin.Fd()))
fmt.Println()
if err != nil {
log.Errorf("failed to read input: %v", err)
os.Exit(1)
}
s[secretPairParts[0]] = string(val)
}
}
return s
}
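// exampleNewSecrets is a hypothetical helper, added purely as an illustration of the
// precedence implemented above: an explicit "name=value" pair wins, otherwise a
// non-empty environment variable with the same (upper-cased) name is used, and only
// then is the user prompted interactively.
func exampleNewSecrets() {
s := newSecrets([]string{"my_token=abc", "FROM_ENV"})
fmt.Println(s["MY_TOKEN"]) // "abc"; FROM_ENV comes from the environment or a prompt
}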
func (s secrets) AsMap() map[string]string {
return s
}

4
cmd/testdata/secrets.yml vendored Normal file
View File

@@ -0,0 +1,4 @@
mysecret: |
line1
line2
line3

12
codecov.yml Normal file
View File

@@ -0,0 +1,12 @@
coverage:
status:
project:
default:
target: auto # auto compares coverage to the previous base commit
threshold: 1%
patch:
default:
target: 50%
ignore:
# Files generated by Google Protobuf do not require coverage
- '**/*.pb.go'

433
install.sh Executable file
View File

@@ -0,0 +1,433 @@
#!/bin/sh
set -e
# Code originally generated by godownloader on 2021-12-22T16:10:52Z. DO NOT EDIT.
# (godownloader is deprecated, so changes to this script are maintained in install.sh in https://github.com/nektos/act)
#
usage() {
this=$1
cat <<EOF
$this: download go binaries for nektos/act
Usage: $this [-b bindir] [-d] [-f] [tag]
-b sets bindir or installation directory, Defaults to ./bin
-d turns on debug logging
-f forces installation, bypassing version checks
[tag] is a tag from
https://github.com/nektos/act/releases
If tag is missing, then the latest will be used.
EOF
exit 2
}
parse_args() {
#BINDIR is ./bin unless set by ENV
# overridden by flag below
BINDIR=${BINDIR:-./bin}
while getopts "b:dfh?x" arg; do
case "$arg" in
b) BINDIR="$OPTARG" ;;
d) log_set_priority 10 ;;
f) FORCE_INSTALL="true" ;;
h | \?) usage "$0" ;;
x) set -x ;;
esac
done
shift $((OPTIND - 1))
TAG=$1
}
# this function wraps all the destructive operations
# if a curl|bash cuts off the end of the script due to
# network, either nothing will happen or will syntax error
# out preventing half-done work
execute() {
tmpdir=$(mktemp -d)
log_debug "downloading files into ${tmpdir}"
http_download "${tmpdir}/${TARBALL}" "${TARBALL_URL}"
http_download "${tmpdir}/${CHECKSUM}" "${CHECKSUM_URL}"
hash_sha256_verify "${tmpdir}/${TARBALL}" "${tmpdir}/${CHECKSUM}"
srcdir="${tmpdir}"
(cd "${tmpdir}" && untar "${TARBALL}")
test ! -d "${BINDIR}" && install -d "${BINDIR}"
for binexe in $BINARIES; do
if [ "$OS" = "windows" ]; then
binexe="${binexe}.exe"
fi
install "${srcdir}/${binexe}" "${BINDIR}/"
log_info "installed ${BINDIR}/${binexe}"
done
rm -rf "${tmpdir}"
}
get_binaries() {
case "$PLATFORM" in
darwin/386) BINARIES="act" ;;
darwin/amd64) BINARIES="act" ;;
darwin/arm64) BINARIES="act" ;;
darwin/armv6) BINARIES="act" ;;
darwin/armv7) BINARIES="act" ;;
linux/386) BINARIES="act" ;;
linux/amd64) BINARIES="act" ;;
linux/arm64) BINARIES="act" ;;
linux/armv6) BINARIES="act" ;;
linux/armv7) BINARIES="act" ;;
windows/386) BINARIES="act" ;;
windows/amd64) BINARIES="act" ;;
windows/arm64) BINARIES="act" ;;
windows/armv6) BINARIES="act" ;;
windows/armv7) BINARIES="act" ;;
*)
log_crit "platform $PLATFORM is not supported. Make sure this script is up-to-date and file request at https://github.com/${PREFIX}/issues/new"
exit 1
;;
esac
}
tag_to_version() {
if [ -z "${TAG}" ]; then
log_info "checking GitHub for latest tag"
else
log_info "checking GitHub for tag '${TAG}'"
fi
REALTAG=$(github_release "$OWNER/$REPO" "${TAG}") && true
if test -z "$REALTAG"; then
log_crit "unable to find '${TAG}' - use 'latest' or see https://github.com/${PREFIX}/releases for details"
exit 1
fi
# if version starts with 'v', remove it
TAG="$REALTAG"
VERSION=${TAG#v}
}
adjust_format() {
# change format (tar.gz or zip) based on OS
case ${OS} in
windows) FORMAT=zip ;;
esac
true
}
adjust_os() {
# adjust archive name based on OS
case ${OS} in
386) OS=i386 ;;
amd64) OS=x86_64 ;;
darwin) OS=Darwin ;;
linux) OS=Linux ;;
windows) OS=Windows ;;
esac
true
}
adjust_arch() {
# adjust archive name based on ARCH
case ${ARCH} in
386) ARCH=i386 ;;
amd64) ARCH=x86_64 ;;
darwin) ARCH=Darwin ;;
linux) ARCH=Linux ;;
windows) ARCH=Windows ;;
esac
true
}
check_installed_version() {
# Check if force install flag is set
if [ "${FORCE_INSTALL}" = "true" ]; then
log_info "force install enabled. Skipping version check."
return
fi
# Check if the binary exists
if is_command "$BINARY"; then
# Extract installed version using cut
INSTALLED_VERSION=$($BINARY --version | cut -d' ' -f3)
if [ -z "$INSTALLED_VERSION" ]; then
log_err "failed to detect installed version. Proceeding with installation."
return
fi
log_info "found installed version: $INSTALLED_VERSION"
# Compare versions
if [ "$INSTALLED_VERSION" = "$VERSION" ]; then
log_info "$BINARY version $INSTALLED_VERSION is already installed."
exit 0
else
log_debug "updating $BINARY from version $INSTALLED_VERSION to $VERSION..."
fi
else
log_debug "$BINARY is not installed. Proceeding with installation..."
fi
}
cat /dev/null <<EOF
------------------------------------------------------------------------
https://github.com/client9/shlib - portable posix shell functions
Public domain - http://unlicense.org
https://github.com/client9/shlib/blob/master/LICENSE.md
but credit (and pull requests) appreciated.
------------------------------------------------------------------------
EOF
is_command() {
command -v "$1" >/dev/null
}
echoerr() {
echo "$@" 1>&2
}
log_prefix() {
echo "$0"
}
_logp=6
log_set_priority() {
_logp="$1"
}
log_priority() {
if test -z "$1"; then
echo "$_logp"
return
fi
[ "$1" -le "$_logp" ]
}
log_tag() {
case $1 in
0) echo "emerg" ;;
1) echo "alert" ;;
2) echo "crit" ;;
3) echo "err" ;;
4) echo "warning" ;;
5) echo "notice" ;;
6) echo "info" ;;
7) echo "debug" ;;
*) echo "$1" ;;
esac
}
log_debug() {
log_priority 7 || return 0
echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
}
log_info() {
log_priority 6 || return 0
echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
}
log_err() {
log_priority 3 || return 0
echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
}
log_crit() {
log_priority 2 || return 0
echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
}
uname_os() {
os=$(uname -s | tr '[:upper:]' '[:lower:]')
case "$os" in
cygwin_nt*) os="windows" ;;
mingw*) os="windows" ;;
msys_nt*) os="windows" ;;
esac
echo "$os"
}
uname_arch() {
arch=$(uname -m)
case $arch in
x86_64) arch="amd64" ;;
x86) arch="386" ;;
i686) arch="386" ;;
i386) arch="386" ;;
aarch64) arch="arm64" ;;
armv5*) arch="armv5" ;;
armv6*) arch="armv6" ;;
armv7*) arch="armv7" ;;
esac
echo ${arch}
}
uname_os_check() {
os=$(uname_os)
case "$os" in
darwin) return 0 ;;
dragonfly) return 0 ;;
freebsd) return 0 ;;
linux) return 0 ;;
android) return 0 ;;
nacl) return 0 ;;
netbsd) return 0 ;;
openbsd) return 0 ;;
plan9) return 0 ;;
solaris) return 0 ;;
windows) return 0 ;;
esac
log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
return 1
}
uname_arch_check() {
arch=$(uname_arch)
case "$arch" in
386) return 0 ;;
amd64) return 0 ;;
arm64) return 0 ;;
armv5) return 0 ;;
armv6) return 0 ;;
armv7) return 0 ;;
ppc64) return 0 ;;
ppc64le) return 0 ;;
mips) return 0 ;;
mipsle) return 0 ;;
mips64) return 0 ;;
mips64le) return 0 ;;
s390x) return 0 ;;
amd64p32) return 0 ;;
esac
log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
return 1
}
untar() {
tarball=$1
case "${tarball}" in
*.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;;
*.tar) tar --no-same-owner -xf "${tarball}" ;;
*.zip) unzip "${tarball}" ;;
*)
log_err "untar unknown archive format for ${tarball}"
return 1
;;
esac
}
http_download_curl() {
local_file=$1
source_url=$2
header=$3
if [ -z "$header" ]; then
code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
else
code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
fi
if [ "$code" != "200" ]; then
log_debug "http_download_curl received HTTP status $code"
return 1
fi
return 0
}
http_download_wget() {
local_file=$1
source_url=$2
header=$3
if [ -z "$header" ]; then
wget -q -O "$local_file" "$source_url"
else
wget -q --header "$header" -O "$local_file" "$source_url"
fi
}
http_download() {
log_debug "http_download $2"
if is_command curl; then
http_download_curl "$@"
return
elif is_command wget; then
http_download_wget "$@"
return
fi
log_crit "http_download unable to find wget or curl"
return 1
}
http_copy() {
tmp=$(mktemp)
http_download "${tmp}" "$1" "$2" || return 1
body=$(cat "$tmp")
rm -f "${tmp}"
echo "$body"
}
github_release() {
owner_repo=$1
version=$2
test -z "$version" && version="latest"
giturl="https://github.com/${owner_repo}/releases/${version}"
json=$(http_copy "$giturl" "Accept:application/json")
test -z "$json" && return 1
version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
test -z "$version" && return 1
echo "$version"
}
hash_sha256() {
TARGET=${1:-/dev/stdin}
if is_command gsha256sum; then
hash=$(gsha256sum "$TARGET") || return 1
echo "$hash" | cut -d ' ' -f 1
elif is_command sha256sum; then
hash=$(sha256sum "$TARGET") || return 1
echo "$hash" | cut -d ' ' -f 1
elif is_command shasum; then
hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
echo "$hash" | cut -d ' ' -f 1
elif is_command openssl; then
hash=$(openssl dgst -sha256 "$TARGET") || return 1
echo "$hash" | cut -d ' ' -f 2
else
log_crit "hash_sha256 unable to find command to compute sha-256 hash"
return 1
fi
}
hash_sha256_verify() {
TARGET=$1
checksums=$2
if [ -z "$checksums" ]; then
log_err "hash_sha256_verify checksum file not specified in arg2"
return 1
fi
BASENAME=${TARGET##*/}
want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
if [ -z "$want" ]; then
log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
return 1
fi
got=$(hash_sha256 "$TARGET")
if [ "$want" != "$got" ]; then
log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
return 1
fi
}
cat /dev/null <<EOF
------------------------------------------------------------------------
End of functions from https://github.com/client9/shlib
------------------------------------------------------------------------
EOF
PROJECT_NAME="act"
OWNER=nektos
REPO="act"
BINARY=act
FORMAT=tar.gz
OS=$(uname_os)
ARCH=$(uname_arch)
PREFIX="$OWNER/$REPO"
# use in logging routines
log_prefix() {
echo "$PREFIX"
}
PLATFORM="${OS}/${ARCH}"
GITHUB_DOWNLOAD=https://github.com/${OWNER}/${REPO}/releases/download
uname_os_check "$OS"
uname_arch_check "$ARCH"
parse_args "$@"
get_binaries
tag_to_version
check_installed_version
adjust_format
adjust_os
adjust_arch
log_info "found version: ${VERSION} for ${TAG}/${OS}/${ARCH}"
NAME=${PROJECT_NAME}_${OS}_${ARCH}
TARBALL=${NAME}.${FORMAT}
TARBALL_URL=${GITHUB_DOWNLOAD}/${TAG}/${TARBALL}
CHECKSUM=checksums.txt
CHECKSUM_URL=${GITHUB_DOWNLOAD}/${TAG}/${CHECKSUM}
execute

View File

@@ -0,0 +1,122 @@
package functions
import (
"fmt"
"strconv"
"strings"
)
// Format evaluates a format string with the supplied arguments.
// It behaves like the corresponding C# implementation in the repository;
// it supports escaped braces and numeric argument indices.
// Format specifiers (e.g. :D) are recognised but currently ignored.
func Format(formatStr string, args ...interface{}) (string, error) {
var sb strings.Builder
i := 0
for i < len(formatStr) {
lbrace := strings.IndexByte(formatStr[i:], '{')
rbrace := strings.IndexByte(formatStr[i:], '}')
// left brace
if lbrace >= 0 && (rbrace < 0 || rbrace > lbrace) {
l := i + lbrace
sb.WriteString(formatStr[i:l])
// escaped left brace
if l+1 < len(formatStr) && formatStr[l+1] == '{' {
sb.WriteString(formatStr[l : l+1])
i = l + 2
continue
}
// normal placeholder
if rbrace > lbrace+1 {
// read index
idx, endIdx, ok := readArgIndex(formatStr, l+1)
if !ok {
return "", fmt.Errorf("invalid format string: %s", formatStr)
}
// read optional format specifier
spec, r, ok := readFormatSpecifiers(formatStr, endIdx+1)
if !ok {
return "", fmt.Errorf("invalid format string: %s", formatStr)
}
if idx >= len(args) {
return "", fmt.Errorf("argument index %d out of range", idx)
}
// append argument (format specifier is ignored here)
arg := args[idx]
sb.WriteString(fmt.Sprintf("%v", arg))
if spec != "" {
// placeholder for future specifier handling
_ = spec
}
i = r + 1
continue
}
return "", fmt.Errorf("invalid format string: %s", formatStr)
}
// right brace
if rbrace >= 0 {
// escaped right brace
if i+rbrace+1 < len(formatStr) && formatStr[i+rbrace+1] == '}' {
sb.WriteString(formatStr[i : i+rbrace+1])
i += rbrace + 2
continue
}
return "", fmt.Errorf("invalid format string: %s", formatStr)
}
// rest of string
sb.WriteString(formatStr[i:])
break
}
return sb.String(), nil
}
// readArgIndex parses a decimal number starting at pos.
// It returns the parsed value, the index of the last digit and true on success.
func readArgIndex(s string, pos int) (int, int, bool) {
start := pos
for pos < len(s) && s[pos] >= '0' && s[pos] <= '9' {
pos++
}
if start == pos {
return 0, 0, false
}
idx, err := strconv.Atoi(s[start:pos])
if err != nil {
return 0, 0, false
}
return idx, pos - 1, true
}
// readFormatSpecifiers reads an optional format specifier block.
// It returns the specifier string, the index of the closing '}' and true on success.
func readFormatSpecifiers(s string, pos int) (string, int, bool) {
if pos >= len(s) {
return "", 0, false
}
if s[pos] == '}' {
return "", pos, true
}
if s[pos] != ':' {
return "", 0, false
}
pos++ // skip ':'
start := pos
for pos < len(s) {
if s[pos] == '}' && pos+1 < len(s) && s[pos+1] == '}' {
// escaped '}' inside the specifier
pos += 2
continue
}
if s[pos] == '}' {
return s[start:pos], pos, true
}
pos++
}
return "", 0, false
}

View File

@@ -0,0 +1,14 @@
package functions
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestFormat(t *testing.T) {
s, err := Format("Hello {0}, you have {1} new messages", "Alice", 5)
assert.NoError(t, err)
fmt.Println(s) // Hello Alice, you have 5 new messages
}
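// TestFormatEscapedBraces is an illustrative, hypothetical check of the escaped-brace
// handling described above: doubled braces are emitted literally while "{0}" is
// replaced by the first argument.
func TestFormatEscapedBraces(t *testing.T) {
s, err := Format("{{0}} is {0}", "x")
assert.NoError(t, err)
assert.Equal(t, "{0} is x", s)
}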

View File

@@ -0,0 +1,474 @@
package v2
import (
"fmt"
"math"
"strconv"
"strings"
)
// ValueKind represents the type of a value in the evaluation engine.
// The values mirror the C# ValueKind enum.
//
// Note: The names are kept identical to the C# implementation for easier mapping.
//
// The lexer is intentionally simple; it only tokenises the subset of
// expressions that are used in GitHub Actions workflow `if:` expressions.
// It does not evaluate the expression; that is left to the parser.
type ValueKind int
const (
ValueKindNull ValueKind = iota
ValueKindBoolean
ValueKindNumber
ValueKindString
ValueKindObject
ValueKindArray
)
type ReadOnlyArray[T any] interface {
GetAt(i int64) T
GetEnumerator() []T
}
type ReadOnlyObject[T any] interface {
Get(key string) T
GetKv(key string) (string, T) // Returns the actual key used (for case-insensitive objects)
GetEnumerator() map[string]T
}
type BasicArray[T any] []T
func (a BasicArray[T]) GetAt(i int64) T {
if int(i) >= len(a) {
var zero T
return zero
}
return a[i]
}
func (a BasicArray[T]) GetEnumerator() []T {
return a
}
type CaseInsensitiveObject[T any] map[string]T
func (o CaseInsensitiveObject[T]) Get(key string) T {
_, v := o.GetKv(key)
return v
}
func (o CaseInsensitiveObject[T]) GetKv(key string) (k string, v T) {
for k, v := range o {
if strings.EqualFold(k, key) {
return k, v
}
}
var zero T
return key, zero
}
func (o CaseInsensitiveObject[T]) GetEnumerator() map[string]T {
return o
}
type CaseSensitiveObject[T any] map[string]T
func (o CaseSensitiveObject[T]) Get(key string) T {
return o[key]
}
func (o CaseSensitiveObject[T]) GetKv(key string) (string, T) {
return key, o[key]
}
func (o CaseSensitiveObject[T]) GetEnumerator() map[string]T {
return o
}
// EvaluationResult holds the result of evaluating an expression node.
// It mirrors the C# EvaluationResult class.
type EvaluationResult struct {
context *EvaluationContext
level int
value interface{}
kind ValueKind
raw interface{}
omitTracing bool
}
// NewEvaluationResult creates a new EvaluationResult.
func NewEvaluationResult(context *EvaluationContext, level int, val interface{}, kind ValueKind, raw interface{}, omitTracing bool) *EvaluationResult {
er := &EvaluationResult{context: context, level: level, value: val, kind: kind, raw: raw, omitTracing: omitTracing}
if !omitTracing {
er.traceValue()
}
return er
}
// Kind returns the ValueKind of the result.
func (er *EvaluationResult) Kind() ValueKind { return er.kind }
// Raw returns the raw value that was passed to the constructor.
func (er *EvaluationResult) Raw() interface{} { return er.raw }
// Value returns the canonical value.
func (er *EvaluationResult) Value() interface{} { return er.value }
// IsFalsy implements the logic from the C# class.
func (er *EvaluationResult) IsFalsy() bool {
switch er.kind {
case ValueKindNull:
return true
case ValueKindBoolean:
return !er.value.(bool)
case ValueKindNumber:
v := er.value.(float64)
return v == 0 || isNaN(v)
case ValueKindString:
return er.value.(string) == ""
default:
return false
}
}
func isNaN(v float64) bool { return v != v }
// IsPrimitive returns true if the kind is a primitive type.
func (er *EvaluationResult) IsPrimitive() bool { return er.kind <= ValueKindString }
// IsTruthy is the negation of IsFalsy.
func (er *EvaluationResult) IsTruthy() bool { return !er.IsFalsy() }
// AbstractEqual compares two EvaluationResults using the abstract equality algorithm.
func (er *EvaluationResult) AbstractEqual(other *EvaluationResult) bool {
return abstractEqual(er.value, other.value)
}
// AbstractGreaterThan compares two EvaluationResults.
func (er *EvaluationResult) AbstractGreaterThan(other *EvaluationResult) bool {
return abstractGreaterThan(er.value, other.value)
}
// AbstractGreaterThanOrEqual
func (er *EvaluationResult) AbstractGreaterThanOrEqual(other *EvaluationResult) bool {
return er.AbstractEqual(other) || er.AbstractGreaterThan(other)
}
// AbstractLessThan
func (er *EvaluationResult) AbstractLessThan(other *EvaluationResult) bool {
return abstractLessThan(er.value, other.value)
}
// AbstractLessThanOrEqual
func (er *EvaluationResult) AbstractLessThanOrEqual(other *EvaluationResult) bool {
return er.AbstractEqual(other) || er.AbstractLessThan(other)
}
// AbstractNotEqual
func (er *EvaluationResult) AbstractNotEqual(other *EvaluationResult) bool {
return !er.AbstractEqual(other)
}
// ConvertToNumber converts the value to a float64.
func (er *EvaluationResult) ConvertToNumber() float64 { return convertToNumber(er.value) }
// ConvertToString converts the value to a string.
func (er *EvaluationResult) ConvertToString() string {
switch er.kind {
case ValueKindNull:
return ""
case ValueKindBoolean:
if er.value.(bool) {
return ExpressionConstants.True
}
return ExpressionConstants.False
case ValueKindNumber:
return fmt.Sprintf(ExpressionConstants.NumberFormat, er.value.(float64))
case ValueKindString:
return er.value.(string)
default:
return fmt.Sprintf("%v", er.value)
}
}
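// exampleConvertToString is a hypothetical helper, added purely as an illustration of
// the conversions above: numbers are rendered with the %.15g format, so 6.0 prints as
// "6" and 23.5 as "23.5"; null becomes the empty string and booleans "true"/"false".
func exampleConvertToString(ctx *EvaluationContext) {
fmt.Println(CreateIntermediateResult(ctx, 6.0).ConvertToString())  // "6"
fmt.Println(CreateIntermediateResult(ctx, 23.5).ConvertToString()) // "23.5"
}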
// TryGetCollectionInterface returns the underlying collection if the value is an array or object.
func (er *EvaluationResult) TryGetCollectionInterface() (interface{}, bool) {
switch v := er.value.(type) {
case ReadOnlyArray[any]:
return v, true
case ReadOnlyObject[any]:
return v, true
default:
return nil, false
}
}
// CreateIntermediateResult creates an EvaluationResult from an arbitrary object.
func CreateIntermediateResult(context *EvaluationContext, obj interface{}) *EvaluationResult {
val, kind, raw := convertToCanonicalValue(obj)
return NewEvaluationResult(context, 0, val, kind, raw, true)
}
// --- Helper functions and constants ---------------------------------------
// ExpressionConstants holds string constants used in conversions.
var ExpressionConstants = struct {
True string
False string
NumberFormat string
}{
True: "true",
False: "false",
NumberFormat: "%.15g",
}
// convertToCanonicalValue converts an arbitrary Go value to a canonical form.
func convertToCanonicalValue(obj interface{}) (interface{}, ValueKind, interface{}) {
switch v := obj.(type) {
case nil:
return nil, ValueKindNull, nil
case bool:
return v, ValueKindBoolean, v
case int, int8, int16, int32, int64:
f := float64(toInt64(v))
return f, ValueKindNumber, f
case uint, uint8, uint16, uint32, uint64:
f := float64(toUint64(v))
return f, ValueKindNumber, f
case float32, float64:
f := toFloat64(v)
return f, ValueKindNumber, f
case string:
return v, ValueKindString, v
case []interface{}:
return BasicArray[any](v), ValueKindArray, v
case ReadOnlyArray[any]:
return v, ValueKindArray, v
case map[string]interface{}:
return CaseInsensitiveObject[any](v), ValueKindObject, v
case ReadOnlyObject[any]:
return v, ValueKindObject, v
default:
// Fallback: treat as object
return v, ValueKindObject, v
}
}
func toInt64(v interface{}) int64 {
switch i := v.(type) {
case int:
return int64(i)
case int8:
return int64(i)
case int16:
return int64(i)
case int32:
return int64(i)
case int64:
return i
default:
return 0
}
}
func toUint64(v interface{}) uint64 {
switch i := v.(type) {
case uint:
return uint64(i)
case uint8:
return uint64(i)
case uint16:
return uint64(i)
case uint32:
return uint64(i)
case uint64:
return i
default:
return 0
}
}
func toFloat64(v interface{}) float64 {
switch f := v.(type) {
case float32:
return float64(f)
case float64:
return f
default:
return 0
}
}
// coerceTypes implements the C# CoerceTypes logic.
// It converts values to compatible types before comparison.
func coerceTypes(left, right interface{}) (interface{}, interface{}, ValueKind, ValueKind) {
leftKind := getKind(left)
rightKind := getKind(right)
// same kind, nothing to do
if leftKind == rightKind {
return left, right, leftKind, rightKind
}
// Number <-> String
if leftKind == ValueKindNumber && rightKind == ValueKindString {
right = convertToNumber(right)
rightKind = ValueKindNumber
return left, right, leftKind, rightKind
}
if leftKind == ValueKindString && rightKind == ValueKindNumber {
left = convertToNumber(left)
leftKind = ValueKindNumber
return left, right, leftKind, rightKind
}
// Boolean or Null -> Number
if leftKind == ValueKindBoolean || leftKind == ValueKindNull {
left = convertToNumber(left)
return coerceTypes(left, right)
}
if rightKind == ValueKindBoolean || rightKind == ValueKindNull {
right = convertToNumber(right)
return coerceTypes(left, right)
}
// otherwise keep as is
return left, right, leftKind, rightKind
}
// abstractEqual uses coerceTypes before comparing.
func abstractEqual(left, right interface{}) bool {
left, right, leftKind, rightKind := coerceTypes(left, right)
if leftKind != rightKind {
return false
}
switch leftKind {
case ValueKindNull:
return true
case ValueKindNumber:
l := left.(float64)
r := right.(float64)
if isNaN(l) || isNaN(r) {
return false
}
return l == r
case ValueKindString:
return strings.EqualFold(left.(string), right.(string))
case ValueKindBoolean:
return left.(bool) == right.(bool)
// Object and array comparison is not supported here; comparing maps or slices with == would panic
// case ValueKindObject, ValueKindArray:
// return left == right
}
return false
}
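// exampleAbstractEqual is a hypothetical helper, added purely as an illustration of the
// loose-equality rules above: strings compare case-insensitively, and mixed kinds are
// coerced to numbers before comparing.
func exampleAbstractEqual() {
_ = abstractEqual("ABC", "abc")    // true: case-insensitive string comparison
_ = abstractEqual("5", float64(5)) // true: "5" is coerced to the number 5
_ = abstractEqual(nil, false)      // true: both sides coerce to the number 0
}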
// abstractGreaterThan uses coerceTypes before comparing.
func abstractGreaterThan(left, right interface{}) bool {
left, right, leftKind, rightKind := coerceTypes(left, right)
if leftKind != rightKind {
return false
}
switch leftKind {
case ValueKindNumber:
l := left.(float64)
r := right.(float64)
if isNaN(l) || isNaN(r) {
return false
}
return l > r
case ValueKindString:
return strings.Compare(left.(string), right.(string)) > 0
case ValueKindBoolean:
return left.(bool) && !right.(bool)
}
return false
}
// abstractLessThan uses coerceTypes before comparing.
func abstractLessThan(left, right interface{}) bool {
left, right, leftKind, rightKind := coerceTypes(left, right)
if leftKind != rightKind {
return false
}
switch leftKind {
case ValueKindNumber:
l := left.(float64)
r := right.(float64)
if isNaN(l) || isNaN(r) {
return false
}
return l < r
case ValueKindString:
return strings.Compare(left.(string), right.(string)) < 0
case ValueKindBoolean:
return !left.(bool) && right.(bool)
}
return false
}
// convertToNumber converts a value to a float64 following JavaScript rules.
func convertToNumber(v interface{}) float64 {
switch val := v.(type) {
case nil:
return 0
case bool:
if val {
return 1
}
return 0
case float64:
return val
case float32:
return float64(val)
case string:
// parse the string as a number
if val == "" {
return float64(0)
}
if len(val) > 2 {
switch val[:2] {
case "0x", "0o":
if i, err := strconv.ParseInt(val, 0, 32); err == nil {
return float64(i)
}
}
}
if f, err := strconv.ParseFloat(val, 64); err == nil {
return f
}
return math.NaN()
default:
return math.NaN()
}
}
// getKind returns the ValueKind for a Go value.
func getKind(v interface{}) ValueKind {
switch v.(type) {
case nil:
return ValueKindNull
case bool:
return ValueKindBoolean
case float64, float32, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
return ValueKindNumber
case string:
return ValueKindString
case []interface{}:
return ValueKindArray
case map[string]interface{}:
return ValueKindObject
default:
return ValueKindObject
}
}
// traceValue is a placeholder for tracing logic.
func (er *EvaluationResult) traceValue() {
// No-op in this simplified implementation.
}
// --- End of file ---------------------------------------

View File

@@ -0,0 +1,276 @@
package v2
import (
"errors"
"fmt"
exprparser "github.com/actions-oss/act-cli/internal/expr"
)
// EvaluationContext holds the variables and functions that can be referenced in expressions.
type EvaluationContext struct {
Variables ReadOnlyObject[any]
Functions ReadOnlyObject[Function]
}
func NewEvaluationContext() *EvaluationContext {
return &EvaluationContext{}
}
type Function interface {
Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error)
}
// Evaluator evaluates workflow expressions using the lexer and parser from the expr package.
type Evaluator struct {
ctx *EvaluationContext
}
// NewEvaluator creates an Evaluator with the supplied context.
func NewEvaluator(ctx *EvaluationContext) *Evaluator {
return &Evaluator{ctx: ctx}
}
func (e *Evaluator) Context() *EvaluationContext {
return e.ctx
}
func (e *Evaluator) Evaluate(root exprparser.Node) (*EvaluationResult, error) {
result, err := e.evalNode(root)
if err != nil {
return nil, err
}
return result, nil
}
// EvaluateBoolean parses and evaluates the expression, returning a boolean result.
func (e *Evaluator) EvaluateBoolean(expr string) (bool, error) {
root, err := exprparser.Parse(expr)
if err != nil {
return false, fmt.Errorf("parse error: %w", err)
}
result, err := e.evalNode(root)
if err != nil {
return false, err
}
return result.IsTruthy(), nil
}
func (e *Evaluator) ToRaw(result *EvaluationResult) (interface{}, error) {
if col, ok := result.TryGetCollectionInterface(); ok {
switch node := col.(type) {
case ReadOnlyObject[any]:
rawMap := map[string]interface{}{}
for k, v := range node.GetEnumerator() {
rawRes, err := e.ToRaw(CreateIntermediateResult(e.Context(), v))
if err != nil {
return nil, err
}
rawMap[k] = rawRes
}
return rawMap, nil
case ReadOnlyArray[any]:
rawArray := []interface{}{}
for _, v := range node.GetEnumerator() {
rawRes, err := e.ToRaw(CreateIntermediateResult(e.Context(), v))
if err != nil {
return nil, err
}
rawArray = append(rawArray, rawRes)
}
return rawArray, nil
}
}
return result.Value(), nil
}
// EvaluateRaw parses and evaluates the expression and returns the result as a raw Go value.
func (e *Evaluator) EvaluateRaw(expr string) (interface{}, error) {
root, err := exprparser.Parse(expr)
if err != nil {
return false, fmt.Errorf("parse error: %w", err)
}
result, err := e.evalNode(root)
if err != nil {
return false, err
}
return e.ToRaw(result)
}
type FilteredArray []interface{}
func (a FilteredArray) GetAt(i int64) interface{} {
if i < 0 || int(i) >= len(a) {
return nil
}
return a[i]
}
func (a FilteredArray) GetEnumerator() []interface{} {
return a
}
// evalNode recursively evaluates a parser node and returns an EvaluationResult.
func (e *Evaluator) evalNode(n exprparser.Node) (*EvaluationResult, error) {
switch node := n.(type) {
case *exprparser.ValueNode:
return e.evalValueNode(node)
case *exprparser.FunctionNode:
return e.evalFunctionNode(node)
case *exprparser.BinaryNode:
return e.evalBinaryNode(node)
case *exprparser.UnaryNode:
return e.evalUnaryNode(node)
}
return nil, errors.New("unknown node type")
}
func (e *Evaluator) evalValueNode(node *exprparser.ValueNode) (*EvaluationResult, error) {
if node.Kind == exprparser.TokenKindNamedValue {
if e.ctx != nil {
val := e.ctx.Variables.Get(node.Value.(string))
if val == nil {
return nil, fmt.Errorf("undefined variable %s", node.Value)
}
return CreateIntermediateResult(e.Context(), val), nil
}
return nil, errors.New("no evaluation context")
}
return CreateIntermediateResult(e.Context(), node.Value), nil
}
func (e *Evaluator) evalFunctionNode(node *exprparser.FunctionNode) (*EvaluationResult, error) {
fn := e.ctx.Functions.Get(node.Name)
if fn == nil {
return nil, fmt.Errorf("unknown function %v", node.Name)
}
return fn.Evaluate(e, node.Args)
}
func (e *Evaluator) evalBinaryNode(node *exprparser.BinaryNode) (*EvaluationResult, error) {
left, err := e.evalNode(node.Left)
if err != nil {
return nil, err
}
if res, err := e.evalBinaryNodeLeft(node, left); res != nil || err != nil {
return res, err
}
right, err := e.evalNode(node.Right)
if err != nil {
return nil, err
}
return e.evalBinaryNodeRight(node, left, right)
}
func (e *Evaluator) evalBinaryNodeLeft(node *exprparser.BinaryNode, left *EvaluationResult) (*EvaluationResult, error) {
switch node.Op {
case "&&":
if left.IsFalsy() {
return left, nil
}
case "||":
if left.IsTruthy() {
return left, nil
}
case ".":
if v, ok := node.Right.(*exprparser.ValueNode); ok && v.Kind == exprparser.TokenKindWildcard {
var ret FilteredArray
if col, ok := left.TryGetCollectionInterface(); ok {
if farray, ok := col.(FilteredArray); ok {
for _, subcol := range farray.GetEnumerator() {
ret = processStar(CreateIntermediateResult(e.Context(), subcol).Value(), ret)
}
} else {
ret = processStar(col, ret)
}
}
return CreateIntermediateResult(e.Context(), ret), nil
}
}
return nil, nil
}
func (e *Evaluator) evalBinaryNodeRight(node *exprparser.BinaryNode, left *EvaluationResult, right *EvaluationResult) (*EvaluationResult, error) {
switch node.Op {
case "&&":
return right, nil
case "||":
return right, nil
case "==":
// Use abstract equality per spec
return CreateIntermediateResult(e.Context(), left.AbstractEqual(right)), nil
case "!=":
return CreateIntermediateResult(e.Context(), left.AbstractNotEqual(right)), nil
case ">":
return CreateIntermediateResult(e.Context(), left.AbstractGreaterThan(right)), nil
case "<":
return CreateIntermediateResult(e.Context(), left.AbstractLessThan(right)), nil
case ">=":
return CreateIntermediateResult(e.Context(), left.AbstractGreaterThanOrEqual(right)), nil
case "<=":
return CreateIntermediateResult(e.Context(), left.AbstractLessThanOrEqual(right)), nil
case ".", "[":
if farray, ok := left.Value().(FilteredArray); ok {
var ret FilteredArray
for _, subcol := range farray.GetEnumerator() {
res := processIndex(CreateIntermediateResult(e.Context(), subcol).Value(), right)
if res != nil {
ret = append(ret, res)
}
}
if ret == nil {
return CreateIntermediateResult(e.Context(), nil), nil
}
return CreateIntermediateResult(e.Context(), ret), nil
}
col, _ := left.TryGetCollectionInterface()
result := processIndex(col, right)
return CreateIntermediateResult(e.Context(), result), nil
default:
return nil, fmt.Errorf("unsupported operator %s", node.Op)
}
}
func (e *Evaluator) evalUnaryNode(node *exprparser.UnaryNode) (*EvaluationResult, error) {
operand, err := e.evalNode(node.Operand)
if err != nil {
return nil, err
}
switch node.Op {
case "!":
return CreateIntermediateResult(e.Context(), !operand.IsTruthy()), nil
default:
return nil, fmt.Errorf("unsupported unary operator %s", node.Op)
}
}
func processIndex(col interface{}, right *EvaluationResult) interface{} {
if mapVal, ok := col.(ReadOnlyObject[any]); ok {
key, ok := right.Value().(string)
if !ok {
return nil
}
val := mapVal.Get(key)
return val
}
if arrayVal, ok := col.(ReadOnlyArray[any]); ok {
key, ok := right.Value().(float64)
if !ok || key < 0 {
return nil
}
val := arrayVal.GetAt(int64(key))
return val
}
return nil
}
func processStar(subcol interface{}, ret FilteredArray) FilteredArray {
if array, ok := subcol.(ReadOnlyArray[any]); ok {
ret = append(ret, array.GetEnumerator()...)
} else if obj, ok := subcol.(ReadOnlyObject[any]); ok {
for _, v := range obj.GetEnumerator() {
ret = append(ret, v)
}
}
return ret
}

View File

@@ -0,0 +1,117 @@
package v2
import (
"testing"
)
// Test boolean and comparison operations using the evaluator.
func TestEvaluator_BooleanOps(t *testing.T) {
ctx := &EvaluationContext{Variables: CaseInsensitiveObject[any](map[string]interface{}{"a": 5, "b": 3})}
eval := NewEvaluator(ctx)
tests := []struct {
expr string
want bool
}{
{"1 == 1", true},
{"1 != 2", true},
{"5 > 3", true},
{"2 < 4", true},
{"5 >= 5", true},
{"3 <= 4", true},
{"true && false", false},
{"!false", true},
{"a > b", true},
}
for _, tt := range tests {
got, err := eval.EvaluateBoolean(tt.expr)
if err != nil {
t.Fatalf("evaluate %s error: %v", tt.expr, err)
}
if got != tt.want {
t.Fatalf("evaluate %s expected %v got %v", tt.expr, tt.want, got)
}
}
}
func TestEvaluator_Raw(t *testing.T) {
ctx := &EvaluationContext{
Variables: CaseInsensitiveObject[any](map[string]any{"a": 5, "b": 3}),
Functions: GetFunctions(),
}
eval := NewEvaluator(ctx)
tests := []struct {
expr string
want interface{}
}{
{"a.b['x']", nil},
{"(a.b).c['x']", nil},
{"(a.b).*['x']", nil},
{"(a['x'])", nil},
{"true || false", true},
{"false || false", false},
{"false || true", true},
{"false || true || false", true},
{"contains('', '') || contains('', '') || contains('', '')", true},
{"1 == 1", true},
{"1 != 2", true},
{"5 > 3", true},
{"2 < 4", true},
{"5 >= 5", true},
{"3 <= 4", true},
{"true && false", false},
{"!false", true},
{"a > b", true},
{"!(a > b)", false},
{"!(a > b) || !0", true},
{"!(a > b) || !(1)", false},
{"'Hello World'", "Hello World"},
{"23.5", 23.5},
{"fromjson('{\"twst\":\"x\"}')['twst']", "x"},
{"fromjson('{\"Twst\":\"x\"}')['twst']", "x"},
{"fromjson('{\"TwsT\":\"x\"}')['twst']", "x"},
{"fromjson('{\"TwsT\":\"x\"}')['tWst']", "x"},
{"fromjson('{\"TwsT\":{\"a\":\"y\"}}').TwsT.a", "y"},
{"fromjson('{\"TwsT\":{\"a\":\"y\"}}')['TwsT'].a", "y"},
{"fromjson('{\"TwsT\":{\"a\":\"y\"}}')['TwsT']['a']", "y"},
{"fromjson('{\"TwsT\":{\"a\":\"y\"}}').TwsT['a']", "y"},
// {"fromjson('{\"TwsT\":\"x\"}').*[0]", "x"},
{"fromjson('{\"TwsT\":[\"x\"]}')['TwsT'][0]", "x"},
{"fromjson('[]')['tWst']", nil},
{"fromjson('[]').tWst", nil},
{"contains('a', 'a')", true},
{"contains('bab', 'a')", true},
{"contains('bab', 'ac')", false},
{"contains(fromjson('[\"ac\"]'), 'ac')", true},
{"contains(fromjson('[\"ac\"]'), 'a')", false},
// {"fromjson('{\"TwsT\":{\"a\":\"y\"}}').*['a']", "y"},
{"fromjson(tojson(fromjson('{\"TwsT\":{\"a\":\"y\"}}').*.a))[0]", "y"},
{"fromjson(tojson(fromjson('{\"TwsT\":{\"a\":\"y\"}}').*['a']))[0]", "y"},
{"fromjson('{}').x", nil},
{"format('{0}', fromjson('{}').x)", ""},
{"format('{0}', fromjson('{}')[0])", ""},
{"fromjson(tojson(fromjson('[[3,5],[5,6]]').*[1]))[1]", float64(6)},
{"contains(fromjson('[[3,5],[5,6]]').*[1], 5)", true},
{"contains(fromjson('[[3,5],[5,6]]').*[1], 6)", true},
{"contains(fromjson('[[3,5],[5,6]]').*[1], 3)", false},
{"contains(fromjson('[[3,5],[5,6]]').*[1], '6')", true},
{"case(6 == 6, 0, 1)", 0.0},
{"case(6 != 6, 0, 1)", 1.0},
{"case(6 != 6, 0, 'test')", "test"},
{"case(contains(fromjson('[\"ac\"]'), 'a'), 0, 'test')", "test"},
{"case(0 == 1, 0, 2 == 2, 1, 0)", 1.0},
{"case(0 == 1, 0, 2 != 2, 1, 0)", 0.0},
}
for _, tt := range tests {
got, err := eval.EvaluateRaw(tt.expr)
if err != nil {
t.Fatalf("evaluate %s error: %v", tt.expr, err)
}
if got != tt.want {
t.Fatalf("evaluate %s expected %v got %v", tt.expr, tt.want, got)
}
}
}

View File

@@ -0,0 +1,202 @@
package v2
import (
"encoding/json"
"errors"
"strings"
"github.com/actions-oss/act-cli/internal/eval/functions"
exprparser "github.com/actions-oss/act-cli/internal/expr"
)
type FromJSON struct {
}
func (FromJSON) Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error) {
r, err := eval.Evaluate(args[0])
if err != nil {
return nil, err
}
var res any
if err := json.Unmarshal([]byte(r.ConvertToString()), &res); err != nil {
return nil, err
}
return CreateIntermediateResult(eval.Context(), res), nil
}
type ToJSON struct {
}
func (ToJSON) Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error) {
r, err := eval.Evaluate(args[0])
if err != nil {
return nil, err
}
raw, err := eval.ToRaw(r)
if err != nil {
return nil, err
}
data, err := json.MarshalIndent(raw, "", " ")
if err != nil {
return nil, err
}
return CreateIntermediateResult(eval.Context(), string(data)), nil
}
type Contains struct {
}
func (Contains) Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error) {
collection, err := eval.Evaluate(args[0])
if err != nil {
return nil, err
}
el, err := eval.Evaluate(args[1])
if err != nil {
return nil, err
}
// Array
if col, ok := collection.TryGetCollectionInterface(); ok {
if node, ok := col.(ReadOnlyArray[any]); ok {
for _, v := range node.GetEnumerator() {
canon := CreateIntermediateResult(eval.Context(), v)
if canon.AbstractEqual(el) {
return CreateIntermediateResult(eval.Context(), true), nil
}
}
}
return CreateIntermediateResult(eval.Context(), false), nil
}
// String
return CreateIntermediateResult(eval.Context(), strings.Contains(strings.ToLower(collection.ConvertToString()), strings.ToLower(el.ConvertToString()))), nil
}
type StartsWith struct {
}
func (StartsWith) Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error) {
collection, err := eval.Evaluate(args[0])
if err != nil {
return nil, err
}
el, err := eval.Evaluate(args[1])
if err != nil {
return nil, err
}
// String
return CreateIntermediateResult(eval.Context(), strings.HasPrefix(strings.ToLower(collection.ConvertToString()), strings.ToLower(el.ConvertToString()))), nil
}
type EndsWith struct {
}
func (EndsWith) Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error) {
collection, err := eval.Evaluate(args[0])
if err != nil {
return nil, err
}
el, err := eval.Evaluate(args[1])
if err != nil {
return nil, err
}
// String
return CreateIntermediateResult(eval.Context(), strings.HasSuffix(strings.ToLower(collection.ConvertToString()), strings.ToLower(el.ConvertToString()))), nil
}
type Format struct {
}
func (Format) Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error) {
collection, err := eval.Evaluate(args[0])
if err != nil {
return nil, err
}
sargs := []interface{}{}
for _, arg := range args[1:] {
el, err := eval.Evaluate(arg)
if err != nil {
return nil, err
}
sargs = append(sargs, el.ConvertToString())
}
ret, err := functions.Format(collection.ConvertToString(), sargs...)
return CreateIntermediateResult(eval.Context(), ret), err
}
type Join struct {
}
func (Join) Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error) {
collection, err := eval.Evaluate(args[0])
if err != nil {
return nil, err
}
var el *EvaluationResult
if len(args) > 1 {
if el, err = eval.Evaluate(args[1]); err != nil {
return nil, err
}
}
// Array
if col, ok := collection.TryGetCollectionInterface(); ok {
var elements []string
if node, ok := col.(ReadOnlyArray[any]); ok {
for _, v := range node.GetEnumerator() {
elements = append(elements, CreateIntermediateResult(eval.Context(), v).ConvertToString())
}
}
var sep string
if el != nil {
sep = el.ConvertToString()
} else {
sep = ","
}
return CreateIntermediateResult(eval.Context(), strings.Join(elements, sep)), nil
}
// Primitive
if collection.IsPrimitive() {
return CreateIntermediateResult(eval.Context(), collection.ConvertToString()), nil
}
return CreateIntermediateResult(eval.Context(), ""), nil
}
type Case struct {
}
func (Case) Evaluate(eval *Evaluator, args []exprparser.Node) (*EvaluationResult, error) {
if len(args)%2 == 0 {
return nil, errors.New("case function requires an odd number of arguments")
}
for i := 0; i < len(args)-1; i += 2 {
condition, err := eval.Evaluate(args[i])
if err != nil {
return nil, err
}
if condition.kind != ValueKindBoolean {
return nil, errors.New("case function conditions must evaluate to boolean")
}
if condition.IsTruthy() {
return eval.Evaluate(args[i+1])
}
}
return eval.Evaluate(args[len(args)-1])
}
func GetFunctions() CaseInsensitiveObject[Function] {
return CaseInsensitiveObject[Function](map[string]Function{
"fromjson": &FromJSON{},
"tojson": &ToJSON{},
"contains": &Contains{},
"startswith": &StartsWith{},
"endswith": &EndsWith{},
"format": &Format{},
"join": &Join{},
"case": &Case{},
})
}

View File

@@ -0,0 +1,27 @@
package workflow
import "testing"
func TestExpressionParser(t *testing.T) {
node, err := Parse("github.event_name")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
t.Logf("Parsed expression: %+v", node)
}
func TestExpressionParserWildcard(t *testing.T) {
node, err := Parse("github.commits.*.message")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
t.Logf("Parsed expression: %+v", node)
}
func TestExpressionParserDot(t *testing.T) {
node, err := Parse("github.head_commit.message")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
t.Logf("Parsed expression: %+v", node)
}

View File

@@ -0,0 +1,306 @@
package workflow
import (
"errors"
"fmt"
"strings"
)
// Node represents a node in the expression tree.
// It is intentionally minimal; it contains only the fields needed for the parser.
// Users can extend it with more information if required.
type Node interface {
String() string
}
// ValueNode represents a literal value (number, string, boolean, null) or a named value.
// The Kind field indicates the type.
// For named values, Value holds the raw name.
type ValueNode struct {
Kind TokenKind
Value interface{}
}
// FunctionNode represents a function call with arguments.
type FunctionNode struct {
Name string
Args []Node
}
// BinaryNode represents a binary operator.
type BinaryNode struct {
Op string
Left Node
Right Node
}
// UnaryNode represents a unary operator.
type UnaryNode struct {
Op string
Operand Node
}
// Parser holds the lexer and the stacks used by the shunting-yard algorithm.
type Parser struct {
lexer *Lexer
tokens []Token
pos int
ops []OpToken
vals []Node
}
type OpToken struct {
Token
StartPos int
}
func precedence(tkn Token) int {
switch tkn.Kind {
case TokenKindStartGroup:
return 20
case TokenKindStartIndex, TokenKindStartParameters, TokenKindDereference:
return 19
case TokenKindLogicalOperator:
switch tkn.Raw {
case "!":
return 16
case ">", ">=", "<", "<=":
return 11
case "==", "!=":
return 10
case "&&":
return 6
case "||":
return 5
}
case TokenKindEndGroup, TokenKindEndIndex, TokenKindEndParameters, TokenKindSeparator:
return 1
}
return 0
}
// Parse parses the expression and returns the root node.
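//
// Illustrative usage (the rendered form follows the String methods below):
//
//	node, err := Parse("a && b.c")
//	// err == nil, node.String() == "(a && (b . c))"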
func Parse(expression string) (Node, error) {
lexer := NewLexer(expression, 0)
p := &Parser{}
// Tokenise all tokens
if err := p.initWithLexer(lexer); err != nil {
return nil, err
}
return p.parse()
}
func (p *Parser) parse() (Node, error) {
// Shunting-yard algorithm
for p.pos < len(p.tokens) {
tok := p.tokens[p.pos]
p.pos++
switch tok.Kind {
case TokenKindNumber, TokenKindString, TokenKindBoolean, TokenKindNull:
p.pushValue(&ValueNode{Kind: tok.Kind, Value: tok.Value})
case TokenKindNamedValue, TokenKindPropertyName, TokenKindWildcard:
p.pushValue(&ValueNode{Kind: tok.Kind, Value: tok.Raw})
case TokenKindFunction:
p.pushFunc(tok, len(p.vals))
case TokenKindStartParameters, TokenKindStartGroup, TokenKindStartIndex, TokenKindLogicalOperator, TokenKindDereference:
if err := p.pushOp(tok); err != nil {
return nil, err
}
case TokenKindSeparator:
if err := p.popGroup(TokenKindStartParameters); err != nil {
return nil, err
}
case TokenKindEndParameters:
if err := p.pushFuncValue(); err != nil {
return nil, err
}
case TokenKindEndGroup:
if err := p.popGroup(TokenKindStartGroup); err != nil {
return nil, err
}
p.ops = p.ops[:len(p.ops)-1]
case TokenKindEndIndex:
if err := p.popGroup(TokenKindStartIndex); err != nil {
return nil, err
}
// pop the start index token
p.ops = p.ops[:len(p.ops)-1]
right := p.vals[len(p.vals)-1]
p.vals = p.vals[:len(p.vals)-1]
left := p.vals[len(p.vals)-1]
p.vals = p.vals[:len(p.vals)-1]
p.vals = append(p.vals, &BinaryNode{Op: "[", Left: left, Right: right})
}
}
for len(p.ops) > 0 {
if err := p.popOp(); err != nil {
return nil, err
}
}
if len(p.vals) != 1 {
return nil, errors.New("invalid expression")
}
return p.vals[0], nil
}
func (p *Parser) pushFuncValue() error {
if err := p.popGroup(TokenKindStartParameters); err != nil {
return err
}
// pop the start parameters
p.ops = p.ops[:len(p.ops)-1]
// create function node
fnTok := p.ops[len(p.ops)-1]
if fnTok.Kind != TokenKindFunction {
return errors.New("expected function token")
}
p.ops = p.ops[:len(p.ops)-1]
// collect arguments
args := []Node{}
for len(p.vals) > fnTok.StartPos {
args = append([]Node{p.vals[len(p.vals)-1]}, args...)
p.vals = p.vals[:len(p.vals)-1]
}
p.pushValue(&FunctionNode{Name: fnTok.Raw, Args: args})
return nil
}
func (p *Parser) initWithLexer(lexer *Lexer) error {
p.lexer = lexer
for {
tok := lexer.Next()
if tok == nil {
break
}
if tok.Kind == TokenKindUnexpected {
return fmt.Errorf("unexpected token %s at position %d", tok.Raw, tok.Index)
}
p.tokens = append(p.tokens, *tok)
}
return nil
}
func (p *Parser) popGroup(kind TokenKind) error {
for len(p.ops) > 0 && p.ops[len(p.ops)-1].Kind != kind {
if err := p.popOp(); err != nil {
return err
}
}
if len(p.ops) == 0 {
return errors.New("mismatched parentheses")
}
return nil
}
func (p *Parser) pushValue(v Node) {
p.vals = append(p.vals, v)
}
func (p *Parser) pushOp(t Token) error {
for len(p.ops) > 0 {
top := p.ops[len(p.ops)-1]
if precedence(top.Token) >= precedence(t) &&
top.Kind != TokenKindStartGroup &&
top.Kind != TokenKindStartIndex &&
top.Kind != TokenKindStartParameters &&
top.Kind != TokenKindSeparator {
if err := p.popOp(); err != nil {
return err
}
} else {
break
}
}
p.ops = append(p.ops, OpToken{Token: t})
return nil
}
func (p *Parser) pushFunc(t Token, start int) {
p.ops = append(p.ops, OpToken{Token: t, StartPos: start})
}
func (p *Parser) popOp() error {
if len(p.ops) == 0 {
return nil
}
op := p.ops[len(p.ops)-1]
p.ops = p.ops[:len(p.ops)-1]
switch op.Kind {
case TokenKindLogicalOperator:
if op.Raw == "!" {
if len(p.vals) < 1 {
return errors.New("insufficient operands")
}
right := p.vals[len(p.vals)-1]
p.vals = p.vals[:len(p.vals)-1]
p.vals = append(p.vals, &UnaryNode{Op: op.Raw, Operand: right})
} else {
if len(p.vals) < 2 {
return errors.New("insufficient operands")
}
right := p.vals[len(p.vals)-1]
left := p.vals[len(p.vals)-2]
p.vals = p.vals[:len(p.vals)-2]
p.vals = append(p.vals, &BinaryNode{Op: op.Raw, Left: left, Right: right})
}
case TokenKindStartParameters:
// start parameters are consumed in pushFuncValue; nothing to pop here
case TokenKindDereference:
if len(p.vals) < 2 {
return errors.New("insufficient operands")
}
right := p.vals[len(p.vals)-1]
left := p.vals[len(p.vals)-2]
p.vals = p.vals[:len(p.vals)-2]
p.vals = append(p.vals, &BinaryNode{Op: ".", Left: left, Right: right})
}
return nil
}
// String returns a string representation of the node.
func (n *ValueNode) String() string { return fmt.Sprintf("%v", n.Value) }
// String returns a string representation of the node.
func (n *FunctionNode) String() string {
return fmt.Sprintf("%s(%s)", n.Name, strings.Join(funcArgs(n.Args), ", "))
}
func funcArgs(args []Node) []string {
res := []string{}
for _, a := range args {
res = append(res, a.String())
}
return res
}
// String returns a string representation of the node.
func (n *BinaryNode) String() string {
return fmt.Sprintf("(%s %s %s)", n.Left.String(), n.Op, n.Right.String())
}
// String returns a string representation of the node.
func (n *UnaryNode) String() string { return fmt.Sprintf("(%s%s)", n.Op, n.Operand.String()) }
func VisitNode(exprNode Node, callback func(node Node)) {
callback(exprNode)
switch node := exprNode.(type) {
case *FunctionNode:
for _, arg := range node.Args {
VisitNode(arg, callback)
}
case *UnaryNode:
VisitNode(node.Operand, callback)
case *BinaryNode:
VisitNode(node.Left, callback)
VisitNode(node.Right, callback)
}
}

361
internal/expr/lexer.go Normal file
View File

@@ -0,0 +1,361 @@
package workflow
import (
"math"
"slices"
"strconv"
"strings"
"unicode"
)
// TokenKind represents the type of token returned by the lexer.
// The values mirror the C# TokenKind enum.
//
// Note: The names are kept identical to the C# implementation for
// easier mapping when porting the parser.
//
// The lexer is intentionally simple; it only tokenises the subset of
// expressions that is used in GitHub Actions workflow `if:` expressions.
// It does not evaluate the expression; that is left to the parser.
type TokenKind int
const (
TokenKindStartGroup TokenKind = iota
TokenKindStartIndex
TokenKindEndGroup
TokenKindEndIndex
TokenKindSeparator
TokenKindDereference
TokenKindWildcard
TokenKindLogicalOperator
TokenKindNumber
TokenKindString
TokenKindBoolean
TokenKindNull
TokenKindPropertyName
TokenKindFunction
TokenKindNamedValue
TokenKindStartParameters
TokenKindEndParameters
TokenKindUnexpected
)
// Token represents a single lexical token.
// Raw holds the original text, Value holds the parsed value when applicable.
// Index is the start position in the source string.
//
// The struct is intentionally minimal; it only contains what the parser
// needs. If you need more information (e.g. token length) you can add it.
type Token struct {
Kind TokenKind
Raw string
Value interface{}
Index int
}
// Lexer holds the state while tokenising an expression.
// It is a direct port of the C# LexicalAnalyzer.
//
// Flags can be used to enable/disable features; for now we only support
// a single flag that mirrors ExpressionFlags.DTExpressionsV1.
//
// The lexer is not thread-safe; use a single instance per expression.
type Lexer struct {
expr string
flags int
index int
last *Token
stack []TokenKind // unclosed start tokens
}
// NewLexer creates a new lexer for the given expression.
func NewLexer(expr string, flags int) *Lexer {
return &Lexer{expr: expr, flags: flags}
}
func testTokenBoundary(c rune) bool {
switch c {
case '(', '[', ')', ']', ',', '.',
'!', '>', '<', '=', '&', '|':
return true
default:
return unicode.IsSpace(c)
}
}
// Next returns the next token or nil if the end of the expression is reached.
func (l *Lexer) Next() *Token {
// Skip whitespace
for l.index < len(l.expr) && unicode.IsSpace(rune(l.expr[l.index])) {
l.index++
}
if l.index >= len(l.expr) {
return nil
}
c := l.expr[l.index]
switch c {
case '(':
l.index++
// Function call or logical grouping
if l.last != nil && l.last.Kind == TokenKindFunction {
return l.createToken(TokenKindStartParameters, "(")
}
if l.flags&FlagV1 != 0 {
// V1 does not support grouping; treat as unexpected
return l.createToken(TokenKindUnexpected, "(")
}
return l.createToken(TokenKindStartGroup, "(")
case '[':
l.index++
return l.createToken(TokenKindStartIndex, "[")
case ')':
l.index++
if len(l.stack) > 0 && l.stack[len(l.stack)-1] == TokenKindStartParameters {
return l.createToken(TokenKindEndParameters, ")")
}
return l.createToken(TokenKindEndGroup, ")")
case ']':
l.index++
return l.createToken(TokenKindEndIndex, "]")
case ',':
l.index++
return l.createToken(TokenKindSeparator, ",")
case '*':
l.index++
return l.createToken(TokenKindWildcard, "*")
case '\'':
return l.readString()
case '!', '>', '<', '=', '&', '|':
if l.flags&FlagV1 != 0 {
l.index++
return l.createToken(TokenKindUnexpected, string(c))
}
return l.readOperator()
default:
return l.defaultNext(c)
}
}
func (l *Lexer) defaultNext(c byte) *Token {
if c == '.' {
// Could be number or dereference
if l.last == nil || l.last.Kind == TokenKindSeparator || l.last.Kind == TokenKindStartGroup || l.last.Kind == TokenKindStartIndex || l.last.Kind == TokenKindStartParameters || l.last.Kind == TokenKindLogicalOperator {
return l.readNumber()
}
l.index++
return l.createToken(TokenKindDereference, ".")
}
if c == '-' || c == '+' || unicode.IsDigit(rune(c)) {
return l.readNumber()
}
return l.readKeyword()
}
// Helper to create a token and update lexer state.
func (l *Lexer) createToken(kind TokenKind, raw string) *Token {
// Token order check
if !l.checkLastToken(kind, raw) {
// Illegal token sequence
return &Token{Kind: TokenKindUnexpected, Raw: raw, Index: l.index}
}
tok := &Token{Kind: kind, Raw: raw, Index: l.index}
l.last = tok
// Manage stack for grouping
switch kind {
case TokenKindStartGroup, TokenKindStartIndex, TokenKindStartParameters:
l.stack = append(l.stack, kind)
case TokenKindEndGroup, TokenKindEndIndex, TokenKindEndParameters:
if len(l.stack) > 0 {
l.stack = l.stack[:len(l.stack)-1]
}
}
return tok
}
// getLastKind returns the kind of the last token, or nil if no token has been read yet.
func (l *Lexer) getLastKind() *TokenKind {
var lastKind *TokenKind
if l.last != nil {
lastKind = &l.last.Kind
}
return lastKind
}
// checkLastToken verifies that the token sequence is legal based on the last token.
func (l *Lexer) checkLastToken(kind TokenKind, raw string) bool {
lastKind := l.getLastKind()
// Helper to check if lastKind is in allowed list
allowed := func(allowedKinds ...TokenKind) bool {
return lastKind != nil && slices.Contains(allowedKinds, *lastKind)
}
// A nil lastKind means there is no previous token
// Define allowed previous kinds for each token kind
switch kind {
case TokenKindStartGroup:
return lastKind == nil || allowed(TokenKindSeparator, TokenKindStartGroup, TokenKindStartParameters, TokenKindStartIndex, TokenKindLogicalOperator)
case TokenKindStartIndex:
return allowed(TokenKindEndGroup, TokenKindEndParameters, TokenKindEndIndex, TokenKindWildcard, TokenKindPropertyName, TokenKindNamedValue)
case TokenKindStartParameters:
return allowed(TokenKindFunction)
case TokenKindEndGroup:
return allowed(TokenKindEndGroup, TokenKindEndParameters, TokenKindEndIndex, TokenKindWildcard, TokenKindNull, TokenKindBoolean, TokenKindNumber, TokenKindString, TokenKindPropertyName, TokenKindNamedValue)
case TokenKindEndIndex:
return allowed(TokenKindEndGroup, TokenKindEndParameters, TokenKindEndIndex, TokenKindWildcard, TokenKindNull, TokenKindBoolean, TokenKindNumber, TokenKindString, TokenKindPropertyName, TokenKindNamedValue)
case TokenKindEndParameters:
return allowed(TokenKindStartParameters, TokenKindEndGroup, TokenKindEndParameters, TokenKindEndIndex, TokenKindWildcard, TokenKindNull, TokenKindBoolean, TokenKindNumber, TokenKindString, TokenKindPropertyName, TokenKindNamedValue)
case TokenKindSeparator:
return allowed(TokenKindEndGroup, TokenKindEndParameters, TokenKindEndIndex, TokenKindWildcard, TokenKindNull, TokenKindBoolean, TokenKindNumber, TokenKindString, TokenKindPropertyName, TokenKindNamedValue)
case TokenKindWildcard:
return allowed(TokenKindStartIndex, TokenKindDereference)
case TokenKindDereference:
return allowed(TokenKindEndGroup, TokenKindEndParameters, TokenKindEndIndex, TokenKindWildcard, TokenKindPropertyName, TokenKindNamedValue)
case TokenKindLogicalOperator:
if raw == "!" { // "!"
return lastKind == nil || allowed(TokenKindSeparator, TokenKindStartGroup, TokenKindStartParameters, TokenKindStartIndex, TokenKindLogicalOperator)
}
return allowed(TokenKindEndGroup, TokenKindEndParameters, TokenKindEndIndex, TokenKindWildcard, TokenKindNull, TokenKindBoolean, TokenKindNumber, TokenKindString, TokenKindPropertyName, TokenKindNamedValue)
case TokenKindNull, TokenKindBoolean, TokenKindNumber, TokenKindString:
return lastKind == nil || allowed(TokenKindSeparator, TokenKindStartIndex, TokenKindStartGroup, TokenKindStartParameters, TokenKindLogicalOperator)
case TokenKindPropertyName:
return allowed(TokenKindDereference)
case TokenKindFunction, TokenKindNamedValue:
return lastKind == nil || allowed(TokenKindSeparator, TokenKindStartIndex, TokenKindStartGroup, TokenKindStartParameters, TokenKindLogicalOperator)
default:
return true
}
}
// readNumber parses a numeric literal.
func (l *Lexer) readNumber() *Token {
start := l.index
periods := 0
for l.index < len(l.expr) {
ch := l.expr[l.index]
if ch == '.' {
periods++
}
if testTokenBoundary(rune(ch)) && ch != '.' {
break
}
l.index++
}
raw := l.expr[start:l.index]
if len(raw) > 2 {
switch raw[:2] {
case "0x", "0o":
tok := l.createToken(TokenKindNumber, raw)
if i, err := strconv.ParseInt(raw, 0, 32); err == nil {
tok.Value = float64(i)
return tok
}
}
}
// Try to parse as float64
var val interface{} = raw
if f, err := strconv.ParseFloat(raw, 64); err == nil {
val = f
}
tok := l.createToken(TokenKindNumber, raw)
tok.Value = val
return tok
}
// readString parses a single-quoted string literal.
func (l *Lexer) readString() *Token {
start := l.index
l.index++ // skip opening quote
var sb strings.Builder
closed := false
for l.index < len(l.expr) {
ch := l.expr[l.index]
l.index++
if ch == '\'' {
if l.index < len(l.expr) && l.expr[l.index] == '\'' {
// escaped quote
sb.WriteByte('\'')
l.index++
continue
}
closed = true
break
}
sb.WriteByte(ch)
}
raw := l.expr[start:l.index]
tok := l.createToken(TokenKindString, raw)
if closed {
tok.Value = sb.String()
} else {
tok.Kind = TokenKindUnexpected
}
return tok
}
// readOperator parses logical operators (==, !=, >, >=, etc.).
func (l *Lexer) readOperator() *Token {
start := l.index
l.index++
if l.index < len(l.expr) {
two := l.expr[start : l.index+1]
switch two {
case "!=", ">=", "<=", "==", "&&", "||":
l.index++
return l.createToken(TokenKindLogicalOperator, two)
}
}
ch := l.expr[start]
switch ch {
case '!', '>', '<':
return l.createToken(TokenKindLogicalOperator, string(ch))
}
return l.createToken(TokenKindUnexpected, string(ch))
}
// readKeyword parses identifiers, booleans, null, etc.
func (l *Lexer) readKeyword() *Token {
start := l.index
for l.index < len(l.expr) && !unicode.IsSpace(rune(l.expr[l.index])) && !strings.ContainsRune("()[],.!<>==&|*", rune(l.expr[l.index])) {
l.index++
}
raw := l.expr[start:l.index]
if l.last != nil && l.last.Kind == TokenKindDereference {
return l.createToken(TokenKindPropertyName, raw)
}
switch raw {
case "true":
tok := l.createToken(TokenKindBoolean, raw)
tok.Value = true
return tok
case "false":
tok := l.createToken(TokenKindBoolean, raw)
tok.Value = false
return tok
case "null":
return l.createToken(TokenKindNull, raw)
case "NaN":
tok := l.createToken(TokenKindNumber, raw)
tok.Value = math.NaN()
return tok
case "Infinity":
tok := l.createToken(TokenKindNumber, raw)
tok.Value = math.Inf(1)
return tok
}
if l.index < len(l.expr) && l.expr[l.index] == '(' {
return l.createToken(TokenKindFunction, raw)
}
return l.createToken(TokenKindNamedValue, raw)
}
// Flag constants; only V1 is used for now.
const FlagV1 = 1
// UnclosedTokens returns the stack of unclosed start tokens.
func (l *Lexer) UnclosedTokens() []TokenKind {
return l.stack
}

View File

@@ -0,0 +1,112 @@
package workflow
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestLexerMultiple runs a set of expressions through the lexer and
// verifies that the produced token kinds and values match expectations.
func TestLexerMultiple(t *testing.T) {
cases := []struct {
expr string
expected []TokenKind
values []interface{} // optional, nil if not checking values
}{
{
expr: "github.event_name == 'push'",
expected: []TokenKind{
TokenKindNamedValue, // github
TokenKindDereference,
TokenKindPropertyName, // event_name
TokenKindLogicalOperator, // ==
TokenKindString, // 'push'
},
},
{
expr: "github.event_name == 'push' && github.ref == 'refs/heads/main'",
expected: []TokenKind{
TokenKindNamedValue, TokenKindDereference, TokenKindPropertyName, TokenKindLogicalOperator, TokenKindString,
TokenKindLogicalOperator, // &&
TokenKindNamedValue, TokenKindDereference, TokenKindPropertyName, TokenKindLogicalOperator, TokenKindString,
},
},
{
expr: "contains(github.ref, 'refs/heads/')",
expected: []TokenKind{
TokenKindFunction, // contains
TokenKindStartParameters,
TokenKindNamedValue, TokenKindDereference, TokenKindPropertyName, // github.ref
TokenKindSeparator,
TokenKindString,
TokenKindEndParameters,
},
},
{
expr: "matrix[0].name",
expected: []TokenKind{
TokenKindNamedValue, // matrix
TokenKindStartIndex,
TokenKindNumber,
TokenKindEndIndex,
TokenKindDereference,
TokenKindPropertyName, // name
},
},
{
expr: "github.*",
expected: []TokenKind{
TokenKindNamedValue, TokenKindDereference, TokenKindWildcard,
},
},
{
expr: "null",
expected: []TokenKind{TokenKindNull},
},
{
expr: "true",
expected: []TokenKind{TokenKindBoolean},
values: []interface{}{true},
},
{
expr: "123",
expected: []TokenKind{TokenKindNumber},
values: []interface{}{123.0},
},
{
expr: "(a && b)",
expected: []TokenKind{TokenKindStartGroup, TokenKindNamedValue, TokenKindLogicalOperator, TokenKindNamedValue, TokenKindEndGroup},
},
{
expr: "[1,2]", // Syntax Error
expected: []TokenKind{TokenKindUnexpected, TokenKindNumber, TokenKindSeparator, TokenKindNumber, TokenKindEndIndex},
},
{
expr: "'Hello i''s escaped'",
expected: []TokenKind{TokenKindString},
values: []interface{}{"Hello i's escaped"},
},
}
for _, tc := range cases {
lexer := NewLexer(tc.expr, 0)
var tokens []*Token
for {
tok := lexer.Next()
if tok == nil {
break
}
tokens = append(tokens, tok)
}
assert.Equal(t, len(tc.expected), len(tokens), "expression: %s", tc.expr)
for i, kind := range tc.expected {
assert.Equal(t, kind, tokens[i].Kind, "expr %s token %d", tc.expr, i)
}
if tc.values != nil {
for i, val := range tc.values {
assert.Equal(t, val, tokens[i].Value, "expr %s token %d value", tc.expr, i)
}
}
}
}

View File

@@ -0,0 +1,56 @@
package workflow
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLexer(t *testing.T) {
input := "github.event_name == 'push' && github.ref == 'refs/heads/main'"
lexer := NewLexer(input, 0)
var tokens []*Token
for {
tok := lexer.Next()
if tok == nil || tok.Kind == TokenKindUnexpected {
break
}
tokens = append(tokens, tok)
}
for i, tok := range tokens {
t.Logf("Token %d: Kind=%v, Value=%v", i, tok.Kind, tok.Value)
}
assert.Equal(t, tokens[1].Kind, TokenKindDereference)
}
func TestLexerNumbers(t *testing.T) {
table := []struct {
in string
out interface{}
}{
{"-Infinity", math.Inf(-1)},
{"Infinity", math.Inf(1)},
{"2.5", float64(2.5)},
{"3.3", float64(3.3)},
{"1", float64(1)},
{"-1", float64(-1)},
{"0x34", float64(0x34)},
{"0o34", float64(0o34)},
}
for _, cs := range table {
lexer := NewLexer(cs.in, 0)
var tokens []*Token
for {
tok := lexer.Next()
if tok == nil || tok.Kind == TokenKindUnexpected {
break
}
tokens = append(tokens, tok)
}
require.Len(t, tokens, 1)
assert.Equal(t, cs.out, tokens[0].Value)
assert.Equal(t, cs.in, tokens[0].Raw)
}
}

30
internal/model/anchors.go Normal file
View File

@@ -0,0 +1,30 @@
package model
import (
"errors"
"gopkg.in/yaml.v3"
)
// Assumes there is no cycle; this is ensured via the test TestVerifyCycleIsInvalid
func resolveAliases(node *yaml.Node) error {
switch node.Kind {
case yaml.AliasNode:
aliasTarget := node.Alias
if aliasTarget == nil {
return errors.New("unresolved alias node")
}
*node = *aliasTarget
if err := resolveAliases(node); err != nil {
return err
}
case yaml.DocumentNode, yaml.MappingNode, yaml.SequenceNode:
for _, child := range node.Content {
if err := resolveAliases(child); err != nil {
return err
}
}
}
return nil
}

View File

@@ -0,0 +1,242 @@
package model
import (
"errors"
"fmt"
"strings"
"gopkg.in/yaml.v3"
)
// TraceWriter is an interface for logging trace information.
// Implementations can write to console, file, or any other sink.
type TraceWriter interface {
Info(format string, args ...interface{})
}
// StrategyResult holds the result of expanding a strategy.
// FlatMatrix contains the expanded matrix entries.
// IncludeMatrix contains entries that were added via include.
// FailFast indicates whether the job should fail fast.
// MaxParallel is the maximum parallelism allowed.
// MatrixKeys is the set of keys present in the matrix.
type StrategyResult struct {
FlatMatrix []map[string]yaml.Node
IncludeMatrix []map[string]yaml.Node
FailFast bool
MaxParallel *float64
MatrixKeys map[string]struct{}
}
type strategyContext struct {
jobTraceWriter TraceWriter
failFast bool
maxParallel float64
matrix map[string][]yaml.Node
flatMatrix []map[string]yaml.Node
includeMatrix []map[string]yaml.Node
include []yaml.Node
exclude []yaml.Node
}
func (strategyContext *strategyContext) handleInclude() error {
// Handle include logic
if len(strategyContext.include) > 0 {
for _, incNode := range strategyContext.include {
if incNode.Kind != yaml.MappingNode {
return fmt.Errorf("include entry is not a mapping node")
}
incMap := make(map[string]yaml.Node)
for i := 0; i < len(incNode.Content); i += 2 {
keyNode := incNode.Content[i]
valNode := incNode.Content[i+1]
if keyNode.Kind != yaml.ScalarNode {
return fmt.Errorf("include key is not scalar")
}
incMap[keyNode.Value] = *valNode
}
matched := false
for _, row := range strategyContext.flatMatrix {
match := true
for k, v := range incMap {
if rv, ok := row[k]; ok && !nodesEqual(rv, v) {
match = false
break
}
}
if match {
matched = true
// Add missing keys
strategyContext.jobTraceWriter.Info("Add missing keys %v", incMap)
for k, v := range incMap {
if _, ok := row[k]; !ok {
row[k] = v
}
}
}
}
if !matched {
if strategyContext.jobTraceWriter != nil {
strategyContext.jobTraceWriter.Info("Append include entry %v", incMap)
}
strategyContext.includeMatrix = append(strategyContext.includeMatrix, incMap)
}
}
}
return nil
}
func (strategyContext *strategyContext) handleExclude() error {
// Handle exclude logic
if len(strategyContext.exclude) > 0 {
for _, exNode := range strategyContext.exclude {
// exNode is expected to be a mapping node
if exNode.Kind != yaml.MappingNode {
return fmt.Errorf("exclude entry is not a mapping node")
}
// Convert mapping to map[string]yaml.Node
exMap := make(map[string]yaml.Node)
for i := 0; i < len(exNode.Content); i += 2 {
keyNode := exNode.Content[i]
valNode := exNode.Content[i+1]
if keyNode.Kind != yaml.ScalarNode {
return fmt.Errorf("exclude key is not scalar")
}
exMap[keyNode.Value] = *valNode
}
// Remove matching rows
filtered := []map[string]yaml.Node{}
for _, row := range strategyContext.flatMatrix {
match := true
for k, v := range exMap {
if rv, ok := row[k]; !ok || !nodesEqual(rv, v) {
match = false
break
}
}
if !match {
filtered = append(filtered, row)
} else if strategyContext.jobTraceWriter != nil {
strategyContext.jobTraceWriter.Info("Removing %v from matrix due to exclude entry %v", row, exMap)
}
}
strategyContext.flatMatrix = filtered
}
}
return nil
}
// ExpandStrategy expands the given strategy into a flat matrix and include matrix.
// It mimics the behavior of the C# StrategyUtils. The strategy parameter is expected
// to be populated from a YAML mapping that follows the GitHub Actions strategy schema.
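//
// A minimal usage sketch (values are illustrative):
//
//	var s Strategy
//	_ = yaml.Unmarshal([]byte("matrix:\n  os: [linux, windows]\n  go: ['1.21']"), &s)
//	res, _ := ExpandStrategy(&s, nil)
//	// res.FlatMatrix holds the 2-entry cross product of os and go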
func ExpandStrategy(strategy *Strategy, jobTraceWriter TraceWriter) (*StrategyResult, error) {
if strategy == nil {
return &StrategyResult{FlatMatrix: []map[string]yaml.Node{{}}, IncludeMatrix: []map[string]yaml.Node{}, FailFast: true}, nil
}
// Initialize defaults
strategyContext := &strategyContext{
jobTraceWriter: jobTraceWriter,
failFast: strategy.FailFast,
maxParallel: strategy.MaxParallel,
matrix: strategy.Matrix,
flatMatrix: []map[string]yaml.Node{{}},
}
// Process matrix entries
for key, values := range strategyContext.matrix {
switch key {
case "include":
strategyContext.include = values
case "exclude":
strategyContext.exclude = values
default:
// Other keys are treated as matrix dimensions
// Expand each existing row with the new key/value pairs
next := []map[string]yaml.Node{}
for _, row := range strategyContext.flatMatrix {
for _, val := range values {
newRow := make(map[string]yaml.Node)
for k, v := range row {
newRow[k] = v
}
newRow[key] = val
next = append(next, newRow)
}
}
strategyContext.flatMatrix = next
}
}
if err := strategyContext.handleExclude(); err != nil {
return nil, err
}
if len(strategyContext.flatMatrix) == 0 {
if jobTraceWriter != nil {
jobTraceWriter.Info("Matrix is empty, adding an empty entry")
}
strategyContext.flatMatrix = []map[string]yaml.Node{{}}
}
// Enforce GitHub's job matrix limit of 256 entries
if len(strategyContext.flatMatrix) > 256 {
if jobTraceWriter != nil {
jobTraceWriter.Info("Failure: Matrix contains more than 256 entries after exclude")
}
return nil, errors.New("matrix contains more than 256 entries")
}
// Build matrix keys set
matrixKeys := make(map[string]struct{})
if len(strategyContext.flatMatrix) > 0 {
for k := range strategyContext.flatMatrix[0] {
matrixKeys[k] = struct{}{}
}
}
if err := strategyContext.handleInclude(); err != nil {
return nil, err
}
return &StrategyResult{
FlatMatrix: strategyContext.flatMatrix,
IncludeMatrix: strategyContext.includeMatrix,
FailFast: strategyContext.failFast,
MaxParallel: &strategyContext.maxParallel,
MatrixKeys: matrixKeys,
}, nil
}
// nodesEqual compares two yaml.Node values for equality.
func nodesEqual(a, b yaml.Node) bool {
return DeepEquals(a, b, true)
}
// GetDefaultDisplaySuffix returns a string like "(foo, bar, baz)".
// Empty items are ignored. If all items are empty the result is "".
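// For example, GetDefaultDisplaySuffix([]string{"foo", "", "baz"}) returns "(foo, baz)".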
func GetDefaultDisplaySuffix(items []string) string {
var b strings.Builder // efficient string concatenation
first := true // true until we write the first nonempty item
for _, mk := range items {
if mk == "" { // Go has no null string, so we only need to check for empty
continue
}
if first {
b.WriteString("(")
first = false
} else {
b.WriteString(", ")
}
b.WriteString(mk)
}
if !first { // we wrote at least one item
b.WriteString(")")
}
return b.String()
}

View File

@@ -0,0 +1,68 @@
package model
import (
"testing"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
type EmptyTraceWriter struct {
}
func (e *EmptyTraceWriter) Info(_ string, _ ...interface{}) {
}
func TestStrategy(t *testing.T) {
table := []struct {
content string
flatmatrix int
includematrix int
}{
{`
matrix:
label:
- a
- b
fields:
- a
- b
`, 4, 0},
{`
matrix:
label:
- a
- b
include:
- label: a
x: self`, 2, 0,
},
{`
matrix:
label:
- a
- b
include:
- label: c
x: self`, 2, 1,
},
{`
matrix:
label:
- a
- b
exclude:
- label: a`, 1, 0,
},
}
for _, tc := range table {
var strategy Strategy
err := yaml.Unmarshal([]byte(tc.content), &strategy)
require.NoError(t, err)
res, err := ExpandStrategy(&strategy, &EmptyTraceWriter{})
require.NoError(t, err)
require.Len(t, res.FlatMatrix, tc.flatmatrix)
require.Len(t, res.IncludeMatrix, tc.includematrix)
}
}

View File

@@ -0,0 +1,148 @@
package model
import (
"strings"
v2 "github.com/actions-oss/act-cli/internal/eval/v2"
"gopkg.in/yaml.v3"
)
// DeepEquals compares two yaml.Node values recursively.
// It supports scalar, mapping and sequence nodes and allows
// an optional partial match for mappings and sequences.
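// With partialMatch set, the first node may carry extra mapping keys or trailing
// sequence elements; e.g. the mapping {a: 1, b: 2} partially matches {a: 1}.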
func DeepEquals(a, b yaml.Node, partialMatch bool) bool {
// Scalar comparison
if a.Kind == yaml.ScalarNode && b.Kind == yaml.ScalarNode {
return scalarEquals(a, b)
}
// Mapping comparison
if a.Kind == yaml.MappingNode && b.Kind == yaml.MappingNode {
return deepMapEquals(a, b, partialMatch)
}
// Sequence comparison
if a.Kind == yaml.SequenceNode && b.Kind == yaml.SequenceNode {
return deepSequenceEquals(a, b, partialMatch)
}
// Different kinds are not equal
return false
}
func scalarEquals(a, b yaml.Node) bool {
var left, right any
return a.Decode(&left) == nil && b.Decode(&right) == nil && v2.CreateIntermediateResult(v2.NewEvaluationContext(), left).AbstractEqual(v2.CreateIntermediateResult(v2.NewEvaluationContext(), right))
}
func deepMapEquals(a, b yaml.Node, partialMatch bool) bool {
mapA := make(map[string]yaml.Node)
for i := 0; i < len(a.Content); i += 2 {
keyNode := a.Content[i]
valNode := a.Content[i+1]
if keyNode.Kind != yaml.ScalarNode {
return false
}
mapA[strings.ToLower(keyNode.Value)] = *valNode
}
mapB := make(map[string]yaml.Node)
for i := 0; i < len(b.Content); i += 2 {
keyNode := b.Content[i]
valNode := b.Content[i+1]
if keyNode.Kind != yaml.ScalarNode {
return false
}
mapB[strings.ToLower(keyNode.Value)] = *valNode
}
if partialMatch {
if len(mapA) < len(mapB) {
return false
}
} else {
if len(mapA) != len(mapB) {
return false
}
}
for k, vB := range mapB {
vA, ok := mapA[k]
if !ok || !DeepEquals(vA, vB, partialMatch) {
return false
}
}
return true
}
func deepSequenceEquals(a, b yaml.Node, partialMatch bool) bool {
if partialMatch {
if len(a.Content) < len(b.Content) {
return false
}
} else {
if len(a.Content) != len(b.Content) {
return false
}
}
limit := len(b.Content)
if !partialMatch {
limit = len(a.Content)
}
for i := 0; i < limit; i++ {
if !DeepEquals(*a.Content[i], *b.Content[i], partialMatch) {
return false
}
}
return true
}
// traverse walks a YAML node recursively.
func traverse(node *yaml.Node, omitKeys bool, result *[]*yaml.Node) {
if node == nil {
return
}
*result = append(*result, node)
switch node.Kind {
case yaml.MappingNode:
if omitKeys {
// node.Content: key0, val0, key1, val1, …
for i := 1; i < len(node.Content); i += 2 { // only the values
traverse(node.Content[i], omitKeys, result)
}
} else {
for _, child := range node.Content {
traverse(child, omitKeys, result)
}
}
case yaml.SequenceNode:
// Sequence: traverse every element
for _, child := range node.Content {
traverse(child, omitKeys, result)
}
}
}
// GetDisplayStrings implements the LINQ expression:
//
// from displayitem in keys.SelectMany(key => item[key].Traverse(true))
// where !(displayitem is SequenceToken || displayitem is MappingToken)
// select displayitem.ToString()
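//
// For example, for keys ["os"] and item {"os": {name: linux}} only the scalar
// leaf survives and the result is ["linux"]; mapping keys are omitted.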
func GetDisplayStrings(keys []string, item map[string]*yaml.Node) []string {
var res []string
for _, k := range keys {
if node, ok := item[k]; ok {
var all []*yaml.Node
traverse(node, true, &all) // include the parent node itself
for _, n := range all {
// Keep only scalars; everything else is dropped
if n.Kind == yaml.ScalarNode {
res = append(res, n.Value)
}
}
}
}
return res
}

View File

@@ -0,0 +1,277 @@
package model
import "gopkg.in/yaml.v3"
type JobStatus int
const (
JobStatusPending JobStatus = iota
JobStatusDependenciesReady
JobStatusBlocked
JobStatusCompleted
)
type JobState struct {
JobID string // Workflow path to job, incl matrix and parent jobids
Result string // Actions Job Result
Outputs map[string]string // Returned Outputs
State JobStatus
Strategy []MatrixJobState
}
type MatrixJobState struct {
Matrix map[string]any
Name string
Result string
Outputs map[string]string // Returned Outputs
State JobStatus
}
type WorkflowStatus int
const (
WorkflowStatusPending WorkflowStatus = iota
WorkflowStatusDependenciesReady
WorkflowStatusBlocked
WorkflowStatusCompleted
)
type WorkflowState struct {
Name string
RunName string
Jobs JobState
StateWorkflowStatus WorkflowStatus
}
type Workflow struct {
On *On `yaml:"on,omitempty"`
Name string `yaml:"name,omitempty"`
Description string `yaml:"description,omitempty"`
RunName yaml.Node `yaml:"run-name,omitempty"`
Permissions *Permissions `yaml:"permissions,omitempty"`
Env yaml.Node `yaml:"env,omitempty"`
Defaults yaml.Node `yaml:"defaults,omitempty"`
Concurrency yaml.Node `yaml:"concurrency,omitempty"` // Two layouts
Jobs map[string]Job `yaml:"jobs,omitempty"`
}
type On struct {
Data map[string]yaml.Node `yaml:"-"`
WorkflowDispatch *WorkflowDispatch `yaml:"workflow_dispatch,omitempty"`
WorkflowCall *WorkflowCall `yaml:"workflow_call,omitempty"`
Schedule []Cron `yaml:"schedule,omitempty"`
}
type Cron struct {
Cron string `yaml:"cron,omitempty"`
}
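// UnmarshalYAML accepts the three shapes of the `on` key: a single event
// ("on: push"), a list of events ("on: [push, pull_request]") and a mapping
// with per-event configuration; all of them are normalized into Data.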
func (a *On) UnmarshalYAML(node *yaml.Node) error {
switch node.Kind {
case yaml.ScalarNode:
var s string
if err := node.Decode(&s); err != nil {
return err
}
a.Data = map[string]yaml.Node{}
a.Data[s] = yaml.Node{}
case yaml.SequenceNode:
var s []string
if err := node.Decode(&s); err != nil {
return err
}
a.Data = map[string]yaml.Node{}
for _, v := range s {
a.Data[v] = yaml.Node{}
}
default:
if err := node.Decode(&a.Data); err != nil {
return err
}
type OnObj On
if err := node.Decode((*OnObj)(a)); err != nil {
return err
}
}
return nil
}
func (a *On) MarshalYAML() (interface{}, error) {
return a.Data, nil
}
var (
_ yaml.Unmarshaler = &On{}
_ yaml.Marshaler = &On{}
_ yaml.Unmarshaler = &Concurrency{}
_ yaml.Unmarshaler = &RunsOn{}
_ yaml.Unmarshaler = &ImplicitStringArray{}
_ yaml.Unmarshaler = &Environment{}
)
type WorkflowDispatch struct {
Inputs map[string]Input `yaml:"inputs,omitempty"`
}
type Input struct {
Description string `yaml:"description,omitempty"`
Type string `yaml:"type,omitempty"`
Default string `yaml:"default,omitempty"`
Required bool `yaml:"required,omitempty"`
}
type WorkflowCall struct {
Inputs map[string]Input `yaml:"inputs,omitempty"`
Secrets map[string]Secret `yaml:"secrets,omitempty"`
Outputs map[string]Output `yaml:"outputs,omitempty"`
}
type Secret struct {
Description string `yaml:"description,omitempty"`
Required bool `yaml:"required,omitempty"`
}
type Output struct {
Description string `yaml:"description,omitempty"`
Value yaml.Node `yaml:"value,omitempty"`
}
type Job struct {
Needs ImplicitStringArray `yaml:"needs,omitempty"`
Permissions *Permissions `yaml:"permissions,omitempty"`
Strategy yaml.Node `yaml:"strategy,omitempty"`
Name yaml.Node `yaml:"name,omitempty"`
Concurrency yaml.Node `yaml:"concurrency,omitempty"`
// Reusable Workflow
Uses yaml.Node `yaml:"uses,omitempty"`
With yaml.Node `yaml:"with,omitempty"`
Secrets yaml.Node `yaml:"secrets,omitempty"`
// Runner Job
RunsOn yaml.Node `yaml:"runs-on,omitempty"`
Defaults yaml.Node `yaml:"defaults,omitempty"`
TimeoutMinutes yaml.Node `yaml:"timeout-minutes,omitempty"`
Container yaml.Node `yaml:"container,omitempty"`
Services yaml.Node `yaml:"services,omitempty"`
Env yaml.Node `yaml:"env,omitempty"`
Steps []yaml.Node `yaml:"steps,omitempty"`
Outputs yaml.Node `yaml:"outputs,omitempty"`
}
type ImplicitStringArray []string
func (a *ImplicitStringArray) UnmarshalYAML(node *yaml.Node) error {
if node.Kind == yaml.ScalarNode {
var s string
if err := node.Decode(&s); err != nil {
return err
}
*a = []string{s}
return nil
}
return node.Decode((*[]string)(a))
}
type Permissions map[string]string
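// UnmarshalYAML expands the scalar forms "read-all" and "write-all" into every
// known permission scope; other scalars are ignored and mappings are decoded as-is.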
func (p *Permissions) UnmarshalYAML(node *yaml.Node) error {
if node.Kind == yaml.ScalarNode {
var s string
if err := node.Decode(&s); err != nil {
return err
}
var perm string
switch s {
case "read-all":
perm = "read"
case "write-all":
perm = "write"
default:
return nil
}
if *p == nil {
*p = Permissions{}
}
for _, scope := range []string{
"actions", "attestations", "contents", "checks", "deployments",
"discussions", "id-token", "issues", "models", "packages", "pages",
"pull-requests", "repository-projects", "security-events", "statuses",
} {
(*p)[scope] = perm
}
return nil
}
return node.Decode((*map[string]string)(p))
}
type Strategy struct {
Matrix map[string][]yaml.Node `yaml:"matrix"`
MaxParallel float64 `yaml:"max-parallel"`
FailFast bool `yaml:"fail-fast"`
}
type Concurrency struct {
Group string `yaml:"group"`
CancelInProgress bool `yaml:"cancel-in-progress"`
}
func (c *Concurrency) UnmarshalYAML(node *yaml.Node) error {
if node.Kind == yaml.ScalarNode {
var s string
if err := node.Decode(&s); err != nil {
return err
}
c.Group = s
return nil
}
type ConcurrencyObj Concurrency
return node.Decode((*ConcurrencyObj)(c))
}
type Environment struct {
Name string `yaml:"name"`
URL yaml.Node `yaml:"url"`
}
func (e *Environment) UnmarshalYAML(node *yaml.Node) error {
if node.Kind == yaml.ScalarNode {
var s string
if err := node.Decode(&s); err != nil {
return err
}
e.Name = s
return nil
}
type EnvironmentObj Environment
return node.Decode((*EnvironmentObj)(e))
}
type RunsOn struct {
Labels []string `yaml:"labels"`
Group string `yaml:"group,omitempty"`
}
func (a *RunsOn) UnmarshalYAML(node *yaml.Node) error {
if node.Kind == yaml.ScalarNode {
var s string
if err := node.Decode(&s); err != nil {
return err
}
a.Labels = []string{s}
return nil
}
if node.Kind == yaml.SequenceNode {
var s []string
if err := node.Decode(&s); err != nil {
return err
}
a.Labels = s
return nil
}
type RunsOnObj RunsOn
return node.Decode((*RunsOnObj)(a))
}

View File

@@ -0,0 +1,141 @@
package model
import (
"context"
"testing"
v2 "github.com/actions-oss/act-cli/internal/eval/v2"
"github.com/actions-oss/act-cli/internal/templateeval"
"github.com/actions-oss/act-cli/pkg/schema"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
func TestParseWorkflow(t *testing.T) {
ee := &templateeval.ExpressionEvaluator{
EvaluationContext: v2.EvaluationContext{
Variables: v2.CaseInsensitiveObject[any]{},
Functions: v2.GetFunctions(),
},
}
var node yaml.Node
err := yaml.Unmarshal([]byte(`
on: push
run-name: ${{ fromjson('{}') }}
jobs:
_:
name: ${{ github.ref_name }}
steps:
- run: echo Hello World
env:
TAG: ${{ env.global }}
`), &node)
require.NoError(t, err)
err = ee.EvaluateYamlNode(context.Background(), node.Content[0], &schema.Node{
Definition: "workflow-root",
Schema: schema.GetWorkflowSchema(),
})
require.NoError(t, err)
ee.RestrictEval = true
ee.EvaluationContext.Variables = v2.CaseInsensitiveObject[any]{
"github": v2.CaseInsensitiveObject[any]{
"ref_name": "self",
},
"vars": v2.CaseInsensitiveObject[any]{},
"inputs": v2.CaseInsensitiveObject[any]{},
}
err = ee.EvaluateYamlNode(context.Background(), node.Content[0], &schema.Node{
Definition: "workflow-root",
Schema: schema.GetWorkflowSchema(),
})
require.Error(t, err)
var myw Workflow
require.NoError(t, node.Decode(&myw))
}
func TestParseWorkflowCall(t *testing.T) {
ee := &templateeval.ExpressionEvaluator{
EvaluationContext: v2.EvaluationContext{
Variables: v2.CaseInsensitiveObject[any]{},
Functions: v2.GetFunctions(),
},
}
var node yaml.Node
// jobs.test.outputs.test
err := yaml.Unmarshal([]byte(`
on:
workflow_call:
outputs:
test:
value: ${{ jobs.test.outputs.test }} # tojson(vars.raw)
run-name: ${{ github.ref_name }}
jobs:
_:
runs-on: ubuntu-latest
name: ${{ github.ref_name }}
steps:
- run: echo Hello World
env:
TAG: ${{ env.global }}
`), &node)
require.NoError(t, err)
require.NoError(t, resolveAliases(node.Content[0]))
require.NoError(t, (&schema.Node{
Definition: "workflow-root",
Schema: schema.GetWorkflowSchema(),
}).UnmarshalYAML(node.Content[0]))
err = ee.EvaluateYamlNode(context.Background(), node.Content[0], &schema.Node{
Definition: "workflow-root",
Schema: schema.GetWorkflowSchema(),
})
require.NoError(t, err)
var raw any
err = node.Content[0].Decode(&raw)
assert.NoError(t, err)
ee.RestrictEval = true
ee.EvaluationContext.Variables = v2.CaseInsensitiveObject[any]{
"github": v2.CaseInsensitiveObject[any]{
"ref_name": "self",
},
"vars": v2.CaseInsensitiveObject[any]{
"raw": raw,
},
"inputs": v2.CaseInsensitiveObject[any]{},
"jobs": v2.CaseInsensitiveObject[any]{
"test": v2.CaseInsensitiveObject[any]{
"outputs": v2.CaseInsensitiveObject[any]{
"test": "Hello World",
},
},
},
}
err = ee.EvaluateYamlNode(context.Background(), node.Content[0], &schema.Node{
RestrictEval: true,
Definition: "workflow-root",
Schema: schema.GetWorkflowSchema(),
})
require.NoError(t, err)
var myw Workflow
require.NoError(t, node.Decode(&myw))
workflowCall := myw.On.WorkflowCall
if workflowCall != nil {
for _, out := range workflowCall.Outputs {
err = ee.EvaluateYamlNode(context.Background(), &out.Value, &schema.Node{
RestrictEval: true,
Definition: "workflow-output-context",
Schema: schema.GetWorkflowSchema(),
})
require.NoError(t, err)
require.Equal(t, "Hello World", out.Value.Value)
}
}
out, err := yaml.Marshal(&myw)
assert.NoError(t, err)
assert.NotEmpty(t, out)
}

View File

@@ -0,0 +1,195 @@
package templateeval
import (
"context"
"fmt"
"regexp"
v2 "github.com/actions-oss/act-cli/internal/eval/v2"
exprparser "github.com/actions-oss/act-cli/internal/expr"
"github.com/actions-oss/act-cli/pkg/schema"
"gopkg.in/yaml.v3"
)
type ExpressionEvaluator struct {
RestrictEval bool
EvaluationContext v2.EvaluationContext
}
func isImplExpr(snode *schema.Node) bool {
def := snode.Schema.GetDefinition(snode.Definition)
return def.String != nil && def.String.IsExpression
}
func (ee ExpressionEvaluator) evaluateScalarYamlNode(_ context.Context, node *yaml.Node, snode *schema.Node) (*yaml.Node, error) {
var in string
if err := node.Decode(&in); err != nil {
return nil, err
}
expr, isExpr, err := rewriteSubExpression(in, false)
if err != nil {
return nil, err
}
if snode == nil || !isExpr && !isImplExpr(snode) || snode.Schema.GetDefinition(snode.Definition).String.IsExpression || ee.RestrictEval && node.Tag != "!!expr" {
return node, nil
}
parsed, err := exprparser.Parse(expr)
if err != nil {
return nil, err
}
canEvaluate := ee.canEvaluate(parsed, snode)
if !canEvaluate {
node.Tag = "!!expr"
return node, nil
}
eval := v2.NewEvaluator(&ee.EvaluationContext)
res, err := eval.EvaluateRaw(expr)
if err != nil {
return nil, err
}
ret := &yaml.Node{}
if err := ret.Encode(res); err != nil {
return nil, err
}
ret.Line = node.Line
ret.Column = node.Column
// Finally check if we found a schema validation error
return ret, snode.UnmarshalYAML(ret)
}
func (ee ExpressionEvaluator) canEvaluate(parsed exprparser.Node, snode *schema.Node) bool {
canEvaluate := true
for _, v := range snode.GetVariables() {
canEvaluate = canEvaluate && ee.EvaluationContext.Variables.Get(v) != nil
}
for _, v := range snode.GetFunctions() {
canEvaluate = canEvaluate && ee.EvaluationContext.Functions.Get(v.GetName()) != nil
}
exprparser.VisitNode(parsed, func(node exprparser.Node) {
switch el := node.(type) {
case *exprparser.FunctionNode:
canEvaluate = canEvaluate && ee.EvaluationContext.Functions.Get(el.Name) != nil
case *exprparser.ValueNode:
canEvaluate = canEvaluate && (el.Kind != exprparser.TokenKindNamedValue || ee.EvaluationContext.Variables.Get(el.Value.(string)) != nil)
}
})
return canEvaluate
}
func (ee ExpressionEvaluator) evaluateMappingYamlNode(ctx context.Context, node *yaml.Node, snode *schema.Node) (*yaml.Node, error) {
var ret *yaml.Node
// GitHub has an undocumented feature to merge maps, called the insert directive
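// For example (illustrative), a mapping such as
//
//	env:
//	  STATIC: value
//	  ${{ insert }}: ${{ fromJSON(inputs.extra_env) }}
//
// has the evaluated map spliced into its parent instead of a literal key.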
insertDirective := regexp.MustCompile(`\${{\s*insert\s*}}`)
for i := 0; i < len(node.Content)/2; i++ {
k := node.Content[i*2]
var sk string
shouldInsert := k.Decode(&sk) == nil && insertDirective.MatchString(sk)
changed := func() error {
if ret == nil {
ret = &yaml.Node{}
if err := ret.Encode(node); err != nil {
return err
}
ret.Content = ret.Content[:i*2]
}
return nil
}
var ek *yaml.Node
if !shouldInsert {
var err error
ek, err = ee.evaluateYamlNodeInternal(ctx, k, snode)
if err != nil {
return nil, err
}
if ek != nil {
if err := changed(); err != nil {
return nil, err
}
} else {
ek = k
}
}
v := node.Content[i*2+1]
ev, err := ee.evaluateYamlNodeInternal(ctx, v, snode.GetNestedNode(ek.Value))
if err != nil {
return nil, err
}
if ev != nil {
if err := changed(); err != nil {
return nil, err
}
} else {
ev = v
}
// Merge the nested map of the insert directive
if shouldInsert {
if ev.Kind != yaml.MappingNode {
return nil, fmt.Errorf("failed to insert node %v into mapping %v unexpected type %v expected MappingNode", ev, node, ev.Kind)
}
if err := changed(); err != nil {
return nil, err
}
ret.Content = append(ret.Content, ev.Content...)
} else if ret != nil {
ret.Content = append(ret.Content, ek, ev)
}
}
return ret, nil
}
func (ee ExpressionEvaluator) evaluateSequenceYamlNode(ctx context.Context, node *yaml.Node, snode *schema.Node) (*yaml.Node, error) {
var ret *yaml.Node
for i := 0; i < len(node.Content); i++ {
v := node.Content[i]
// Preserve nested sequences
wasseq := v.Kind == yaml.SequenceNode
ev, err := ee.evaluateYamlNodeInternal(ctx, v, snode.GetNestedNode("*"))
if err != nil {
return nil, err
}
if ev != nil {
if ret == nil {
ret = &yaml.Node{}
if err := ret.Encode(node); err != nil {
return nil, err
}
ret.Content = ret.Content[:i]
}
// GitHub has an undocumented feature to merge sequences / arrays.
// If evaluation produced a nested sequence, merge its elements into the parent array.
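// e.g. (illustrative) a list item "${{ fromJSON('[1, 2]') }}" is spliced in as
// two elements of the surrounding sequence rather than one nested list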
if ev.Kind == yaml.SequenceNode && !wasseq {
ret.Content = append(ret.Content, ev.Content...)
} else {
ret.Content = append(ret.Content, ev)
}
} else if ret != nil {
ret.Content = append(ret.Content, v)
}
}
return ret, nil
}
func (ee ExpressionEvaluator) evaluateYamlNodeInternal(ctx context.Context, node *yaml.Node, snode *schema.Node) (*yaml.Node, error) {
switch node.Kind {
case yaml.ScalarNode:
return ee.evaluateScalarYamlNode(ctx, node, snode)
case yaml.MappingNode:
return ee.evaluateMappingYamlNode(ctx, node, snode)
case yaml.SequenceNode:
return ee.evaluateSequenceYamlNode(ctx, node, snode)
default:
return nil, nil
}
}
func (ee ExpressionEvaluator) EvaluateYamlNode(ctx context.Context, node *yaml.Node, snode *schema.Node) error {
ret, err := ee.evaluateYamlNodeInternal(ctx, node, snode)
if err != nil {
return err
}
if ret != nil {
return ret.Decode(node)
}
return nil
}

View File

@@ -0,0 +1,94 @@
package templateeval
import (
"context"
"testing"
v2 "github.com/actions-oss/act-cli/internal/eval/v2"
"github.com/actions-oss/act-cli/pkg/schema"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v3"
)
func TestEval(t *testing.T) {
cases := []struct {
name string
yamlInput string
restrict bool
variables v2.CaseInsensitiveObject[any]
expectErr bool
}{
{
name: "NoError",
yamlInput: `on: push
run-name: ${{ github.ref_name }}
jobs:
_:
name: ${{ github.ref_name }}
steps:
- run: echo Hello World
env:
TAG: ${{ env.global }}`,
restrict: false,
expectErr: false,
},
{
name: "Error",
yamlInput: `on: push
run-name: ${{ fromjson('{}') }}
jobs:
_:
name: ${{ github.ref_name }}
steps:
- run: echo Hello World
env:
TAG: ${{ env.global }}`,
restrict: true,
variables: v2.CaseInsensitiveObject[any]{
"github": v2.CaseInsensitiveObject[any]{
"ref_name": "self",
},
"vars": v2.CaseInsensitiveObject[any]{},
"inputs": v2.CaseInsensitiveObject[any]{},
},
expectErr: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
ee := &ExpressionEvaluator{
EvaluationContext: v2.EvaluationContext{
Variables: v2.CaseInsensitiveObject[any]{},
Functions: v2.GetFunctions(),
},
}
var node yaml.Node
err := yaml.Unmarshal([]byte(tc.yamlInput), &node)
require.NoError(t, err)
err = ee.EvaluateYamlNode(context.Background(), node.Content[0], &schema.Node{
Definition: "workflow-root",
Schema: schema.GetWorkflowSchema(),
})
require.NoError(t, err)
if tc.restrict {
ee.RestrictEval = true
}
if tc.variables != nil {
ee.EvaluationContext.Variables = tc.variables
}
err = ee.EvaluateYamlNode(context.Background(), node.Content[0], &schema.Node{
Definition: "workflow-root",
Schema: schema.GetWorkflowSchema(),
})
if tc.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
})
}
}

View File

@@ -0,0 +1,75 @@
package templateeval
import (
"fmt"
"regexp"
"strings"
)
func escapeFormatString(in string) string {
return strings.ReplaceAll(strings.ReplaceAll(in, "{", "{{"), "}", "}}")
}
func rewriteSubExpression(in string, forceFormat bool) (result string, isExpr bool, err error) {
// missing closing pair is an error
if !strings.Contains(in, "${{") {
return in, false, nil
}
strPattern := regexp.MustCompile("(?:''|[^'])*'")
pos := 0
exprStart := -1
strStart := -1
var results []string
formatOut := ""
for pos < len(in) {
if strStart > -1 {
matches := strPattern.FindStringIndex(in[pos:])
if matches == nil {
return "", false, fmt.Errorf("unclosed string at position %d in %s", pos, in)
}
strStart = -1
pos += matches[1]
} else if exprStart > -1 {
exprEnd := strings.Index(in[pos:], "}}")
strStart = strings.Index(in[pos:], "'")
if exprEnd > -1 && strStart > -1 {
if exprEnd < strStart {
strStart = -1
} else {
exprEnd = -1
}
}
if exprEnd > -1 {
formatOut += fmt.Sprintf("{%d}", len(results))
results = append(results, strings.TrimSpace(in[exprStart:pos+exprEnd]))
pos += exprEnd + 2
exprStart = -1
} else if strStart > -1 {
pos += strStart + 1
} else {
return "", false, fmt.Errorf("unclosed expression at position %d in %s", pos, in)
}
} else {
exprStart = strings.Index(in[pos:], "${{")
if exprStart != -1 {
formatOut += escapeFormatString(in[pos : pos+exprStart])
exprStart = pos + exprStart + 3
pos = exprStart
} else {
formatOut += escapeFormatString(in[pos:])
pos = len(in)
}
}
}
if len(results) == 1 && formatOut == "{0}" && !forceFormat {
return results[0], true, nil
}
out := fmt.Sprintf("format('%s', %s)", strings.ReplaceAll(formatOut, "'", "''"), strings.Join(results, ", "))
return out, true, nil
}

View File

@@ -0,0 +1,115 @@
package templateeval
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestRewriteSubExpression_NoExpression(t *testing.T) {
in := "Hello world"
out, ok, err := rewriteSubExpression(in, false)
assert.NoError(t, err)
if ok {
t.Fatalf("expected ok=false for no expression, got true with output %q", out)
}
if out != in {
t.Fatalf("expected output %q, got %q", in, out)
}
}
func TestRewriteSubExpression_SingleExpression(t *testing.T) {
in := "Hello ${{ 'world' }}"
out, ok, err := rewriteSubExpression(in, false)
assert.NoError(t, err)
if !ok {
t.Fatalf("expected ok=true for single expression, got false")
}
expected := "format('Hello {0}', 'world')"
if out != expected {
t.Fatalf("expected %q, got %q", expected, out)
}
}
func TestRewriteSubExpression_MultipleExpressions(t *testing.T) {
in := "Hello ${{ 'world' }}, you are ${{ 'awesome' }}"
out, ok, err := rewriteSubExpression(in, false)
assert.NoError(t, err)
if !ok {
t.Fatalf("expected ok=true for multiple expressions, got false")
}
expected := "format('Hello {0}, you are {1}', 'world', 'awesome')"
if out != expected {
t.Fatalf("expected %q, got %q", expected, out)
}
}
func TestRewriteSubExpression_ForceFormatSingle(t *testing.T) {
in := "Hello ${{ 'world' }}"
out, ok, err := rewriteSubExpression(in, true)
assert.NoError(t, err)
if !ok {
t.Fatalf("expected ok=true when forceFormat, got false")
}
expected := "format('Hello {0}', 'world')"
if out != expected {
t.Fatalf("expected %q, got %q", expected, out)
}
}
func TestRewriteSubExpression_ForceFormatMultiple(t *testing.T) {
in := "Hello ${{ 'world' }}, you are ${{ 'awesome' }}"
out, ok, err := rewriteSubExpression(in, true)
assert.NoError(t, err)
if !ok {
t.Fatalf("expected ok=true when forceFormat, got false")
}
expected := "format('Hello {0}, you are {1}', 'world', 'awesome')"
if out != expected {
t.Fatalf("expected %q, got %q", expected, out)
}
}
func TestRewriteSubExpression_UnclosedExpression(t *testing.T) {
in := "Hello ${{ 'world' " // missing closing }}
_, _, err := rewriteSubExpression(in, false)
assert.Error(t, err)
assert.Contains(t, err.Error(), "unclosed expression")
}
func TestRewriteSubExpression_UnclosedString(t *testing.T) {
in := "Hello ${{ 'world }}, you are ${{ 'awesome' }}"
_, _, err := rewriteSubExpression(in, false)
assert.Error(t, err)
assert.Contains(t, err.Error(), "unclosed string")
}
func TestRewriteSubExpression_EscapedStringLiteral(t *testing.T) {
// Two single quotes represent an escaped quote inside a string
in := "Hello ${{ 'It''s a test' }}"
out, ok, err := rewriteSubExpression(in, false)
assert.NoError(t, err)
assert.True(t, ok)
expected := "format('Hello {0}', 'It''s a test')"
assert.Equal(t, expected, out)
}
func TestRewriteSubExpression_ExpressionAtEnd(t *testing.T) {
// An expression that ends exactly at the end of the string should be valid
in := "Hello ${{ 'world' }}"
out, ok, err := rewriteSubExpression(in, false)
assert.NoError(t, err)
assert.True(t, ok)
expected := "format('Hello {0}', 'world')"
assert.Equal(t, expected, out)
}
func TestRewriteSubExpression_ExpressionNotAtEnd(t *testing.T) {
// Expression followed by additional text should still be valid
in := "Hello ${{ 'world' }}, how are you?"
out, ok, err := rewriteSubExpression(in, false)
assert.NoError(t, err)
assert.True(t, ok)
expected := "format('Hello {0}, how are you?', 'world')"
assert.Equal(t, expected, out)
}

11
main_test.go Normal file

@@ -0,0 +1,11 @@
package main
import (
"os"
"testing"
)
func TestMain(_ *testing.T) {
os.Args = []string{"act", "--help"}
main()
}

8
pkg/artifactcache/doc.go Normal file

@@ -0,0 +1,8 @@
// Package artifactcache provides a cache handler for the runner.
//
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
//
// TODO: Authorization
// TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
// TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
package artifactcache
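A minimal usage sketch of the package, assuming only the StartHandler, ExternalURL, and Close APIs defined in handler.go below; the printed URL is what a runner would hand to jobs as the cache server address (values are illustrative):
package main
import (
	"fmt"
	"log"
	"github.com/actions-oss/act-cli/pkg/artifactcache"
)
func main() {
	// Empty dir -> ~/.cache/actcache, empty outboundIP -> auto-detect, port 0 -> ephemeral port, nil logger -> discard.
	handler, err := artifactcache.StartHandler("", "", 0, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = handler.Close() }()
	fmt.Println(handler.ExternalURL()) // e.g. http://192.168.1.5:39123 (illustrative)
}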


@@ -0,0 +1,613 @@
package artifactcache
import (
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/julienschmidt/httprouter"
"github.com/sirupsen/logrus"
"github.com/timshannon/bolthold"
"go.etcd.io/bbolt"
"github.com/actions-oss/act-cli/pkg/common"
)
const (
urlBase = "/_apis/artifactcache"
)
type Handler struct {
dir string
storage *Storage
router *httprouter.Router
listener net.Listener
server *http.Server
logger logrus.FieldLogger
gcing atomic.Bool
gcAt time.Time
outboundIP string
externalAddress string
}
func StartHandler(dir, outboundIP string, port uint16, logger logrus.FieldLogger) (*Handler, error) {
h := &Handler{}
if logger == nil {
discard := logrus.New()
discard.Out = io.Discard
logger = discard
}
logger = logger.WithField("module", "artifactcache")
h.logger = logger
if dir == "" {
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
dir = filepath.Join(home, ".cache", "actcache")
}
if err := os.MkdirAll(dir, 0o755); err != nil {
return nil, err
}
h.dir = dir
storage, err := NewStorage(filepath.Join(dir, "cache"))
if err != nil {
return nil, err
}
h.storage = storage
if outboundIP != "" {
h.outboundIP = outboundIP
} else if ip := common.GetOutboundIP(); ip == nil {
return nil, fmt.Errorf("unable to determine outbound IP address")
} else {
h.outboundIP = ip.String()
}
router := httprouter.New()
router.GET(urlBase+"/cache", h.middleware(h.find))
router.POST(urlBase+"/caches", h.middleware(h.reserve))
router.PATCH(urlBase+"/caches/:id", h.middleware(h.upload))
router.POST(urlBase+"/caches/:id", h.middleware(h.commit))
router.GET(urlBase+"/artifacts/:id", h.middleware(h.get))
router.POST(urlBase+"/clean", h.middleware(h.clean))
h.router = router
h.gcCache()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
if err != nil {
return nil, err
}
server := &http.Server{
ReadHeaderTimeout: 2 * time.Second,
Handler: router,
}
go func() {
if err := server.Serve(listener); err != nil && !errors.Is(err, net.ErrClosed) {
logger.Errorf("http serve: %v", err)
}
}()
h.listener = listener
h.server = server
return h, nil
}
func CreateHandler(dir, externalAddress string, logger logrus.FieldLogger) (*Handler, http.Handler, error) {
h := &Handler{}
if logger == nil {
discard := logrus.New()
discard.Out = io.Discard
logger = discard
}
logger = logger.WithField("module", "artifactcache")
h.logger = logger
if dir == "" {
home, err := os.UserHomeDir()
if err != nil {
return nil, nil, err
}
dir = filepath.Join(home, ".cache", "actcache")
}
if err := os.MkdirAll(dir, 0o755); err != nil {
return nil, nil, err
}
h.dir = dir
storage, err := NewStorage(filepath.Join(dir, "cache"))
if err != nil {
return nil, nil, err
}
h.storage = storage
if externalAddress != "" {
h.externalAddress = externalAddress
} else if ip := common.GetOutboundIP(); ip == nil {
return nil, nil, fmt.Errorf("unable to determine outbound IP address")
} else {
h.outboundIP = ip.String()
}
router := httprouter.New()
router.GET(urlBase+"/cache", h.middleware(h.find))
router.POST(urlBase+"/caches", h.middleware(h.reserve))
router.PATCH(urlBase+"/caches/:id", h.middleware(h.upload))
router.POST(urlBase+"/caches/:id", h.middleware(h.commit))
router.GET(urlBase+"/artifacts/:id", h.middleware(h.get))
router.POST(urlBase+"/clean", h.middleware(h.clean))
h.router = router
h.gcCache()
return h, router, nil
}
func (h *Handler) ExternalURL() string {
if h.externalAddress != "" {
return h.externalAddress
}
// TODO: make the external url configurable if necessary
return fmt.Sprintf("http://%s:%d",
h.outboundIP,
h.listener.Addr().(*net.TCPAddr).Port)
}
func (h *Handler) Close() error {
if h == nil {
return nil
}
var retErr error
if h.server != nil {
err := h.server.Close()
if err != nil {
retErr = err
}
h.server = nil
}
if h.listener != nil {
err := h.listener.Close()
if errors.Is(err, net.ErrClosed) {
err = nil
}
if err != nil {
retErr = err
}
h.listener = nil
}
return retErr
}
func (h *Handler) openDB() (*bolthold.Store, error) {
return bolthold.Open(filepath.Join(h.dir, "bolt.db"), 0o644, &bolthold.Options{
Encoder: json.Marshal,
Decoder: json.Unmarshal,
Options: &bbolt.Options{
Timeout: 5 * time.Second,
NoGrowSync: bbolt.DefaultOptions.NoGrowSync,
FreelistType: bbolt.DefaultOptions.FreelistType,
},
})
}
// GET /_apis/artifactcache/cache
func (h *Handler) find(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
keys := strings.Split(r.URL.Query().Get("keys"), ",")
// cache keys are case insensitive
for i, key := range keys {
keys[i] = strings.ToLower(key)
}
version := r.URL.Query().Get("version")
db, err := h.openDB()
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
defer db.Close()
cache, err := findCache(db, keys, version)
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
if cache == nil {
h.responseJSON(w, r, 204)
return
}
if ok, err := h.storage.Exist(cache.ID); err != nil {
h.responseJSON(w, r, 500, err)
return
} else if !ok {
_ = db.Delete(cache.ID, cache)
h.responseJSON(w, r, 204)
return
}
h.responseJSON(w, r, 200, map[string]any{
"result": "hit",
"archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
"cacheKey": cache.Key,
})
}
// POST /_apis/artifactcache/caches
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
api := &Request{}
if err := json.NewDecoder(r.Body).Decode(api); err != nil {
h.responseJSON(w, r, 400, err)
return
}
// cache keys are case insensitive
api.Key = strings.ToLower(api.Key)
cache := api.ToCache()
db, err := h.openDB()
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
defer db.Close()
now := time.Now().Unix()
cache.CreatedAt = now
cache.UsedAt = now
if err := insertCache(db, cache); err != nil {
h.responseJSON(w, r, 500, err)
return
}
h.responseJSON(w, r, 200, map[string]any{
"cacheId": cache.ID,
})
}
// PATCH /_apis/artifactcache/caches/:id
func (h *Handler) upload(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
id, err := strconv.ParseUint(params.ByName("id"), 10, 64)
if err != nil {
h.responseJSON(w, r, 400, err)
return
}
cache := &Cache{}
db, err := h.openDB()
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
defer db.Close()
if err := db.Get(id, cache); err != nil {
if errors.Is(err, bolthold.ErrNotFound) {
h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
return
}
h.responseJSON(w, r, 500, err)
return
}
if cache.Complete {
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
return
}
db.Close()
start, _, err := parseContentRange(r.Header.Get("Content-Range"))
if err != nil {
h.responseJSON(w, r, 400, err)
return
}
if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
h.responseJSON(w, r, 500, err)
}
h.useCache(id)
h.responseJSON(w, r, 200)
}
// POST /_apis/artifactcache/caches/:id
func (h *Handler) commit(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
id, err := strconv.ParseUint(params.ByName("id"), 10, 64)
if err != nil {
h.responseJSON(w, r, 400, err)
return
}
cache := &Cache{}
db, err := h.openDB()
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
defer db.Close()
if err := db.Get(id, cache); err != nil {
if errors.Is(err, bolthold.ErrNotFound) {
h.responseJSON(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
return
}
h.responseJSON(w, r, 500, err)
return
}
if cache.Complete {
h.responseJSON(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
return
}
db.Close()
size, err := h.storage.Commit(cache.ID, cache.Size)
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
// Write the real size back to the cache entry; it may differ from the reserved value when the request didn't specify a size.
cache.Size = size
db, err = h.openDB()
if err != nil {
h.responseJSON(w, r, 500, err)
return
}
defer db.Close()
cache.Complete = true
if err := db.Update(cache.ID, cache); err != nil {
h.responseJSON(w, r, 500, err)
return
}
h.responseJSON(w, r, 200)
}
// GET /_apis/artifactcache/artifacts/:id
func (h *Handler) get(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
id, err := strconv.ParseUint(params.ByName("id"), 10, 64)
if err != nil {
h.responseJSON(w, r, 400, err)
return
}
h.useCache(id)
h.storage.Serve(w, r, id)
}
// POST /_apis/artifactcache/clean
func (h *Handler) clean(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
// TODO: force deleting cache entries is not supported yet
// see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
h.responseJSON(w, r, 200)
}
func (h *Handler) middleware(handler httprouter.Handle) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
h.logger.Debugf("%s %s", r.Method, r.RequestURI)
handler(w, r, params)
go h.gcCache()
}
}
// if not found, return (nil, nil) instead of an error.
func findCache(db *bolthold.Store, keys []string, version string) (*Cache, error) {
cache := &Cache{}
for _, prefix := range keys {
// if a key in the list matches exactly, don't return partial matches
if err := db.FindOne(cache,
bolthold.Where("Key").Eq(prefix).
And("Version").Eq(version).
And("Complete").Eq(true).
SortBy("CreatedAt").Reverse()); err == nil || !errors.Is(err, bolthold.ErrNotFound) {
if err != nil {
return nil, fmt.Errorf("find cache: %w", err)
}
return cache, nil
}
prefixPattern := fmt.Sprintf("^%s", regexp.QuoteMeta(prefix))
re, err := regexp.Compile(prefixPattern)
if err != nil {
continue
}
if err := db.FindOne(cache,
bolthold.Where("Key").RegExp(re).
And("Version").Eq(version).
And("Complete").Eq(true).
SortBy("CreatedAt").Reverse()); err != nil {
if errors.Is(err, bolthold.ErrNotFound) {
continue
}
return nil, fmt.Errorf("find cache: %w", err)
}
return cache, nil
}
return nil, nil
}
func insertCache(db *bolthold.Store, cache *Cache) error {
if err := db.Insert(bolthold.NextSequence(), cache); err != nil {
return fmt.Errorf("insert cache: %w", err)
}
// write back id to db
if err := db.Update(cache.ID, cache); err != nil {
return fmt.Errorf("write back id to db: %w", err)
}
return nil
}
func (h *Handler) useCache(id uint64) {
db, err := h.openDB()
if err != nil {
return
}
defer db.Close()
cache := &Cache{}
if err := db.Get(id, cache); err != nil {
return
}
cache.UsedAt = time.Now().Unix()
_ = db.Update(cache.ID, cache)
}
const (
keepUsed = 30 * 24 * time.Hour
keepUnused = 7 * 24 * time.Hour
keepTemp = 5 * time.Minute
keepOld = 5 * time.Minute
)
func (h *Handler) gcCache() {
if h.gcing.Load() {
return
}
if !h.gcing.CompareAndSwap(false, true) {
return
}
defer h.gcing.Store(false)
if time.Since(h.gcAt) < time.Hour {
h.logger.Debugf("skip gc: %v", h.gcAt.String())
return
}
h.gcAt = time.Now()
h.logger.Debugf("gc: %v", h.gcAt.String())
db, err := h.openDB()
if err != nil {
return
}
defer db.Close()
// Remove caches that have not been completed for a while; they are most likely broken.
var caches []*Cache
if err := db.Find(&caches, bolthold.
Where("UsedAt").Lt(time.Now().Add(-keepTemp).Unix()).
And("Complete").Eq(false),
); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}
// Remove the old caches which have not been used recently.
caches = caches[:0]
if err := db.Find(&caches, bolthold.
Where("UsedAt").Lt(time.Now().Add(-keepUnused).Unix()),
); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}
// Remove caches created too long ago, even if they were used recently.
caches = caches[:0]
if err := db.Find(&caches, bolthold.
Where("CreatedAt").Lt(time.Now().Add(-keepUsed).Unix()),
); err != nil {
h.logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}
// For caches sharing the same key and version, keep only the latest one.
// Older editions that were used recently are kept for a while, in case a download is still in progress.
if results, err := db.FindAggregate(
&Cache{},
bolthold.Where("Complete").Eq(true),
"Key", "Version",
); err != nil {
h.logger.Warnf("find aggregate caches: %v", err)
} else {
for _, result := range results {
if result.Count() <= 1 {
continue
}
result.Sort("CreatedAt")
caches = caches[:0]
result.Reduction(&caches)
for _, cache := range caches[:len(caches)-1] {
if time.Since(time.Unix(cache.UsedAt, 0)) < keepOld {
// Keep it since it has been used recently, even if it's old.
// Or it could break downloading in process.
continue
}
h.storage.Remove(cache.ID)
if err := db.Delete(cache.ID, cache); err != nil {
h.logger.Warnf("delete cache: %v", err)
continue
}
h.logger.Infof("deleted cache: %+v", cache)
}
}
}
}
func (h *Handler) responseJSON(w http.ResponseWriter, r *http.Request, code int, v ...any) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
var data []byte
if len(v) == 0 || v[0] == nil {
data, _ = json.Marshal(struct{}{})
} else if err, ok := v[0].(error); ok {
h.logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
data, _ = json.Marshal(map[string]any{
"error": err.Error(),
})
} else {
data, _ = json.Marshal(v[0])
}
w.WriteHeader(code)
_, _ = w.Write(data)
}
func parseContentRange(s string) (int64, int64, error) {
// support the format like "bytes 11-22/*" only
s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
s1, s2, _ := strings.Cut(s, "-")
start, err := strconv.ParseInt(s1, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
}
stop, err := strconv.ParseInt(s2, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
}
return start, stop, nil
}
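To make the range handling concrete, a tiny hedged sketch (it would have to live inside this package, since parseContentRange is unexported, and it assumes the testify imports already used in handler_test.go below):
func TestParseContentRange_Sketch(t *testing.T) { // hypothetical test, not part of the commit
	start, stop, err := parseContentRange("bytes 0-99/*")
	require.NoError(t, err)
	require.Equal(t, int64(0), start)
	require.Equal(t, int64(99), stop) // only start is used by upload() to position the chunk on disk
}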


@@ -0,0 +1,707 @@
package artifactcache
import (
"bytes"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"net/http"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/timshannon/bolthold"
"go.etcd.io/bbolt"
)
func TestHandler(t *testing.T) {
dir := filepath.Join(t.TempDir(), "artifactcache")
handler, err := StartHandler(dir, "", 0, nil)
require.NoError(t, err)
base := fmt.Sprintf("%s%s", handler.ExternalURL(), urlBase)
defer func() {
t.Run("inpect db", func(t *testing.T) {
db, err := handler.openDB()
require.NoError(t, err)
defer db.Close()
require.NoError(t, db.Bolt().View(func(tx *bbolt.Tx) error {
return tx.Bucket([]byte("Cache")).ForEach(func(k, v []byte) error {
t.Logf("%s: %s", k, v)
return nil
})
}))
})
t.Run("close", func(t *testing.T) {
require.NoError(t, handler.Close())
assert.Nil(t, handler.server)
assert.Nil(t, handler.listener)
_, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 1), "", nil)
assert.Error(t, err)
})
}()
t.Run("get not exist", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
require.NoError(t, err)
require.Equal(t, 204, resp.StatusCode)
})
t.Run("reserve and upload", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
uploadCacheNormally(t, base, key, version, content)
})
t.Run("clean", func(t *testing.T) {
resp, err := http.Post(fmt.Sprintf("%s/clean", base), "", nil)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
})
t.Run("reserve with bad request", func(t *testing.T) {
body := []byte(`invalid json`)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
})
t.Run("duplicate reserve", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var first, second struct {
CacheID uint64 `json:"cacheId"`
}
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
require.NoError(t, json.NewDecoder(resp.Body).Decode(&first))
assert.NotZero(t, first.CacheID)
}
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
require.NoError(t, json.NewDecoder(resp.Body).Decode(&second))
assert.NotZero(t, second.CacheID)
}
assert.NotEqual(t, first.CacheID, second.CacheID)
})
t.Run("upload with bad id", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/invalid_id", base), bytes.NewReader(nil))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
})
t.Run("upload without reserve", func(t *testing.T) {
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, 1000), bytes.NewReader(nil))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
})
t.Run("upload with complete", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var id uint64
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("upload with invalid range", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var id uint64
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes xx-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("commit with bad id", func(t *testing.T) {
{
resp, err := http.Post(fmt.Sprintf("%s/caches/invalid_id", base), "", nil)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("commit with not exist id", func(t *testing.T) {
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, 100), "", nil)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("duplicate commit", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var id uint64
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 400, resp.StatusCode)
}
})
t.Run("commit early", func(t *testing.T) {
key := strings.ToLower(t.Name())
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
var id uint64
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: 100,
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content[:50]))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-59/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 500, resp.StatusCode)
}
})
t.Run("get with bad id", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/artifacts/invalid_id", base))
require.NoError(t, err)
require.Equal(t, 400, resp.StatusCode)
})
t.Run("get with not exist id", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100))
require.NoError(t, err)
require.Equal(t, 404, resp.StatusCode)
})
t.Run("get with not exist id", func(t *testing.T) {
resp, err := http.Get(fmt.Sprintf("%s/artifacts/%d", base, 100))
require.NoError(t, err)
require.Equal(t, 404, resp.StatusCode)
})
t.Run("get with multiple keys", func(t *testing.T) {
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
key := strings.ToLower(t.Name())
keys := [3]string{
key + "_a_b_c",
key + "_a_b",
key + "_a",
}
contents := [3][]byte{
make([]byte, 100),
make([]byte, 200),
make([]byte, 300),
}
for i := range contents {
_, err := rand.Read(contents[i])
require.NoError(t, err)
uploadCacheNormally(t, base, keys[i], version, contents[i])
time.Sleep(time.Second) // ensure CreatedAt of caches are different
}
reqKeys := strings.Join([]string{
key + "_a_b_x",
key + "_a_b",
key + "_a",
}, ",")
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
/*
Expect `key_a_b` because:
- `key_a_b_x` doesn't match any caches.
- `key_a_b` matches `key_a_b` and `key_a_b_c`, but `key_a_b` is newer.
*/
expect := 1
got := struct {
Result string `json:"result"`
ArchiveLocation string `json:"archiveLocation"`
CacheKey string `json:"cacheKey"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
assert.Equal(t, "hit", got.Result)
assert.Equal(t, keys[expect], got.CacheKey)
contentResp, err := http.Get(got.ArchiveLocation)
require.NoError(t, err)
require.Equal(t, 200, contentResp.StatusCode)
content, err := io.ReadAll(contentResp.Body)
require.NoError(t, err)
assert.Equal(t, contents[expect], content)
})
t.Run("case insensitive", func(t *testing.T) {
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
key := strings.ToLower(t.Name())
content := make([]byte, 100)
_, err := rand.Read(content)
require.NoError(t, err)
uploadCacheNormally(t, base, key+"_ABC", version, content)
{
reqKey := key + "_aBc"
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKey, version))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
got := struct {
Result string `json:"result"`
ArchiveLocation string `json:"archiveLocation"`
CacheKey string `json:"cacheKey"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
assert.Equal(t, "hit", got.Result)
assert.Equal(t, key+"_abc", got.CacheKey)
}
})
t.Run("exact keys are preferred (key 0)", func(t *testing.T) {
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
key := strings.ToLower(t.Name())
keys := [3]string{
key + "_a",
key + "_a_b_c",
key + "_a_b",
}
contents := [3][]byte{
make([]byte, 100),
make([]byte, 200),
make([]byte, 300),
}
for i := range contents {
_, err := rand.Read(contents[i])
require.NoError(t, err)
uploadCacheNormally(t, base, keys[i], version, contents[i])
time.Sleep(time.Second) // ensure CreatedAt of caches are different
}
reqKeys := strings.Join([]string{
key + "_a",
key + "_a_b",
}, ",")
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
/*
Expect `key_a` because:
- `key_a` matches `key_a`, `key_a_b` and `key_a_b_c`, but `key_a` is an exact match.
- `key_a_b` matches `key_a_b` and `key_a_b_c`, but the previous key already had a match.
*/
expect := 0
got := struct {
ArchiveLocation string `json:"archiveLocation"`
CacheKey string `json:"cacheKey"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
assert.Equal(t, keys[expect], got.CacheKey)
contentResp, err := http.Get(got.ArchiveLocation)
require.NoError(t, err)
require.Equal(t, 200, contentResp.StatusCode)
content, err := io.ReadAll(contentResp.Body)
require.NoError(t, err)
assert.Equal(t, contents[expect], content)
})
t.Run("exact keys are preferred (key 1)", func(t *testing.T) {
version := "c19da02a2bd7e77277f1ac29ab45c09b7d46a4ee758284e26bb3045ad11d9d20"
key := strings.ToLower(t.Name())
keys := [3]string{
key + "_a",
key + "_a_b_c",
key + "_a_b",
}
contents := [3][]byte{
make([]byte, 100),
make([]byte, 200),
make([]byte, 300),
}
for i := range contents {
_, err := rand.Read(contents[i])
require.NoError(t, err)
uploadCacheNormally(t, base, keys[i], version, contents[i])
time.Sleep(time.Second) // ensure CreatedAt of caches are different
}
reqKeys := strings.Join([]string{
"------------------------------------------------------",
key + "_a",
key + "_a_b",
}, ",")
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, reqKeys, version))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
/*
Expect `key_a` because:
- `------------------------------------------------------` doesn't match any caches.
- `key_a` matches `key_a`, `key_a_b` and `key_a_b_c`, but `key_a` is an exact match.
- `key_a_b` matches `key_a_b` and `key_a_b_c`, but the previous key already had a match.
*/
expect := 0
got := struct {
ArchiveLocation string `json:"archiveLocation"`
CacheKey string `json:"cacheKey"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
assert.Equal(t, keys[expect], got.CacheKey)
contentResp, err := http.Get(got.ArchiveLocation)
require.NoError(t, err)
require.Equal(t, 200, contentResp.StatusCode)
content, err := io.ReadAll(contentResp.Body)
require.NoError(t, err)
assert.Equal(t, contents[expect], content)
})
}
func uploadCacheNormally(t *testing.T, base, key, version string, content []byte) {
var id uint64
{
body, err := json.Marshal(&Request{
Key: key,
Version: version,
Size: int64(len(content)),
})
require.NoError(t, err)
resp, err := http.Post(fmt.Sprintf("%s/caches", base), "application/json", bytes.NewReader(body))
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
got := struct {
CacheID uint64 `json:"cacheId"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
id = got.CacheID
}
{
req, err := http.NewRequest(http.MethodPatch,
fmt.Sprintf("%s/caches/%d", base, id), bytes.NewReader(content))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Range", "bytes 0-99/*")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
{
resp, err := http.Post(fmt.Sprintf("%s/caches/%d", base, id), "", nil)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
}
var archiveLocation string
{
resp, err := http.Get(fmt.Sprintf("%s/cache?keys=%s&version=%s", base, key, version))
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
got := struct {
Result string `json:"result"`
ArchiveLocation string `json:"archiveLocation"`
CacheKey string `json:"cacheKey"`
}{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&got))
assert.Equal(t, "hit", got.Result)
assert.Equal(t, strings.ToLower(key), got.CacheKey)
archiveLocation = got.ArchiveLocation
}
{
resp, err := http.Get(archiveLocation) //nolint:gosec
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
got, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, content, got)
}
}
func TestHandler_gcCache(t *testing.T) {
dir := filepath.Join(t.TempDir(), "artifactcache")
handler, err := StartHandler(dir, "", 0, nil)
require.NoError(t, err)
defer func() {
require.NoError(t, handler.Close())
}()
now := time.Now()
cases := []struct {
Cache *Cache
Kept bool
}{
{
// should be kept, since it's used recently and not too old.
Cache: &Cache{
Key: "test_key_1",
Version: "test_version",
Complete: true,
UsedAt: now.Unix(),
CreatedAt: now.Add(-time.Hour).Unix(),
},
Kept: true,
},
{
// should be removed, since it's not complete and not used for a while.
Cache: &Cache{
Key: "test_key_2",
Version: "test_version",
Complete: false,
UsedAt: now.Add(-(keepTemp + time.Second)).Unix(),
CreatedAt: now.Add(-(keepTemp + time.Hour)).Unix(),
},
Kept: false,
},
{
// should be removed, since it's not used for a while.
Cache: &Cache{
Key: "test_key_3",
Version: "test_version",
Complete: true,
UsedAt: now.Add(-(keepUnused + time.Second)).Unix(),
CreatedAt: now.Add(-(keepUnused + time.Hour)).Unix(),
},
Kept: false,
},
{
// should be removed, since it's used but too old.
Cache: &Cache{
Key: "test_key_3",
Version: "test_version",
Complete: true,
UsedAt: now.Unix(),
CreatedAt: now.Add(-(keepUsed + time.Second)).Unix(),
},
Kept: false,
},
{
// should be kept, since it has a newer edition but has been used recently.
Cache: &Cache{
Key: "test_key_1",
Version: "test_version",
Complete: true,
UsedAt: now.Add(-(keepOld - time.Minute)).Unix(),
CreatedAt: now.Add(-(time.Hour + time.Second)).Unix(),
},
Kept: true,
},
{
// should be removed, since it has a newer edition and has not been used recently.
Cache: &Cache{
Key: "test_key_1",
Version: "test_version",
Complete: true,
UsedAt: now.Add(-(keepOld + time.Second)).Unix(),
CreatedAt: now.Add(-(time.Hour + time.Second)).Unix(),
},
Kept: false,
},
}
db, err := handler.openDB()
require.NoError(t, err)
for _, c := range cases {
require.NoError(t, insertCache(db, c.Cache))
}
require.NoError(t, db.Close())
handler.gcAt = time.Time{} // ensure gcCache will not skip
handler.gcCache()
db, err = handler.openDB()
require.NoError(t, err)
for i, v := range cases {
t.Run(fmt.Sprintf("%d_%s", i, v.Cache.Key), func(t *testing.T) {
cache := &Cache{}
err = db.Get(v.Cache.ID, cache)
if v.Kept {
assert.NoError(t, err)
} else {
assert.ErrorIs(t, err, bolthold.ErrNotFound)
}
})
}
require.NoError(t, db.Close())
}
func TestCreateHandler(t *testing.T) {
dir := filepath.Join(t.TempDir(), "artifactcache")
handler, router, err := CreateHandler(dir, "http://localhost:8080", nil)
require.NoError(t, err)
require.NotNil(t, handler)
require.NotNil(t, router)
require.Equal(t, "http://localhost:8080", handler.ExternalURL())
}


@@ -0,0 +1,34 @@
package artifactcache
type Request struct {
Key string `json:"key" `
Version string `json:"version"`
Size int64 `json:"cacheSize"`
}
func (c *Request) ToCache() *Cache {
if c == nil {
return nil
}
ret := &Cache{
Key: c.Key,
Version: c.Version,
Size: c.Size,
}
if c.Size == 0 {
// The request comes from an old version of actions, like `actions/cache@v2`,
// which doesn't send the cache size. Set it to -1 to indicate that.
ret.Size = -1
}
return ret
}
type Cache struct {
ID uint64 `json:"id" boltholdKey:"ID"`
Key string `json:"key" boltholdIndex:"Key"`
Version string `json:"version" boltholdIndex:"Version"`
Size int64 `json:"cacheSize"`
Complete bool `json:"complete" boltholdIndex:"Complete"`
UsedAt int64 `json:"usedAt" boltholdIndex:"UsedAt"`
CreatedAt int64 `json:"createdAt" boltholdIndex:"CreatedAt"`
}
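A small in-package sketch of the size-defaulting rule above (key and version values are made up, and the testify imports from this package's tests are assumed):
func TestRequestToCache_Sketch(t *testing.T) { // hypothetical test, not part of the commit
	req := &Request{Key: "linux-primes", Version: "v1"} // old clients like actions/cache@v2 omit cacheSize
	cache := req.ToCache()
	require.Equal(t, int64(-1), cache.Size) // -1 tells Storage.Commit to skip the size check
}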


@@ -0,0 +1,130 @@
package artifactcache
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
)
type Storage struct {
rootDir string
}
func NewStorage(rootDir string) (*Storage, error) {
if err := os.MkdirAll(rootDir, 0o755); err != nil {
return nil, err
}
return &Storage{
rootDir: rootDir,
}, nil
}
func (s *Storage) Exist(id uint64) (bool, error) {
name := s.filename(id)
if _, err := os.Stat(name); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
func (s *Storage) Write(id uint64, offset int64, reader io.Reader) error {
name := s.tempName(id, offset)
if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
return err
}
file, err := os.Create(name)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, reader)
return err
}
func (s *Storage) Commit(id uint64, size int64) (int64, error) {
defer func() {
_ = os.RemoveAll(s.tempDir(id))
}()
name := s.filename(id)
tempNames, err := s.tempNames(id)
if err != nil {
return 0, err
}
if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
return 0, err
}
file, err := os.Create(name)
if err != nil {
return 0, err
}
defer file.Close()
var written int64
for _, v := range tempNames {
f, err := os.Open(v)
if err != nil {
return 0, err
}
n, err := io.Copy(file, f)
_ = f.Close()
if err != nil {
return 0, err
}
written += n
}
// If size is less than 0, it means the size is unknown.
// We can't check the size of the file, just skip the check.
// It happens when the request comes from old versions of actions, like `actions/cache@v2`.
if size >= 0 && written != size {
_ = file.Close()
_ = os.Remove(name)
return 0, fmt.Errorf("broken file: %v != %v", written, size)
}
return written, nil
}
func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id uint64) {
name := s.filename(id)
http.ServeFile(w, r, name)
}
func (s *Storage) Remove(id uint64) {
_ = os.Remove(s.filename(id))
_ = os.RemoveAll(s.tempDir(id))
}
func (s *Storage) filename(id uint64) string {
return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id))
}
func (s *Storage) tempDir(id uint64) string {
return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id))
}
func (s *Storage) tempName(id uint64, offset int64) string {
return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset))
}
func (s *Storage) tempNames(id uint64) ([]string, error) {
dir := s.tempDir(id)
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var names []string
for _, v := range files {
if !v.IsDir() {
names = append(names, filepath.Join(dir, v.Name()))
}
}
return names, nil
}
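To make the on-disk layout concrete, a hedged sketch of the paths produced by the helpers above (rootDir and the id are illustrative; testing and testify imports are assumed):
func TestStoragePaths_Sketch(t *testing.T) { // hypothetical test, not part of the commit
	s := &Storage{rootDir: "/tmp/actcache"}
	// Completed caches are sharded by id%0xff (two hex digits); the file name is the id itself.
	require.Equal(t, filepath.Join("/tmp/actcache", "2d", "300"), s.filename(300)) // 300 % 0xff == 0x2d
	// Upload chunks are staged under tmp/<id>/<offset as 16 hex digits> until Commit concatenates them.
	require.Equal(t, filepath.Join("/tmp/actcache", "tmp", "300", "0000000000000064"), s.tempName(300, 100))
}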


@@ -0,0 +1,30 @@
# Copied from https://github.com/actions/cache#example-cache-workflow
name: Caching Primes
on: push
jobs:
build:
runs-on: ubuntu-latest
steps:
- run: env
- uses: actions/checkout@v3
- name: Cache Primes
id: cache-primes
uses: actions/cache@v3
with:
path: prime-numbers
key: ${{ runner.os }}-primes-${{ github.run_id }}
restore-keys: |
${{ runner.os }}-primes
${{ runner.os }}
- name: Generate Prime Numbers
if: steps.cache-primes.outputs.cache-hit != 'true'
run: cat /proc/sys/kernel/random/uuid > prime-numbers
- name: Use Prime Numbers
run: cat prime-numbers

1058
pkg/artifacts/artifact.pb.go Normal file

File diff suppressed because it is too large


@@ -0,0 +1,456 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifacts
// GitHub Actions Artifacts V4 API Simple Description
//
// 1. Upload artifact
// 1.1. CreateArtifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/CreateArtifact
// Request:
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test",
// "version": 4
// }
// Response:
// {
// "ok": true,
// "signedUploadUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75"
// }
// 1.2. Upload Zip Content to Blobstorage (unauthenticated request)
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=block
// 1.3. Continue Upload Zip Content to Blobstorage (unauthenticated request), repeat until everything is uploaded
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=appendBlock
// 1.4. Unknown xml payload to Blobstorage (unauthenticated request), ignored for now
// PUT: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/UploadArtifact?sig=mO7y35r4GyjN7fwg0DTv3-Fv1NDXD84KLEgLpoPOtDI=&expires=2024-01-23+21%3A48%3A37.20833956+%2B0100+CET&artifactName=test&taskID=75&comp=blockList
// 1.5. FinalizeArtifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/FinalizeArtifact
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test",
// "size": "2097",
// "hash": "sha256:b6325614d5649338b87215d9536b3c0477729b8638994c74cdefacb020a2cad4"
// }
// Response
// {
// "ok": true,
// "artifactId": "4"
// }
// 2. Download artifact
// 2.1. ListArtifacts and optionally filter by artifact exact name or id
// Post: /twirp/github.actions.results.api.v1.ArtifactService/ListArtifacts
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name_filter": "test"
// }
// Response
// {
// "artifacts": [
// {
// "workflowRunBackendId": "21",
// "workflowJobRunBackendId": "49",
// "databaseId": "4",
// "name": "test",
// "size": "2093",
// "createdAt": "2024-01-23T00:13:28Z"
// }
// ]
// }
// 2.2. GetSignedArtifactURL get the URL to download the artifact zip file of a specific artifact
// Post: /twirp/github.actions.results.api.v1.ArtifactService/GetSignedArtifactURL
// Request
// {
// "workflow_run_backend_id": "21",
// "workflow_job_run_backend_id": "49",
// "name": "test"
// }
// Response
// {
// "signedUrl": "http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76"
// }
// 2.3. Download Zip from Blobstorage (unauthenticated request)
// GET: http://localhost:3000/twirp/github.actions.results.api.v1.ArtifactService/DownloadArtifact?sig=wHzFOwpF-6220-5CA0CIRmAX9VbiTC2Mji89UOqo1E8=&expires=2024-01-23+21%3A51%3A56.872846295+%2B0100+CET&artifactName=test&taskID=76
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"hash/fnv"
"io"
"io/fs"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/julienschmidt/httprouter"
log "github.com/sirupsen/logrus"
"google.golang.org/protobuf/encoding/protojson"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
ArtifactV4RouteBase = "/twirp/github.actions.results.api.v1.ArtifactService"
ArtifactV4ContentEncoding = "application/zip"
)
type artifactV4Routes struct {
prefix string
fs WriteFS
rfs fs.FS
AppURL string
baseDir string
}
type ArtifactContext struct {
Req *http.Request
Resp http.ResponseWriter
}
func artifactNameToID(s string) int64 {
h := fnv.New32a()
h.Write([]byte(s))
return int64(h.Sum32())
}
func (c ArtifactContext) Error(status int, _ ...interface{}) {
c.Resp.WriteHeader(status)
}
func (c ArtifactContext) JSON(status int, _ ...interface{}) {
c.Resp.WriteHeader(status)
}
func validateRunIDV4(ctx *ArtifactContext, rawRunID string) (interface{}, int64, bool) {
runID, err := strconv.ParseInt(rawRunID, 10, 64)
if err != nil /* || task.Job.RunID != runID*/ {
log.Error("Error runID not match")
ctx.Error(http.StatusBadRequest, "run-id does not match")
return nil, 0, false
}
return nil, runID, true
}
func RoutesV4(router *httprouter.Router, baseDir string, fsys WriteFS, rfs fs.FS) {
route := &artifactV4Routes{
fs: fsys,
rfs: rfs,
baseDir: baseDir,
prefix: ArtifactV4RouteBase,
}
router.POST(path.Join(ArtifactV4RouteBase, "CreateArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
route.AppURL = r.Host
route.createArtifact(&ArtifactContext{
Req: r,
Resp: w,
})
})
router.POST(path.Join(ArtifactV4RouteBase, "FinalizeArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
route.finalizeArtifact(&ArtifactContext{
Req: r,
Resp: w,
})
})
router.POST(path.Join(ArtifactV4RouteBase, "ListArtifacts"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
route.listArtifacts(&ArtifactContext{
Req: r,
Resp: w,
})
})
router.POST(path.Join(ArtifactV4RouteBase, "GetSignedArtifactURL"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
route.AppURL = r.Host
route.getSignedArtifactURL(&ArtifactContext{
Req: r,
Resp: w,
})
})
router.POST(path.Join(ArtifactV4RouteBase, "DeleteArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
route.AppURL = r.Host
route.deleteArtifact(&ArtifactContext{
Req: r,
Resp: w,
})
})
router.PUT(path.Join(ArtifactV4RouteBase, "UploadArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
route.uploadArtifact(&ArtifactContext{
Req: r,
Resp: w,
})
})
router.GET(path.Join(ArtifactV4RouteBase, "DownloadArtifact"), func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
route.downloadArtifact(&ArtifactContext{
Req: r,
Resp: w,
})
})
}
func (r artifactV4Routes) buildSignature(endp, expires, artifactName string, taskID int64) []byte {
mac := hmac.New(sha256.New, []byte{0xba, 0xdb, 0xee, 0xf0})
mac.Write([]byte(endp))
mac.Write([]byte(expires))
mac.Write([]byte(artifactName))
fmt.Fprint(mac, taskID)
return mac.Sum(nil)
}
func (r artifactV4Routes) buildArtifactURL(endp, artifactName string, taskID int64) string {
expires := time.Now().Add(60 * time.Minute).Format("2006-01-02 15:04:05.999999999 -0700 MST")
uploadURL := "http://" + strings.TrimSuffix(r.AppURL, "/") + strings.TrimSuffix(r.prefix, "/") +
"/" + endp + "?sig=" + base64.URLEncoding.EncodeToString(r.buildSignature(endp, expires, artifactName, taskID)) + "&expires=" + url.QueryEscape(expires) + "&artifactName=" + url.QueryEscape(artifactName) + "&taskID=" + fmt.Sprint(taskID)
return uploadURL
}
func (r artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (int64, string, bool) {
rawTaskID := ctx.Req.URL.Query().Get("taskID")
sig := ctx.Req.URL.Query().Get("sig")
expires := ctx.Req.URL.Query().Get("expires")
artifactName := ctx.Req.URL.Query().Get("artifactName")
dsig, _ := base64.URLEncoding.DecodeString(sig)
taskID, _ := strconv.ParseInt(rawTaskID, 10, 64)
expectedSig := r.buildSignature(endp, expires, artifactName, taskID)
if !hmac.Equal(dsig, expectedSig) {
log.Error("Error unauthorized")
ctx.Error(http.StatusUnauthorized, "Error unauthorized")
return -1, "", false
}
t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", expires)
if err != nil || t.Before(time.Now()) {
log.Error("Error link expired")
ctx.Error(http.StatusUnauthorized, "Error link expired")
return -1, "", false
}
return taskID, artifactName, true
}
func (r *artifactV4Routes) parseProtbufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) bool {
body, err := io.ReadAll(ctx.Req.Body)
if err != nil {
log.Errorf("error decode request body: %v", err)
ctx.Error(http.StatusInternalServerError, "Error decode request body")
return false
}
err = protojson.Unmarshal(body, req)
if err != nil {
log.Errorf("error decode request body: %v", err)
ctx.Error(http.StatusInternalServerError, "Error decode request body")
return false
}
return true
}
func (r *artifactV4Routes) sendProtbufBody(ctx *ArtifactContext, req protoreflect.ProtoMessage) {
resp, err := protojson.Marshal(req)
if err != nil {
log.Errorf("error encode response body: %v", err)
ctx.Error(http.StatusInternalServerError, "Error encode response body")
return
}
ctx.Resp.Header().Set("Content-Type", "application/json;charset=utf-8")
ctx.Resp.WriteHeader(http.StatusOK)
_, _ = ctx.Resp.Write(resp)
}
func (r *artifactV4Routes) createArtifact(ctx *ArtifactContext) {
var req CreateArtifactRequest
if ok := r.parseProtbufBody(ctx, &req); !ok {
return
}
_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
if !ok {
return
}
artifactName := req.Name
safeRunPath := safeResolve(r.baseDir, fmt.Sprint(runID))
safePath := safeResolve(safeRunPath, artifactName)
safePath = safeResolve(safePath, artifactName+".zip")
file, err := r.fs.OpenWritable(safePath)
if err != nil {
panic(err)
}
file.Close()
respData := CreateArtifactResponse{
Ok: true,
SignedUploadUrl: r.buildArtifactURL("UploadArtifact", artifactName, runID),
}
r.sendProtbufBody(ctx, &respData)
}
func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) {
task, artifactName, ok := r.verifySignature(ctx, "UploadArtifact")
if !ok {
return
}
comp := ctx.Req.URL.Query().Get("comp")
switch comp {
case "block", "appendBlock":
safeRunPath := safeResolve(r.baseDir, fmt.Sprint(task))
safePath := safeResolve(safeRunPath, artifactName)
safePath = safeResolve(safePath, artifactName+".zip")
file, err := r.fs.OpenAppendable(safePath)
if err != nil {
panic(err)
}
defer file.Close()
writer, ok := file.(io.Writer)
if !ok {
panic(errors.New("file is not writable"))
}
if ctx.Req.Body == nil {
panic(errors.New("no body given"))
}
_, err = io.Copy(writer, ctx.Req.Body)
if err != nil {
panic(err)
}
file.Close()
ctx.JSON(http.StatusCreated, "appended")
case "blocklist":
ctx.JSON(http.StatusCreated, "created")
}
}
func (r *artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) {
var req FinalizeArtifactRequest
if ok := r.parseProtbufBody(ctx, &req); !ok {
return
}
_, _, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
if !ok {
return
}
respData := FinalizeArtifactResponse{
Ok: true,
ArtifactId: artifactNameToID(req.Name),
}
r.sendProtbufBody(ctx, &respData)
}
func (r *artifactV4Routes) listArtifacts(ctx *ArtifactContext) {
var req ListArtifactsRequest
if ok := r.parseProtbufBody(ctx, &req); !ok {
return
}
_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
if !ok {
return
}
safePath := safeResolve(r.baseDir, fmt.Sprint(runID))
entries, err := fs.ReadDir(r.rfs, safePath)
if err != nil {
panic(err)
}
list := []*ListArtifactsResponse_MonolithArtifact{}
for _, entry := range entries {
id := artifactNameToID(entry.Name())
if (req.NameFilter == nil || req.NameFilter.Value == entry.Name()) && (req.IdFilter == nil || req.IdFilter.Value == id) {
data := &ListArtifactsResponse_MonolithArtifact{
Name: entry.Name(),
CreatedAt: timestamppb.Now(),
DatabaseId: id,
WorkflowRunBackendId: req.WorkflowRunBackendId,
WorkflowJobRunBackendId: req.WorkflowJobRunBackendId,
Size: 0,
}
if info, err := entry.Info(); err == nil {
data.Size = info.Size()
data.CreatedAt = timestamppb.New(info.ModTime())
}
list = append(list, data)
}
}
respData := ListArtifactsResponse{
Artifacts: list,
}
r.sendProtbufBody(ctx, &respData)
}
func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {
var req GetSignedArtifactURLRequest
if ok := r.parseProtbufBody(ctx, &req); !ok {
return
}
_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
if !ok {
return
}
artifactName := req.Name
respData := GetSignedArtifactURLResponse{}
respData.SignedUrl = r.buildArtifactURL("DownloadArtifact", artifactName, runID)
r.sendProtbufBody(ctx, &respData)
}
func (r *artifactV4Routes) downloadArtifact(ctx *ArtifactContext) {
task, artifactName, ok := r.verifySignature(ctx, "DownloadArtifact")
if !ok {
return
}
safeRunPath := safeResolve(r.baseDir, fmt.Sprint(task))
safePath := safeResolve(safeRunPath, artifactName)
safePath = safeResolve(safePath, artifactName+".zip")
file, _ := r.rfs.Open(safePath)
_, _ = io.Copy(ctx.Resp, file)
}
func (r *artifactV4Routes) deleteArtifact(ctx *ArtifactContext) {
var req DeleteArtifactRequest
if ok := r.parseProtbufBody(ctx, &req); !ok {
return
}
_, runID, ok := validateRunIDV4(ctx, req.WorkflowRunBackendId)
if !ok {
return
}
safeRunPath := safeResolve(r.baseDir, fmt.Sprint(runID))
safePath := safeResolve(safeRunPath, req.Name)
_ = os.RemoveAll(safePath)
respData := DeleteArtifactResponse{
Ok: true,
ArtifactId: artifactNameToID(req.Name),
}
r.sendProtbufBody(ctx, &respData)
}
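A minimal wiring sketch for these routes, written as if it lived inside this package (readWriteFSImpl from server.go below is unexported and satisfies both WriteFS and fs.FS); the listen address and base directory are illustrative:
func serveArtifactsV4Sketch() error { // hypothetical helper, not part of the commit
	router := httprouter.New()
	fsys := readWriteFSImpl{} // defined in server.go; implements both WriteFS and fs.FS
	// Uploaded zips end up below ./artifacts/<runID>/<name>/<name>.zip.
	RoutesV4(router, "./artifacts", fsys, fsys)
	return http.ListenAndServe("127.0.0.1:34567", router) // illustrative address
}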

319
pkg/artifacts/server.go Normal file

@@ -0,0 +1,319 @@
package artifacts
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/julienschmidt/httprouter"
"github.com/actions-oss/act-cli/pkg/common"
)
type FileContainerResourceURL struct {
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
type NamedFileContainerResourceURL struct {
Name string `json:"name"`
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
type NamedFileContainerResourceURLResponse struct {
Count int `json:"count"`
Value []NamedFileContainerResourceURL `json:"value"`
}
type ContainerItem struct {
Path string `json:"path"`
ItemType string `json:"itemType"`
ContentLocation string `json:"contentLocation"`
}
type ContainerItemResponse struct {
Value []ContainerItem `json:"value"`
}
type ResponseMessage struct {
Message string `json:"message"`
}
type WritableFile interface {
io.WriteCloser
}
type WriteFS interface {
OpenWritable(name string) (WritableFile, error)
OpenAppendable(name string) (WritableFile, error)
}
type readWriteFSImpl struct {
}
func (fwfs readWriteFSImpl) Open(name string) (fs.File, error) {
return os.Open(name)
}
func (fwfs readWriteFSImpl) OpenWritable(name string) (WritableFile, error) {
if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
return nil, err
}
return os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
}
func (fwfs readWriteFSImpl) OpenAppendable(name string) (WritableFile, error) {
if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
return nil, err
}
file, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o644)
if err != nil {
return nil, err
}
_, err = file.Seek(0, io.SeekEnd)
if err != nil {
return nil, err
}
return file, nil
}
var gzipExtension = ".gz__"
func safeResolve(baseDir string, relPath string) string {
return filepath.Join(baseDir, filepath.Clean(filepath.Join(string(os.PathSeparator), relPath)))
}
func uploads(router *httprouter.Router, baseDir string, fsys WriteFS) {
router.POST("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
runID := params.ByName("runId")
json, err := json.Marshal(FileContainerResourceURL{
FileContainerResourceURL: fmt.Sprintf("http://%s/upload/%s", req.Host, runID),
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.PUT("/upload/:runId", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
itemPath := req.URL.Query().Get("itemPath")
runID := params.ByName("runId")
if req.Header.Get("Content-Encoding") == "gzip" {
itemPath += gzipExtension
}
safeRunPath := safeResolve(baseDir, runID)
safePath := safeResolve(safeRunPath, itemPath)
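// Chunked uploads arrive as several PUTs carrying Content-Range headers; only the
// first chunk ("bytes 0-...") truncates the file, later chunks are appended.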
file, err := func() (WritableFile, error) {
contentRange := req.Header.Get("Content-Range")
if contentRange != "" && !strings.HasPrefix(contentRange, "bytes 0-") {
return fsys.OpenAppendable(safePath)
}
return fsys.OpenWritable(safePath)
}()
if err != nil {
panic(err)
}
defer file.Close()
writer, ok := file.(io.Writer)
if !ok {
panic(errors.New("file is not writable"))
}
if req.Body == nil {
panic(errors.New("no body given"))
}
_, err = io.Copy(writer, req.Body)
if err != nil {
panic(err)
}
json, err := json.Marshal(ResponseMessage{
Message: "success",
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.PATCH("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
json, err := json.Marshal(ResponseMessage{
Message: "success",
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
}
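// downloads registers the v1 artifact listing and download endpoints, listing the
// artifacts of a run and serving stored files, including those saved with the gzip
// marker suffix.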
func downloads(router *httprouter.Router, baseDir string, fsys fs.FS) {
router.GET("/_apis/pipelines/workflows/:runId/artifacts", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
runID := params.ByName("runId")
safePath := safeResolve(baseDir, runID)
entries, err := fs.ReadDir(fsys, safePath)
if err != nil {
panic(err)
}
var list []NamedFileContainerResourceURL
for _, entry := range entries {
list = append(list, NamedFileContainerResourceURL{
Name: entry.Name(),
FileContainerResourceURL: fmt.Sprintf("http://%s/download/%s", req.Host, runID),
})
}
json, err := json.Marshal(NamedFileContainerResourceURLResponse{
Count: len(list),
Value: list,
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.GET("/download/:container", func(w http.ResponseWriter, req *http.Request, params httprouter.Params) {
container := params.ByName("container")
itemPath := req.URL.Query().Get("itemPath")
safePath := safeResolve(baseDir, filepath.Join(container, itemPath))
var files []ContainerItem
err := fs.WalkDir(fsys, safePath, func(path string, entry fs.DirEntry, _ error) error {
if !entry.IsDir() {
rel, err := filepath.Rel(safePath, path)
if err != nil {
panic(err)
}
// if it was uploaded as gzip
rel = strings.TrimSuffix(rel, gzipExtension)
path := filepath.Join(itemPath, rel)
rel = filepath.ToSlash(rel)
path = filepath.ToSlash(path)
files = append(files, ContainerItem{
Path: path,
ItemType: "file",
ContentLocation: fmt.Sprintf("http://%s/artifact/%s/%s/%s", req.Host, container, itemPath, rel),
})
}
return nil
})
if err != nil {
panic(err)
}
json, err := json.Marshal(ContainerItemResponse{
Value: files,
})
if err != nil {
panic(err)
}
_, err = w.Write(json)
if err != nil {
panic(err)
}
})
router.GET("/artifact/*path", func(w http.ResponseWriter, _ *http.Request, params httprouter.Params) {
path := params.ByName("path")[1:]
safePath := safeResolve(baseDir, path)
file, err := fsys.Open(safePath)
if err != nil {
// try gzip file
file, err = fsys.Open(safePath + gzipExtension)
if err != nil {
panic(err)
}
w.Header().Add("Content-Encoding", "gzip")
}
_, err = io.Copy(w, file)
if err != nil {
panic(err)
}
})
}
func Serve(ctx context.Context, artifactPath string, addr string, port string) context.CancelFunc {
serverContext, cancel := context.WithCancel(ctx)
logger := common.Logger(serverContext)
if artifactPath == "" {
return cancel
}
router := httprouter.New()
logger.Debugf("Artifacts base path '%s'", artifactPath)
fsys := readWriteFSImpl{}
uploads(router, artifactPath, fsys)
downloads(router, artifactPath, fsys)
RoutesV4(router, artifactPath, fsys, fsys)
server := &http.Server{
Addr: fmt.Sprintf("%s:%s", addr, port),
ReadHeaderTimeout: 2 * time.Second,
Handler: router,
}
// run server
go func() {
logger.Infof("Start server on http://%s:%s", addr, port)
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
logger.Fatal(err)
}
}()
// wait for cancel to gracefully shutdown server
go func() {
<-serverContext.Done()
if err := server.Shutdown(ctx); err != nil {
logger.Errorf("failed shutdown gracefully - force shutdown: %v", err)
server.Close()
}
}()
return cancel
}

View File

@@ -0,0 +1,398 @@
package artifacts
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"strings"
"testing"
"testing/fstest"
"github.com/julienschmidt/httprouter"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/actions-oss/act-cli/pkg/model"
"github.com/actions-oss/act-cli/pkg/runner"
)
type writableMapFile struct {
fstest.MapFile
}
func (f *writableMapFile) Write(data []byte) (int, error) {
f.Data = data
return len(data), nil
}
func (f *writableMapFile) Close() error {
return nil
}
type writeMapFS struct {
fstest.MapFS
}
func (fsys writeMapFS) OpenWritable(name string) (WritableFile, error) {
var file = &writableMapFile{
MapFile: fstest.MapFile{
Data: []byte("content2"),
},
}
fsys.MapFS[name] = &file.MapFile
return file, nil
}
func (fsys writeMapFS) OpenAppendable(name string) (WritableFile, error) {
var file = &writableMapFile{
MapFile: fstest.MapFile{
Data: []byte("content2"),
},
}
fsys.MapFS[name] = &file.MapFile
return file, nil
}
func TestNewArtifactUploadPrepare(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, "artifact/server/path", writeMapFS{memfs})
req, _ := http.NewRequest("POST", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := FileContainerResourceURL{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("http://localhost/upload/1", response.FileContainerResourceURL)
}
func TestArtifactUploadBlob(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, "artifact/server/path", writeMapFS{memfs})
req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=some/file", strings.NewReader("content"))
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := ResponseMessage{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("success", response.Message)
assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
}
func TestFinalizeArtifactUpload(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, "artifact/server/path", writeMapFS{memfs})
req, _ := http.NewRequest("PATCH", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := ResponseMessage{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("success", response.Message)
}
func TestListArtifacts(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"artifact/server/path/1/file.txt": {
Data: []byte(""),
},
})
router := httprouter.New()
downloads(router, "artifact/server/path", memfs)
req, _ := http.NewRequest("GET", "http://localhost/_apis/pipelines/workflows/1/artifacts", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
response := NamedFileContainerResourceURLResponse{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal(1, response.Count)
assert.Equal("file.txt", response.Value[0].Name)
assert.Equal("http://localhost/download/1", response.Value[0].FileContainerResourceURL)
}
func TestListArtifactContainer(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"artifact/server/path/1/some/file": {
Data: []byte(""),
},
})
router := httprouter.New()
downloads(router, "artifact/server/path", memfs)
req, _ := http.NewRequest("GET", "http://localhost/download/1?itemPath=some/file", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
response := ContainerItemResponse{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal(1, len(response.Value))
assert.Equal("some/file", response.Value[0].Path)
assert.Equal("file", response.Value[0].ItemType)
assert.Equal("http://localhost/artifact/1/some/file/.", response.Value[0].ContentLocation)
}
func TestDownloadArtifactFile(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"artifact/server/path/1/some/file": {
Data: []byte("content"),
},
})
router := httprouter.New()
downloads(router, "artifact/server/path", memfs)
req, _ := http.NewRequest("GET", "http://localhost/artifact/1/some/file", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
data := rr.Body.Bytes()
assert.Equal("content", string(data))
}
type TestJobFileInfo struct {
workdir string
workflowPath string
eventName string
errorMessage string
platforms map[string]string
containerArchitecture string
}
var (
artifactsPath = path.Join(os.TempDir(), "test-artifacts")
artifactsAddr = "127.0.0.1"
artifactsPort = "12345"
)
func TestArtifactFlow(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
if _, ok := os.LookupEnv("NO_EXTERNAL_IP"); ok {
t.Skip("skipping test because QEMU is disabled")
}
ctx := context.Background()
cancel := Serve(ctx, artifactsPath, artifactsAddr, artifactsPort)
defer cancel()
platforms := map[string]string{
"ubuntu-latest": "node:16-buster", // Don't use node:16-buster-slim because it doesn't have curl command, which is used in the tests
}
tables := []TestJobFileInfo{
{"testdata", "upload-and-download", "push", "", platforms, ""},
{"testdata", "GHSL-2023-004", "push", "", platforms, ""},
{"testdata", "v4", "push", "", platforms, ""},
}
log.SetLevel(log.DebugLevel)
for _, table := range tables {
runTestJobFile(ctx, t, table)
}
}
func runTestJobFile(ctx context.Context, t *testing.T, tjfi TestJobFileInfo) {
t.Run(tjfi.workflowPath, func(t *testing.T) {
fmt.Printf("::group::%s\n", tjfi.workflowPath)
if err := os.RemoveAll(artifactsPath); err != nil {
panic(err)
}
workdir, err := filepath.Abs(tjfi.workdir)
assert.Nil(t, err, workdir)
fullWorkflowPath := filepath.Join(workdir, tjfi.workflowPath)
runnerConfig := &runner.Config{
Workdir: workdir,
BindWorkdir: false,
EventName: tjfi.eventName,
Platforms: tjfi.platforms,
ReuseContainers: false,
ContainerArchitecture: tjfi.containerArchitecture,
GitHubInstance: "github.com",
ArtifactServerPath: artifactsPath,
ArtifactServerAddr: artifactsAddr,
ArtifactServerPort: artifactsPort,
}
runner, err := runner.New(runnerConfig)
assert.Nil(t, err, tjfi.workflowPath)
planner, err := model.NewWorkflowPlanner(fullWorkflowPath, model.PlannerConfig{})
assert.Nil(t, err, fullWorkflowPath)
plan, err := planner.PlanEvent(tjfi.eventName)
if err == nil {
err = runner.NewPlanExecutor(plan)(ctx)
if tjfi.errorMessage == "" {
assert.Nil(t, err, fullWorkflowPath)
} else {
assert.Error(t, err, tjfi.errorMessage)
}
} else {
assert.Nil(t, plan)
}
fmt.Println("::endgroup::")
})
}
func TestMkdirFsImplSafeResolve(t *testing.T) {
baseDir := "/foo/bar"
tests := map[string]struct {
input string
want string
}{
"simple": {input: "baz", want: "/foo/bar/baz"},
"nested": {input: "baz/blue", want: "/foo/bar/baz/blue"},
"dots in middle": {input: "baz/../../blue", want: "/foo/bar/blue"},
"leading dots": {input: "../../parent", want: "/foo/bar/parent"},
"root path": {input: "/root", want: "/foo/bar/root"},
"root": {input: "/", want: "/foo/bar"},
"empty": {input: "", want: "/foo/bar"},
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
assert.Equal(tc.want, safeResolve(baseDir, tc.input))
})
}
}
func TestDownloadArtifactFileUnsafePath(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{
"artifact/server/path/some/file": {
Data: []byte("content"),
},
})
router := httprouter.New()
downloads(router, "artifact/server/path", memfs)
req, _ := http.NewRequest("GET", "http://localhost/artifact/2/../../some/file", nil)
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.FailNow(fmt.Sprintf("Wrong status: %d", status))
}
data := rr.Body.Bytes()
assert.Equal("content", string(data))
}
func TestArtifactUploadBlobUnsafePath(t *testing.T) {
assert := assert.New(t)
var memfs = fstest.MapFS(map[string]*fstest.MapFile{})
router := httprouter.New()
uploads(router, "artifact/server/path", writeMapFS{memfs})
req, _ := http.NewRequest("PUT", "http://localhost/upload/1?itemPath=../../some/file", strings.NewReader("content"))
rr := httptest.NewRecorder()
router.ServeHTTP(rr, req)
if status := rr.Code; status != http.StatusOK {
assert.Fail("Wrong status")
}
response := ResponseMessage{}
err := json.Unmarshal(rr.Body.Bytes(), &response)
if err != nil {
panic(err)
}
assert.Equal("success", response.Message)
assert.Equal("content", string(memfs["artifact/server/path/1/some/file"].Data))
}

View File

@@ -0,0 +1,39 @@
name: "GHSL-2023-0004"
on: push
jobs:
test-artifacts:
runs-on: ubuntu-latest
steps:
- run: echo "hello world" > test.txt
- name: curl upload
run: curl --silent --show-error --fail ${ACTIONS_RUNTIME_URL}upload/1?itemPath=../../my-artifact/secret.txt --upload-file test.txt
- uses: actions/download-artifact@v2
with:
name: my-artifact
path: test-artifacts
- name: 'Verify Artifact #1'
run: |
file="test-artifacts/secret.txt"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if [ "$(cat $file)" != "hello world" ] ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi
- name: Verify download should work by clean extra dots
run: curl --silent --show-error --fail --path-as-is -o out.txt ${ACTIONS_RUNTIME_URL}artifact/1/../../../1/my-artifact/secret.txt
- name: 'Verify download content'
run: |
file="out.txt"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if [ "$(cat $file)" != "hello world" ] ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi

View File

@@ -0,0 +1,230 @@
name: "Test that artifact uploads and downloads succeed"
on: push
jobs:
test-artifacts:
runs-on: ubuntu-latest
steps:
- run: mkdir -p path/to/artifact
- run: echo hello > path/to/artifact/world.txt
- uses: actions/upload-artifact@v2
with:
name: my-artifact
path: path/to/artifact/world.txt
- run: rm -rf path
- uses: actions/download-artifact@v2
with:
name: my-artifact
- name: Display structure of downloaded files
run: ls -la
# Test end-to-end by uploading two artifacts and then downloading them
- name: Create artifact files
run: |
mkdir -p path/to/dir-1
mkdir -p path/to/dir-2
mkdir -p path/to/dir-3
mkdir -p path/to/dir-5
mkdir -p path/to/dir-6
mkdir -p path/to/dir-7
echo "Lorem ipsum dolor sit amet" > path/to/dir-1/file1.txt
echo "Hello world from file #2" > path/to/dir-2/file2.txt
echo "This is a going to be a test for a large enough file that should get compressed with GZip. The @actions/artifact package uses GZip to upload files. This text should have a compression ratio greater than 100% so it should get uploaded using GZip" > path/to/dir-3/gzip.txt
dd if=/dev/random of=path/to/dir-5/file5.rnd bs=1024 count=1024
dd if=/dev/random of=path/to/dir-6/file6.rnd bs=1024 count=$((10*1024))
dd if=/dev/random of=path/to/dir-7/file7.rnd bs=1024 count=$((10*1024))
# Upload a single file artifact
- name: 'Upload artifact #1'
uses: actions/upload-artifact@v2
with:
name: 'Artifact-A'
path: path/to/dir-1/file1.txt
# Upload using a wildcard pattern, name should default to 'artifact' if not provided
- name: 'Upload artifact #2'
uses: actions/upload-artifact@v2
with:
path: path/**/dir*/
# Upload a directory that contains a file that will be uploaded with GZip
- name: 'Upload artifact #3'
uses: actions/upload-artifact@v2
with:
name: 'GZip-Artifact'
path: path/to/dir-3/
# Upload a directory that contains a file that will be uploaded with GZip
- name: 'Upload artifact #4'
uses: actions/upload-artifact@v2
with:
name: 'Multi-Path-Artifact'
path: |
path/to/dir-1/*
path/to/dir-[23]/*
!path/to/dir-3/*.txt
# Upload a mid-size file artifact
- name: 'Upload artifact #5'
uses: actions/upload-artifact@v2
with:
name: 'Mid-Size-Artifact'
path: path/to/dir-5/file5.rnd
# Upload a big file artifact
- name: 'Upload artifact #6'
uses: actions/upload-artifact@v2
with:
name: 'Big-Artifact'
path: path/to/dir-6/file6.rnd
# Upload a big file artifact twice
- name: 'Upload artifact #7 (First)'
uses: actions/upload-artifact@v2
with:
name: 'Big-Uploaded-Twice'
path: path/to/dir-7/file7.rnd
# Upload a big file artifact twice
- name: 'Upload artifact #7 (Second)'
uses: actions/upload-artifact@v2
with:
name: 'Big-Uploaded-Twice'
path: path/to/dir-7/file7.rnd
# Verify artifacts. Switch to download-artifact@v2 once it's out of preview
# Download Artifact #1 and verify the correctness of the content
- name: 'Download artifact #1'
uses: actions/download-artifact@v2
with:
name: 'Artifact-A'
path: some/new/path
- name: 'Verify Artifact #1'
run: |
file="some/new/path/file1.txt"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if [ "$(cat $file)" != "Lorem ipsum dolor sit amet" ] ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi
# Download Artifact #2 and verify the correctness of the content
- name: 'Download artifact #2'
uses: actions/download-artifact@v2
with:
name: 'artifact'
path: some/other/path
- name: 'Verify Artifact #2'
run: |
file1="some/other/path/to/dir-1/file1.txt"
file2="some/other/path/to/dir-2/file2.txt"
if [ ! -f $file1 -o ! -f $file2 ] ; then
echo "Expected files do not exist"
exit 1
fi
if [ "$(cat $file1)" != "Lorem ipsum dolor sit amet" -o "$(cat $file2)" != "Hello world from file #2" ] ; then
echo "File contents of downloaded artifacts are incorrect"
exit 1
fi
# Download Artifact #3 and verify the correctness of the content
- name: 'Download artifact #3'
uses: actions/download-artifact@v2
with:
name: 'GZip-Artifact'
path: gzip/artifact/path
# Because a directory was used as input during the upload the parent directories, path/to/dir-3/, should not be included in the uploaded artifact
- name: 'Verify Artifact #3'
run: |
gzipFile="gzip/artifact/path/gzip.txt"
if [ ! -f $gzipFile ] ; then
echo "Expected file do not exist"
exit 1
fi
if [ "$(cat $gzipFile)" != "This is a going to be a test for a large enough file that should get compressed with GZip. The @actions/artifact package uses GZip to upload files. This text should have a compression ratio greater than 100% so it should get uploaded using GZip" ] ; then
echo "File contents of downloaded artifact is incorrect"
exit 1
fi
- name: 'Download artifact #4'
uses: actions/download-artifact@v2
with:
name: 'Multi-Path-Artifact'
path: multi/artifact
- name: 'Verify Artifact #4'
run: |
file1="multi/artifact/dir-1/file1.txt"
file2="multi/artifact/dir-2/file2.txt"
if [ ! -f $file1 -o ! -f $file2 ] ; then
echo "Expected files do not exist"
exit 1
fi
if [ "$(cat $file1)" != "Lorem ipsum dolor sit amet" -o "$(cat $file2)" != "Hello world from file #2" ] ; then
echo "File contents of downloaded artifacts are incorrect"
exit 1
fi
- name: 'Download artifact #5'
uses: actions/download-artifact@v2
with:
name: 'Mid-Size-Artifact'
path: mid-size/artifact/path
- name: 'Verify Artifact #5'
run: |
file="mid-size/artifact/path/file5.rnd"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if ! diff $file path/to/dir-5/file5.rnd ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi
- name: 'Download artifact #6'
uses: actions/download-artifact@v2
with:
name: 'Big-Artifact'
path: big/artifact/path
- name: 'Verify Artifact #6'
run: |
file="big/artifact/path/file6.rnd"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if ! diff $file path/to/dir-6/file6.rnd ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi
- name: 'Download artifact #7'
uses: actions/download-artifact@v2
with:
name: 'Big-Uploaded-Twice'
path: big-uploaded-twice/artifact/path
- name: 'Verify Artifact #7'
run: |
file="big-uploaded-twice/artifact/path/file7.rnd"
if [ ! -f $file ] ; then
echo "Expected file does not exist"
exit 1
fi
if ! diff $file path/to/dir-7/file7.rnd ; then
echo "File contents of downloaded artifact are incorrect"
exit 1
fi

87
pkg/artifacts/testdata/v4/artifacts.yml vendored Normal file
View File

@@ -0,0 +1,87 @@
on:
push:
jobs:
_5:
runs-on: ubuntu-latest
steps:
- run: env
- run: |
github:
${{ tojson(github) }}
inputs:
${{ tojson(inputs) }}
matrix:
${{ tojson(matrix) }}
needs:
${{ tojson(needs) }}
strategy:
${{ tojson(strategy) }}
shell: cp {0} context.txt
- run: echo Artifact2 > data.txt
- uses: actions/upload-artifact@v4
with:
name: test
path: context.txt
- uses: actions/upload-artifact@v4
with:
name: test2
path: data.txt
- uses: actions/download-artifact@v4
with:
name: test
path: out
- run: cat out/context.txt
- name: assert
run: |
[[ "$(cat context.txt)" = "$(cat out/context.txt)" ]] || exit 1
shell: bash
- run: |
No content
shell: cp {0} context.txt
- uses: actions/upload-artifact@v4
with:
name: test
path: context.txt
overwrite: true
- uses: actions/download-artifact@v4
with:
name: test
path: out2
- run: cat out2/context.txt
- name: assert 2
run: |
[[ "$(cat context.txt)" = "$(cat out2/context.txt)" ]] || exit 1
shell: bash
- uses: actions/download-artifact@v4
with:
name: test2
path: out3
- run: cat out3/data.txt
- name: assert 3
run: |
[[ "$(cat data.txt)" = "$(cat out3/data.txt)" ]] || exit 1
shell: bash
- uses: actions/download-artifact@v4
with:
pattern: "test*"
path: out4
merge-multiple: true
- run: cat out4/data.txt
- run: cat out4/context.txt
- name: assert 4
run: |
[[ "$(cat context.txt)" = "$(cat out4/context.txt)" ]] || exit 1
[[ "$(cat data.txt)" = "$(cat out4/data.txt)" ]] || exit 1
shell: bash

100
pkg/common/auth.go Normal file
View File

@@ -0,0 +1,100 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package common
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"github.com/golang-jwt/jwt/v5"
log "github.com/sirupsen/logrus"
)
type actionsClaims struct {
jwt.RegisteredClaims
Scp string `json:"scp"`
TaskID int64
RunID int64
JobID int64
Ac string `json:"ac"`
}
type actionsCacheScope struct {
Scope string
Permission actionsCachePermission
}
type actionsCachePermission int
const (
actionsCachePermissionRead = 1 << iota
actionsCachePermissionWrite
)
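// Note: the token is signed with an empty HMAC key, and ParseAuthorizationToken
// verifies it with the same empty key. For a local runner the token only transports
// metadata (task/run/job IDs and cache scopes for the buildx gha cache); it is not
// intended as a security boundary.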
func CreateAuthorizationToken(taskID, runID, jobID int64) (string, error) {
now := time.Now()
ac, err := json.Marshal(&[]actionsCacheScope{
{
Scope: "",
Permission: actionsCachePermissionWrite,
},
})
if err != nil {
return "", err
}
claims := actionsClaims{
RegisteredClaims: jwt.RegisteredClaims{
ExpiresAt: jwt.NewNumericDate(now.Add(24 * time.Hour)),
NotBefore: jwt.NewNumericDate(now),
},
Scp: fmt.Sprintf("Actions.Results:%d:%d", runID, jobID),
TaskID: taskID,
RunID: runID,
JobID: jobID,
Ac: string(ac),
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, err := token.SignedString([]byte{})
if err != nil {
return "", err
}
return tokenString, nil
}
func ParseAuthorizationToken(req *http.Request) (int64, error) {
h := req.Header.Get("Authorization")
if h == "" {
return 0, nil
}
parts := strings.SplitN(h, " ", 2)
if len(parts) != 2 {
log.Errorf("split token failed: %s", h)
return 0, fmt.Errorf("split token failed")
}
token, err := jwt.ParseWithClaims(parts[1], &actionsClaims{}, func(t *jwt.Token) (any, error) {
if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
}
return []byte{}, nil
})
if err != nil {
return 0, err
}
c, ok := token.Claims.(*actionsClaims)
if !token.Valid || !ok {
return 0, fmt.Errorf("invalid token claim")
}
return c.TaskID, nil
}

62
pkg/common/auth_test.go Normal file
View File

@@ -0,0 +1,62 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package common
import (
"encoding/json"
"net/http"
"testing"
"github.com/golang-jwt/jwt/v5"
"github.com/stretchr/testify/assert"
)
func TestCreateAuthorizationToken(t *testing.T) {
var taskID int64 = 23
token, err := CreateAuthorizationToken(taskID, 1, 2)
assert.Nil(t, err)
assert.NotEqual(t, "", token)
claims := jwt.MapClaims{}
_, err = jwt.ParseWithClaims(token, claims, func(_ *jwt.Token) (interface{}, error) {
return []byte{}, nil
})
assert.Nil(t, err)
scp, ok := claims["scp"]
assert.True(t, ok, "Has scp claim in jwt token")
assert.Contains(t, scp, "Actions.Results:1:2")
taskIDClaim, ok := claims["TaskID"]
assert.True(t, ok, "Has TaskID claim in jwt token")
assert.Equal(t, float64(taskID), taskIDClaim, "Supplied taskid must match stored one")
acClaim, ok := claims["ac"]
assert.True(t, ok, "Has ac claim in jwt token")
ac, ok := acClaim.(string)
assert.True(t, ok, "ac claim is a string for buildx gha cache")
scopes := []actionsCacheScope{}
err = json.Unmarshal([]byte(ac), &scopes)
assert.NoError(t, err, "ac claim is a json list for buildx gha cache")
assert.GreaterOrEqual(t, len(scopes), 1, "Expected at least one action cache scope for buildx gha cache")
}
func TestParseAuthorizationToken(t *testing.T) {
var taskID int64 = 23
token, err := CreateAuthorizationToken(taskID, 1, 2)
assert.Nil(t, err)
assert.NotEqual(t, "", token)
headers := http.Header{}
headers.Set("Authorization", "Bearer "+token)
rTaskID, err := ParseAuthorizationToken(&http.Request{
Header: headers,
})
assert.Nil(t, err)
assert.Equal(t, taskID, rTaskID)
}
func TestParseAuthorizationTokenNoAuthHeader(t *testing.T) {
headers := http.Header{}
rTaskID, err := ParseAuthorizationToken(&http.Request{
Header: headers,
})
assert.Nil(t, err)
assert.Equal(t, int64(0), rTaskID)
}

54
pkg/common/cartesian.go Normal file
View File

@@ -0,0 +1,54 @@
package common
// CartesianProduct takes map of lists and returns list of unique tuples
func CartesianProduct(mapOfLists map[string][]interface{}) []map[string]interface{} {
listNames := make([]string, 0)
lists := make([][]interface{}, 0)
for k, v := range mapOfLists {
listNames = append(listNames, k)
lists = append(lists, v)
}
listCart := cartN(lists...)
rtn := make([]map[string]interface{}, 0)
for _, list := range listCart {
vMap := make(map[string]interface{})
for i, v := range list {
vMap[listNames[i]] = v
}
rtn = append(rtn, vMap)
}
return rtn
}
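// cartN computes the cartesian product of the given lists. It preallocates one
// backing slice and advances the index vector n like an odometer: the last position
// increments first and rolls over into the previous one when it wraps.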
func cartN(a ...[]interface{}) [][]interface{} {
c := 1
for _, a := range a {
c *= len(a)
}
if c == 0 || len(a) == 0 {
return nil
}
p := make([][]interface{}, c)
b := make([]interface{}, c*len(a))
n := make([]int, len(a))
s := 0
for i := range p {
e := s + len(a)
pi := b[s:e]
p[i] = pi
s = e
for j, n := range n {
pi[j] = a[j][n]
}
for j := len(n) - 1; j >= 0; j-- {
n[j]++
if n[j] < len(a[j]) {
break
}
n[j] = 0
}
}
return p
}

View File

@@ -0,0 +1,39 @@
package common
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestCartesianProduct(t *testing.T) {
assert := assert.New(t)
input := map[string][]interface{}{
"foo": {1, 2, 3, 4},
"bar": {"a", "b", "c"},
"baz": {false, true},
}
output := CartesianProduct(input)
assert.Len(output, 24)
for _, v := range output {
assert.Len(v, 3)
assert.Contains(v, "foo")
assert.Contains(v, "bar")
assert.Contains(v, "baz")
}
input = map[string][]interface{}{
"foo": {1, 2, 3, 4},
"bar": {},
"baz": {false, true},
}
output = CartesianProduct(input)
assert.Len(output, 0)
input = map[string][]interface{}{}
output = CartesianProduct(input)
assert.Len(output, 0)
}

45
pkg/common/context.go Normal file
View File

@@ -0,0 +1,45 @@
package common
import (
"context"
"os"
"os/signal"
"syscall"
)
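// createGracefulJobCancellationContext returns a context pair for two-stage shutdown:
// the first Ctrl+C (SIGINT) cancels only the embedded job-cancel context so the running
// job can clean up, while a second signal or a SIGTERM cancels the outer context and
// forces termination. The signal channel is returned for use in tests.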
func createGracefulJobCancellationContext() (context.Context, func(), chan os.Signal) {
ctx := context.Background()
ctx, forceCancel := context.WithCancel(ctx)
cancelCtx, cancel := context.WithCancel(ctx)
ctx = WithJobCancelContext(ctx, cancelCtx)
// trap Ctrl+C and call cancel on the context
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
select {
case sig := <-c:
if sig == os.Interrupt {
cancel()
select {
case <-c:
forceCancel()
case <-ctx.Done():
}
} else {
forceCancel()
}
case <-ctx.Done():
}
}()
return ctx, func() {
signal.Stop(c)
forceCancel()
cancel()
}, c
}
func CreateGracefulJobCancellationContext() (context.Context, func()) {
ctx, cancel, _ := createGracefulJobCancellationContext()
return ctx, cancel
}

View File

@@ -0,0 +1,98 @@
package common
import (
"context"
"os"
"syscall"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestGracefulJobCancellationViaSigint(t *testing.T) {
ctx, cancel, channel := createGracefulJobCancellationContext()
defer cancel()
assert.NotNil(t, ctx)
assert.NotNil(t, cancel)
assert.NotNil(t, channel)
cancelCtx := JobCancelContext(ctx)
assert.NotNil(t, cancelCtx)
assert.NoError(t, ctx.Err())
assert.NoError(t, cancelCtx.Err())
channel <- os.Interrupt
select {
case <-time.After(1 * time.Second):
t.Fatal("context not canceled")
case <-cancelCtx.Done():
case <-ctx.Done():
}
if assert.Error(t, cancelCtx.Err(), "context canceled") {
assert.Equal(t, context.Canceled, cancelCtx.Err())
}
assert.NoError(t, ctx.Err())
channel <- os.Interrupt
select {
case <-time.After(1 * time.Second):
t.Fatal("context not canceled")
case <-ctx.Done():
}
if assert.Error(t, ctx.Err(), "context canceled") {
assert.Equal(t, context.Canceled, ctx.Err())
}
}
func TestForceCancellationViaSigterm(t *testing.T) {
ctx, cancel, channel := createGracefulJobCancellationContext()
defer cancel()
assert.NotNil(t, ctx)
assert.NotNil(t, cancel)
assert.NotNil(t, channel)
cancelCtx := JobCancelContext(ctx)
assert.NotNil(t, cancelCtx)
assert.NoError(t, ctx.Err())
assert.NoError(t, cancelCtx.Err())
channel <- syscall.SIGTERM
select {
case <-time.After(1 * time.Second):
t.Fatal("context not canceled")
case <-cancelCtx.Done():
}
select {
case <-time.After(1 * time.Second):
t.Fatal("context not canceled")
case <-ctx.Done():
}
if assert.Error(t, ctx.Err(), "context canceled") {
assert.Equal(t, context.Canceled, ctx.Err())
}
if assert.Error(t, cancelCtx.Err(), "context canceled") {
assert.Equal(t, context.Canceled, cancelCtx.Err())
}
}
func TestCreateGracefulJobCancellationContext(t *testing.T) {
ctx, cancel := CreateGracefulJobCancellationContext()
defer cancel()
assert.NotNil(t, ctx)
assert.NotNil(t, cancel)
cancelCtx := JobCancelContext(ctx)
assert.NotNil(t, cancelCtx)
assert.NoError(t, cancelCtx.Err())
}
func TestCreateGracefulJobCancellationContextCancelFunc(t *testing.T) {
ctx, cancel := CreateGracefulJobCancellationContext()
assert.NotNil(t, ctx)
assert.NotNil(t, cancel)
cancelCtx := JobCancelContext(ctx)
assert.NotNil(t, cancelCtx)
assert.NoError(t, cancelCtx.Err())
cancel()
if assert.Error(t, ctx.Err(), "context canceled") {
assert.Equal(t, context.Canceled, ctx.Err())
}
if assert.Error(t, cancelCtx.Err(), "context canceled") {
assert.Equal(t, context.Canceled, cancelCtx.Err())
}
}

143
pkg/common/draw.go Normal file
View File

@@ -0,0 +1,143 @@
package common
import (
"fmt"
"io"
"os"
"strings"
)
// Style is a specific style
type Style int
// Styles
const (
StyleDoubleLine = iota
StyleSingleLine
StyleDashedLine
StyleNoLine
)
// NewPen creates a new pen
func NewPen(style Style, color int) *Pen {
bgcolor := 49
if os.Getenv("CLICOLOR") == "0" {
color = 0
bgcolor = 0
}
return &Pen{
style: style,
color: color,
bgcolor: bgcolor,
}
}
type styleDef struct {
cornerTL string
cornerTR string
cornerBL string
cornerBR string
lineH string
lineV string
}
var styleDefs = []styleDef{
{"\u2554", "\u2557", "\u255a", "\u255d", "\u2550", "\u2551"},
{"\u256d", "\u256e", "\u2570", "\u256f", "\u2500", "\u2502"},
{"\u250c", "\u2510", "\u2514", "\u2518", "\u254c", "\u254e"},
{" ", " ", " ", " ", " ", " "},
}
// Pen struct
type Pen struct {
style Style
color int
bgcolor int
}
// Drawing struct
type Drawing struct {
buf *strings.Builder
width int
}
func (p *Pen) drawTopBars(buf io.Writer, labels ...string) {
style := styleDefs[p.style]
for _, label := range labels {
bar := strings.Repeat(style.lineH, len(label)+2)
fmt.Fprintf(buf, " ")
fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
fmt.Fprintf(buf, "%s%s%s", style.cornerTL, bar, style.cornerTR)
fmt.Fprintf(buf, "\x1b[%dm", 0)
}
fmt.Fprintf(buf, "\n")
}
func (p *Pen) drawBottomBars(buf io.Writer, labels ...string) {
style := styleDefs[p.style]
for _, label := range labels {
bar := strings.Repeat(style.lineH, len(label)+2)
fmt.Fprintf(buf, " ")
fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
fmt.Fprintf(buf, "%s%s%s", style.cornerBL, bar, style.cornerBR)
fmt.Fprintf(buf, "\x1b[%dm", 0)
}
fmt.Fprintf(buf, "\n")
}
func (p *Pen) drawLabels(buf io.Writer, labels ...string) {
style := styleDefs[p.style]
for _, label := range labels {
fmt.Fprintf(buf, " ")
fmt.Fprintf(buf, "\x1b[%d;%dm", p.color, p.bgcolor)
fmt.Fprintf(buf, "%s %s %s", style.lineV, label, style.lineV)
fmt.Fprintf(buf, "\x1b[%dm", 0)
}
fmt.Fprintf(buf, "\n")
}
// DrawArrow between boxes
func (p *Pen) DrawArrow() *Drawing {
drawing := &Drawing{
buf: new(strings.Builder),
width: 1,
}
fmt.Fprintf(drawing.buf, "\x1b[%dm", p.color)
fmt.Fprintf(drawing.buf, "\u2b07")
fmt.Fprintf(drawing.buf, "\x1b[%dm", 0)
return drawing
}
// DrawBoxes to draw boxes
func (p *Pen) DrawBoxes(labels ...string) *Drawing {
width := 0
for _, l := range labels {
width += len(l) + 2 + 2 + 1
}
drawing := &Drawing{
buf: new(strings.Builder),
width: width,
}
p.drawTopBars(drawing.buf, labels...)
p.drawLabels(drawing.buf, labels...)
p.drawBottomBars(drawing.buf, labels...)
return drawing
}
// Draw to writer
func (d *Drawing) Draw(writer io.Writer, centerOnWidth int) {
padSize := (centerOnWidth - d.GetWidth()) / 2
if padSize < 0 {
padSize = 0
}
for _, l := range strings.Split(d.buf.String(), "\n") {
if len(l) > 0 {
padding := strings.Repeat(" ", padSize)
fmt.Fprintf(writer, "%s%s\n", padding, l)
}
}
}
// GetWidth of drawing
func (d *Drawing) GetWidth() int {
return d.width
}

25
pkg/common/dryrun.go Normal file
View File

@@ -0,0 +1,25 @@
package common
import (
"context"
)
type dryrunContextKey string
const dryrunContextKeyVal = dryrunContextKey("dryrun")
// Dryrun returns true if the current context is dryrun
func Dryrun(ctx context.Context) bool {
val := ctx.Value(dryrunContextKeyVal)
if val != nil {
if dryrun, ok := val.(bool); ok {
return dryrun
}
}
return false
}
// WithDryrun adds a value to the context for dryrun
func WithDryrun(ctx context.Context, dryrun bool) context.Context {
return context.WithValue(ctx, dryrunContextKeyVal, dryrun)
}

243
pkg/common/executor.go Normal file
View File

@@ -0,0 +1,243 @@
package common
import (
"context"
"errors"
"fmt"
log "github.com/sirupsen/logrus"
)
// Warning that implements `error` but safe to ignore
type Warning struct {
Message string
}
// Error the contract for error
func (w Warning) Error() string {
return w.Message
}
// Warningf create a warning
func Warningf(format string, args ...interface{}) Warning {
w := Warning{
Message: fmt.Sprintf(format, args...),
}
return w
}
// Executor define contract for the steps of a workflow
type Executor func(ctx context.Context) error
// Conditional define contract for the conditional predicate
type Conditional func(ctx context.Context) bool
// NewInfoExecutor is an executor that logs messages
func NewInfoExecutor(format string, args ...interface{}) Executor {
return func(ctx context.Context) error {
logger := Logger(ctx)
logger.Infof(format, args...)
return nil
}
}
// NewDebugExecutor is an executor that logs messages
func NewDebugExecutor(format string, args ...interface{}) Executor {
return func(ctx context.Context) error {
logger := Logger(ctx)
logger.Debugf(format, args...)
return nil
}
}
// NewPipelineExecutor creates a new executor from a series of other executors
func NewPipelineExecutor(executors ...Executor) Executor {
if len(executors) == 0 {
return func(_ context.Context) error {
return nil
}
}
var rtn Executor
for _, executor := range executors {
if rtn == nil {
rtn = executor
} else {
rtn = rtn.Then(executor)
}
}
return rtn
}
// NewConditionalExecutor creates a new executor based on conditions
func NewConditionalExecutor(conditional Conditional, trueExecutor Executor, falseExecutor Executor) Executor {
return func(ctx context.Context) error {
if conditional(ctx) {
if trueExecutor != nil {
return trueExecutor(ctx)
}
} else {
if falseExecutor != nil {
return falseExecutor(ctx)
}
}
return nil
}
}
// NewErrorExecutor creates a new executor that always errors out
func NewErrorExecutor(err error) Executor {
return func(_ context.Context) error {
return err
}
}
// NewParallelExecutor creates an executor that runs the given executors with at most `parallel` of them in flight at once (minimum 1)
func NewParallelExecutor(parallel int, executors ...Executor) Executor {
return func(ctx context.Context) error {
work := make(chan Executor, len(executors))
errs := make(chan error, len(executors))
if 1 > parallel {
log.Debugf("Parallel tasks (%d) below minimum, setting to 1", parallel)
parallel = 1
}
for i := 0; i < parallel; i++ {
go func(work <-chan Executor, errs chan<- error) {
for executor := range work {
errs <- executor(ctx)
}
}(work, errs)
}
for i := 0; i < len(executors); i++ {
work <- executors[i]
}
close(work)
// Wait for all executors to finish so their resources are cleaned up; the first error received wins.
var firstErr error
for i := 0; i < len(executors); i++ {
err := <-errs
if firstErr == nil {
firstErr = err
}
}
if err := ctx.Err(); err != nil {
return err
}
return firstErr
}
}
func NewFieldExecutor(name string, value interface{}, exec Executor) Executor {
return func(ctx context.Context) error {
return exec(WithLogger(ctx, Logger(ctx).WithField(name, value)))
}
}
// ThenError runs `then` with the result of this executor (nil on success); warnings are logged and passed through
func (e Executor) ThenError(then func(ctx context.Context, err error) error) Executor {
return func(ctx context.Context) error {
err := e(ctx)
if err != nil {
switch err.(type) {
case Warning:
Logger(ctx).Warning(err.Error())
default:
return then(ctx, err)
}
}
if ctx.Err() != nil {
return ctx.Err()
}
return then(ctx, err)
}
}
// Then runs another executor if this executor succeeds
func (e Executor) Then(then Executor) Executor {
return func(ctx context.Context) error {
err := e(ctx)
if err != nil {
switch err.(type) {
case Warning:
Logger(ctx).Warning(err.Error())
default:
return err
}
}
if ctx.Err() != nil {
return ctx.Err()
}
return then(ctx)
}
}
// OnError runs another executor if this executor fails, joining both errors
func (e Executor) OnError(then Executor) Executor {
return func(ctx context.Context) error {
err := e(ctx)
if err != nil {
switch err.(type) {
case Warning:
Logger(ctx).Warning(err.Error())
default:
return errors.Join(err, then(ctx))
}
}
if ctx.Err() != nil {
return ctx.Err()
}
return nil
}
}
// If only runs this executor if conditional is true
func (e Executor) If(conditional Conditional) Executor {
return func(ctx context.Context) error {
if conditional(ctx) {
return e(ctx)
}
return nil
}
}
// IfNot only runs this executor if conditional is false
func (e Executor) IfNot(conditional Conditional) Executor {
return func(ctx context.Context) error {
if !conditional(ctx) {
return e(ctx)
}
return nil
}
}
// IfBool only runs this executor if conditional is true
func (e Executor) IfBool(conditional bool) Executor {
return e.If(func(_ context.Context) bool {
return conditional
})
}
// Finally adds an executor to run after other executor
func (e Executor) Finally(finally Executor) Executor {
return func(ctx context.Context) (err error) {
defer func() {
err2 := finally(ctx)
if err2 != nil {
err = fmt.Errorf("error occurred running finally: %v (original error: %v)", err2, err)
}
}()
err = e(ctx)
return
}
}
// Not return an inverted conditional
func (c Conditional) Not() Conditional {
return func(ctx context.Context) bool {
return !c(ctx)
}
}

152
pkg/common/executor_test.go Normal file
View File

@@ -0,0 +1,152 @@
package common
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestNewWorkflow(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
// empty
emptyWorkflow := NewPipelineExecutor()
assert.Nil(emptyWorkflow(ctx))
// error case
errorWorkflow := NewErrorExecutor(fmt.Errorf("test error"))
assert.NotNil(errorWorkflow(ctx))
// multiple success case
runcount := 0
successWorkflow := NewPipelineExecutor(
func(_ context.Context) error {
runcount++
return nil
},
func(_ context.Context) error {
runcount++
return nil
})
assert.Nil(successWorkflow(ctx))
assert.Equal(2, runcount)
}
func TestNewConditionalExecutor(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
trueCount := 0
falseCount := 0
err := NewConditionalExecutor(func(_ context.Context) bool {
return false
}, func(_ context.Context) error {
trueCount++
return nil
}, func(_ context.Context) error {
falseCount++
return nil
})(ctx)
assert.Nil(err)
assert.Equal(0, trueCount)
assert.Equal(1, falseCount)
err = NewConditionalExecutor(func(_ context.Context) bool {
return true
}, func(_ context.Context) error {
trueCount++
return nil
}, func(_ context.Context) error {
falseCount++
return nil
})(ctx)
assert.Nil(err)
assert.Equal(1, trueCount)
assert.Equal(1, falseCount)
}
func TestNewParallelExecutor(t *testing.T) {
assert := assert.New(t)
ctx := context.Background()
count := 0
activeCount := 0
maxCount := 0
emptyWorkflow := NewPipelineExecutor(func(_ context.Context) error {
count++
activeCount++
if activeCount > maxCount {
maxCount = activeCount
}
time.Sleep(2 * time.Second)
activeCount--
return nil
})
err := NewParallelExecutor(2, emptyWorkflow, emptyWorkflow, emptyWorkflow)(ctx)
assert.Equal(3, count, "should run all 3 executors")
assert.Equal(2, maxCount, "should run at most 2 executors in parallel")
assert.Nil(err)
// Reset to test running the executor with 0 parallelism
count = 0
activeCount = 0
maxCount = 0
errSingle := NewParallelExecutor(0, emptyWorkflow, emptyWorkflow, emptyWorkflow)(ctx)
assert.Equal(3, count, "should run all 3 executors")
assert.Equal(1, maxCount, "should run at most 1 executors in parallel")
assert.Nil(errSingle)
}
func TestNewParallelExecutorFailed(t *testing.T) {
assert := assert.New(t)
ctx, cancel := context.WithCancel(context.Background())
cancel()
count := 0
errorWorkflow := NewPipelineExecutor(func(_ context.Context) error {
count++
return fmt.Errorf("fake error")
})
err := NewParallelExecutor(1, errorWorkflow)(ctx)
assert.Equal(1, count)
assert.ErrorIs(err, context.Canceled)
}
func TestNewParallelExecutorCanceled(t *testing.T) {
assert := assert.New(t)
ctx, cancel := context.WithCancel(context.Background())
cancel()
errExpected := fmt.Errorf("fake error")
count := 0
successWorkflow := NewPipelineExecutor(func(_ context.Context) error {
count++
return nil
})
errorWorkflow := NewPipelineExecutor(func(_ context.Context) error {
count++
return errExpected
})
err := NewParallelExecutor(3, errorWorkflow, successWorkflow, successWorkflow)(ctx)
assert.Equal(3, count)
assert.Error(errExpected, err)
}

73
pkg/common/file.go Normal file
View File

@@ -0,0 +1,73 @@
package common
import (
"fmt"
"io"
"os"
)
// CopyFile copy file
func CopyFile(source string, dest string) (err error) {
sourcefile, err := os.Open(source)
if err != nil {
return err
}
defer sourcefile.Close()
destfile, err := os.Create(dest)
if err != nil {
return err
}
defer destfile.Close()
_, err = io.Copy(destfile, sourcefile)
if err == nil {
// preserve the source file's permissions on the copy
if sourceinfo, statErr := os.Stat(source); statErr == nil {
_ = os.Chmod(dest, sourceinfo.Mode())
}
}
return
}
// CopyDir recursive copy of directory
func CopyDir(source string, dest string) (err error) {
// get properties of source dir
sourceinfo, err := os.Stat(source)
if err != nil {
return err
}
// create dest dir
err = os.MkdirAll(dest, sourceinfo.Mode())
if err != nil {
return err
}
objects, err := os.ReadDir(source)
for _, obj := range objects {
sourcefilepointer := source + "/" + obj.Name()
destinationfilepointer := dest + "/" + obj.Name()
if obj.IsDir() {
// create sub-directories - recursively
err = CopyDir(sourcefilepointer, destinationfilepointer)
if err != nil {
fmt.Println(err)
}
} else {
// perform copy
err = CopyFile(sourcefilepointer, destinationfilepointer)
if err != nil {
fmt.Println(err)
}
}
}
return err
}

430
pkg/common/git/git.go Normal file
View File

@@ -0,0 +1,430 @@
package git
import (
"context"
"errors"
"fmt"
"io"
"os"
"path"
"regexp"
"strings"
"sync"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/mattn/go-isatty"
log "github.com/sirupsen/logrus"
"github.com/actions-oss/act-cli/pkg/common"
)
var (
codeCommitHTTPRegex = regexp.MustCompile(`^https?://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`)
codeCommitSSHRegex = regexp.MustCompile(`ssh://git-codecommit\.(.+)\.amazonaws.com/v1/repos/(.+)$`)
githubHTTPRegex = regexp.MustCompile(`^https?://.*github.com.*/(.+)/(.+?)(?:.git)?$`)
githubSSHRegex = regexp.MustCompile(`github.com[:/](.+)/(.+?)(?:.git)?$`)
cloneLock sync.Mutex
ErrShortRef = errors.New("short SHA references are not supported")
ErrNoRepo = errors.New("unable to find git repo")
)
type Error struct {
err error
commit string
}
func (e *Error) Error() string {
return e.err.Error()
}
func (e *Error) Unwrap() error {
return e.err
}
func (e *Error) Commit() string {
return e.commit
}
// FindGitRevision get the current git revision
func FindGitRevision(ctx context.Context, file string) (shortSha string, sha string, err error) {
logger := common.Logger(ctx)
gitDir, err := git.PlainOpenWithOptions(
file,
&git.PlainOpenOptions{
DetectDotGit: true,
EnableDotGitCommonDir: true,
},
)
if err != nil {
logger.WithError(err).Error("path", file, "not located inside a git repository")
return "", "", err
}
head, err := gitDir.Reference(plumbing.HEAD, true)
if err != nil {
return "", "", err
}
if head.Hash().IsZero() {
return "", "", fmt.Errorf("head sha1 could not be resolved")
}
hash := head.Hash().String()
logger.Debugf("Found revision: %s", hash)
return hash[:7], strings.TrimSpace(hash), nil
}
// FindGitRef get the current git ref
func FindGitRef(ctx context.Context, file string) (string, error) {
logger := common.Logger(ctx)
logger.Debugf("Loading revision from git directory")
_, ref, err := FindGitRevision(ctx, file)
if err != nil {
return "", err
}
logger.Debugf("HEAD points to '%s'", ref)
// Prefer the git library to iterate over the references and find a matching tag or branch.
var refTag = ""
var refBranch = ""
repo, err := git.PlainOpenWithOptions(
file,
&git.PlainOpenOptions{
DetectDotGit: true,
EnableDotGitCommonDir: true,
},
)
if err != nil {
return "", err
}
iter, err := repo.References()
if err != nil {
return "", err
}
// find the reference that matches the revision's hash
err = iter.ForEach(func(r *plumbing.Reference) error {
/* tags and branches will have the same hash
* when a user checks out a tag, it is not mentioned explicitly
* in the go-git package, we must identify the revision
* then check if any tag matches that revision,
* if so then we checked out a tag
* else we look for branches and if matches,
* it means we checked out a branch
*
* If a branch matches first we must continue and check all tags (all references)
* in case we match with a tag later in the iteration
*/
if r.Hash().String() == ref {
if r.Name().IsTag() {
refTag = r.Name().String()
}
if r.Name().IsBranch() {
refBranch = r.Name().String()
}
}
// we found what we were looking for
if refTag != "" && refBranch != "" {
return storer.ErrStop
}
return nil
})
if err != nil {
return "", err
}
// order matters here see above comment.
if refTag != "" {
return refTag, nil
}
if refBranch != "" {
return refBranch, nil
}
return "", fmt.Errorf("failed to identify reference (tag/branch) for the checked-out revision '%s'", ref)
}
// FindGithubRepo get the repo
func FindGithubRepo(ctx context.Context, file, githubInstance, remoteName string) (string, error) {
if remoteName == "" {
remoteName = "origin"
}
url, err := findGitRemoteURL(ctx, file, remoteName)
if err != nil {
return "", err
}
_, slug, err := findGitSlug(url, githubInstance)
return slug, err
}
func findGitRemoteURL(_ context.Context, file, remoteName string) (string, error) {
repo, err := git.PlainOpenWithOptions(
file,
&git.PlainOpenOptions{
DetectDotGit: true,
EnableDotGitCommonDir: true,
},
)
if err != nil {
return "", err
}
remote, err := repo.Remote(remoteName)
if err != nil {
return "", err
}
if len(remote.Config().URLs) < 1 {
return "", fmt.Errorf("remote '%s' exists but has no URL", remoteName)
}
return remote.Config().URLs[0], nil
}
type findStringSubmatcher interface {
FindStringSubmatch(string) []string
}
func matchesRegex(url string, matchers ...findStringSubmatcher) []string {
for _, regex := range matchers {
if matches := regex.FindStringSubmatch(url); matches != nil {
return matches
}
}
return nil
}
// TODO deprecate and remove githubInstance parameter
func findGitSlug(url string, _ /* githubInstance */ string) (string, string, error) {
if matches := matchesRegex(url, codeCommitHTTPRegex, codeCommitSSHRegex); matches != nil {
return "CodeCommit", matches[2], nil
}
if matches := matchesRegex(url, githubHTTPRegex, githubSSHRegex); matches != nil {
return "GitHub", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil
}
if matches := matchesRegex(url,
regexp.MustCompile(`^https?://(?:[^/]+)/([^/]+)/([^/]+)(?:.git)?$`),
regexp.MustCompile(`([^/]+)[:/]([^/]+)/([^/]+)(?:.git)?$`),
); matches != nil {
return "GitHubEnterprise", fmt.Sprintf("%s/%s", matches[1], matches[2]), nil
}
return "", url, nil
}
// NewGitCloneExecutorInput the input for the NewGitCloneExecutor
type NewGitCloneExecutorInput struct {
URL string
Ref string
Dir string
Token string
OfflineMode bool
}
// CloneIfRequired ...
func CloneIfRequired(ctx context.Context, refName plumbing.ReferenceName, input NewGitCloneExecutorInput, logger log.FieldLogger) (*git.Repository, error) {
// If the remote URL has changed, remove the directory and clone again.
if r, err := git.PlainOpen(input.Dir); err == nil {
if remote, err := r.Remote("origin"); err == nil {
if len(remote.Config().URLs) > 0 && remote.Config().URLs[0] != input.URL {
_ = os.RemoveAll(input.Dir)
}
}
}
r, err := git.PlainOpen(input.Dir)
if err != nil {
var progressWriter io.Writer
if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) {
if entry, ok := logger.(*log.Entry); ok {
progressWriter = entry.WriterLevel(log.DebugLevel)
} else if lgr, ok := logger.(*log.Logger); ok {
progressWriter = lgr.WriterLevel(log.DebugLevel)
} else {
log.Errorf("unable to get writer from logger (type=%T)", logger)
progressWriter = os.Stdout
}
}
cloneOptions := git.CloneOptions{
URL: input.URL,
Progress: progressWriter,
}
if input.Token != "" {
cloneOptions.Auth = &http.BasicAuth{
Username: "token",
Password: input.Token,
}
}
r, err = git.PlainCloneContext(ctx, input.Dir, false, &cloneOptions)
if err != nil {
logger.Errorf("unable to clone %v %s: %v", input.URL, refName, err)
return nil, err
}
if err = os.Chmod(input.Dir, 0o755); err != nil {
return nil, err
}
}
return r, nil
}
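// gitOptions builds the fetch and pull options used after cloning: all refs are
// mirrored locally, pulls are forced, and token auth is attached when a token is set.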
func gitOptions(token string) (fetchOptions git.FetchOptions, pullOptions git.PullOptions) {
fetchOptions.RefSpecs = []config.RefSpec{"refs/*:refs/*", "HEAD:refs/heads/HEAD"}
pullOptions.Force = true
if token != "" {
auth := &http.BasicAuth{
Username: "token",
Password: token,
}
fetchOptions.Auth = auth
pullOptions.Auth = auth
}
return fetchOptions, pullOptions
}
// NewGitCloneExecutor creates an executor to clone git repos
//
//nolint:gocyclo
func NewGitCloneExecutor(input NewGitCloneExecutorInput) common.Executor {
return func(ctx context.Context) error {
logger := common.Logger(ctx)
logger.Infof(" \u2601 git clone '%s' # ref=%s", input.URL, input.Ref)
logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
cloneLock.Lock()
defer cloneLock.Unlock()
refName := plumbing.ReferenceName(fmt.Sprintf("refs/heads/%s", input.Ref))
r, err := CloneIfRequired(ctx, refName, input, logger)
if err != nil {
return err
}
isOfflineMode := input.OfflineMode
// fetch latest changes
fetchOptions, pullOptions := gitOptions(input.Token)
if !isOfflineMode {
err = r.Fetch(&fetchOptions)
if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
return err
}
}
var hash *plumbing.Hash
rev := plumbing.Revision(input.Ref)
if hash, err = r.ResolveRevision(rev); err != nil {
logger.Errorf("unable to resolve %s: %v", input.Ref, err)
}
if err == nil && hash.String() != input.Ref && len(input.Ref) >= 4 && strings.HasPrefix(hash.String(), input.Ref) {
return &Error{
err: ErrShortRef,
commit: hash.String(),
}
}
// At this point we need to know if it's a tag or a branch
// And the easiest way to do it is duck typing
//
// If err is nil, it's a tag so let's proceed with that hash like we would if
// it was a sha
refType := "tag"
rev = plumbing.Revision(path.Join("refs", "tags", input.Ref))
if _, err := r.Tag(input.Ref); errors.Is(err, git.ErrTagNotFound) {
rName := plumbing.ReferenceName(path.Join("refs", "remotes", "origin", input.Ref))
if _, err := r.Reference(rName, false); errors.Is(err, plumbing.ErrReferenceNotFound) {
refType = "sha"
rev = plumbing.Revision(input.Ref)
} else {
refType = "branch"
rev = plumbing.Revision(rName)
}
}
if hash, err = r.ResolveRevision(rev); err != nil {
logger.Errorf("unable to resolve %s: %v", input.Ref, err)
return err
}
var w *git.Worktree
if w, err = r.Worktree(); err != nil {
return err
}
// If the hash resolved doesn't match the ref provided in a workflow then we're
// using a branch or tag ref, not a sha
//
// Repos on disk point to commit hashes, and need to checkout input.Ref before
// we try and pull down any changes
if hash.String() != input.Ref && refType == "branch" {
logger.Debugf("Provided ref is not a sha. Checking out branch before pulling changes")
sourceRef := plumbing.ReferenceName(path.Join("refs", "remotes", "origin", input.Ref))
if err = w.Checkout(&git.CheckoutOptions{
Branch: sourceRef,
Force: true,
}); err != nil {
logger.Errorf("unable to checkout %s: %v", sourceRef, err)
return err
}
}
if !isOfflineMode {
if err = w.Pull(&pullOptions); err != nil && err != git.NoErrAlreadyUpToDate {
logger.Debugf("Unable to pull %s: %v", refName, err)
}
}
logger.Debugf("Cloned %s to %s", input.URL, input.Dir)
if hash.String() != input.Ref && refType == "branch" {
logger.Debugf("Provided ref is not a sha. Updating branch ref after pull")
if hash, err = r.ResolveRevision(rev); err != nil {
logger.Errorf("unable to resolve %s: %v", input.Ref, err)
return err
}
}
if err = w.Checkout(&git.CheckoutOptions{
Hash: *hash,
Force: true,
}); err != nil {
logger.Errorf("unable to checkout %s: %v", *hash, err)
return err
}
if err = w.Reset(&git.ResetOptions{
Mode: git.HardReset,
Commit: *hash,
}); err != nil {
logger.Errorf("unable to reset to %s: %v", hash.String(), err)
return err
}
logger.Debugf("Checked out %s", input.Ref)
return nil
}
}

278
pkg/common/git/git_test.go Normal file
View File

@@ -0,0 +1,278 @@
package git
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"syscall"
"testing"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/actions-oss/act-cli/pkg/common"
)
func TestFindGitSlug(t *testing.T) {
assert := assert.New(t)
var slugTests = []struct {
url string // input
provider string // expected result
slug string // expected result
}{
{"https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name", "CodeCommit", "my-repo-name"},
{"ssh://git-codecommit.us-west-2.amazonaws.com/v1/repos/my-repo", "CodeCommit", "my-repo"},
{"git@github.com:actions-oss/act-cli.git", "GitHub", "actions-oss/act-cli"},
{"git@github.com:actions-oss/act-cli", "GitHub", "actions-oss/act-cli"},
{"https://github.com/actions-oss/act-cli.git", "GitHub", "actions-oss/act-cli"},
{"http://github.com/actions-oss/act-cli.git", "GitHub", "actions-oss/act-cli"},
{"https://github.com/actions-oss/act-cli", "GitHub", "actions-oss/act-cli"},
{"http://github.com/actions-oss/act-cli", "GitHub", "actions-oss/act-cli"},
{"git+ssh://git@github.com/owner/repo.git", "GitHub", "owner/repo"},
{"http://myotherrepo.com/act.git", "", "http://myotherrepo.com/act.git"},
{"https://gitea.com/actions-oss/act-cli.git", "GitHubEnterprise", "actions-oss/act-cli.git"},
}
for _, tt := range slugTests {
provider, slug, err := findGitSlug(tt.url, "github.com")
assert.NoError(err)
assert.Equal(tt.provider, provider)
assert.Equal(tt.slug, slug)
}
}
func testDir(t *testing.T) string {
basedir, err := os.MkdirTemp("", "act-test")
require.NoError(t, err)
t.Cleanup(func() { _ = os.RemoveAll(basedir) })
return basedir
}
func cleanGitHooks(dir string) error {
hooksDir := filepath.Join(dir, ".git", "hooks")
files, err := os.ReadDir(hooksDir)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
for _, f := range files {
if f.IsDir() {
continue
}
relName := filepath.Join(hooksDir, f.Name())
if err := os.Remove(relName); err != nil {
return err
}
}
return nil
}
func TestFindGitRemoteURL(t *testing.T) {
assert := assert.New(t)
basedir := testDir(t)
gitConfig()
err := gitCmd("init", basedir)
assert.NoError(err)
err = cleanGitHooks(basedir)
assert.NoError(err)
remoteURL := "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/my-repo-name"
err = gitCmd("-C", basedir, "remote", "add", "origin", remoteURL)
assert.NoError(err)
u, err := findGitRemoteURL(context.Background(), basedir, "origin")
assert.NoError(err)
assert.Equal(remoteURL, u)
remoteURL = "git@github.com/AwesomeOwner/MyAwesomeRepo.git"
err = gitCmd("-C", basedir, "remote", "add", "upstream", remoteURL)
assert.NoError(err)
u, err = findGitRemoteURL(context.Background(), basedir, "upstream")
assert.NoError(err)
assert.Equal(remoteURL, u)
}
func TestGitFindRef(t *testing.T) {
basedir := testDir(t)
gitConfig()
for name, tt := range map[string]struct {
Prepare func(t *testing.T, dir string)
Assert func(t *testing.T, ref string, err error)
}{
"new_repo": {
Prepare: func(_ *testing.T, _ string) {},
Assert: func(t *testing.T, _ string, err error) {
require.Error(t, err)
},
},
"new_repo_with_commit": {
Prepare: func(t *testing.T, dir string) {
require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "msg"))
},
Assert: func(t *testing.T, ref string, err error) {
require.NoError(t, err)
require.Equal(t, "refs/heads/master", ref)
},
},
"current_head_is_tag": {
Prepare: func(t *testing.T, dir string) {
require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "commit msg"))
require.NoError(t, gitCmd("-C", dir, "tag", "v1.2.3"))
require.NoError(t, gitCmd("-C", dir, "checkout", "v1.2.3"))
},
Assert: func(t *testing.T, ref string, err error) {
require.NoError(t, err)
require.Equal(t, "refs/tags/v1.2.3", ref)
},
},
"current_head_is_same_as_tag": {
Prepare: func(t *testing.T, dir string) {
require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "1.4.2 release"))
require.NoError(t, gitCmd("-C", dir, "tag", "v1.4.2"))
},
Assert: func(t *testing.T, ref string, err error) {
require.NoError(t, err)
require.Equal(t, "refs/tags/v1.4.2", ref)
},
},
"current_head_is_not_tag": {
Prepare: func(t *testing.T, dir string) {
require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "msg"))
require.NoError(t, gitCmd("-C", dir, "tag", "v1.4.2"))
require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "msg2"))
},
Assert: func(t *testing.T, ref string, err error) {
require.NoError(t, err)
require.Equal(t, "refs/heads/master", ref)
},
},
"current_head_is_another_branch": {
Prepare: func(t *testing.T, dir string) {
require.NoError(t, gitCmd("-C", dir, "checkout", "-b", "mybranch"))
require.NoError(t, gitCmd("-C", dir, "commit", "--allow-empty", "-m", "msg"))
},
Assert: func(t *testing.T, ref string, err error) {
require.NoError(t, err)
require.Equal(t, "refs/heads/mybranch", ref)
},
},
} {
t.Run(name, func(t *testing.T) {
dir := filepath.Join(basedir, name)
require.NoError(t, os.MkdirAll(dir, 0o755))
require.NoError(t, gitCmd("-C", dir, "init", "--initial-branch=master"))
require.NoError(t, cleanGitHooks(dir))
tt.Prepare(t, dir)
ref, err := FindGitRef(context.Background(), dir)
tt.Assert(t, ref, err)
})
}
}
func TestGitCloneExecutor(t *testing.T) {
for name, tt := range map[string]struct {
Err error
URL, Ref string
}{
"tag": {
Err: nil,
URL: "https://github.com/actions/checkout",
Ref: "v2",
},
"branch": {
Err: nil,
URL: "https://github.com/anchore/scan-action",
Ref: "act-fails",
},
"sha": {
Err: nil,
URL: "https://github.com/actions/checkout",
Ref: "5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f", // v2
},
"short-sha": {
Err: &Error{ErrShortRef, "5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f"},
URL: "https://github.com/actions/checkout",
Ref: "5a4ac90", // v2
},
} {
t.Run(name, func(t *testing.T) {
clone := NewGitCloneExecutor(NewGitCloneExecutorInput{
URL: tt.URL,
Ref: tt.Ref,
Dir: testDir(t),
})
err := clone(context.Background())
if tt.Err != nil {
assert.Error(t, err)
assert.Equal(t, tt.Err, err)
} else {
assert.Empty(t, err)
}
})
}
}
func gitConfig() {
if os.Getenv("GITHUB_ACTIONS") == "true" {
var err error
if err = gitCmd("config", "--global", "user.email", "test@test.com"); err != nil {
log.Error(err)
}
if err = gitCmd("config", "--global", "user.name", "Unit Test"); err != nil {
log.Error(err)
}
}
}
func gitCmd(args ...string) error {
cmd := exec.Command("git", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if exitError, ok := err.(*exec.ExitError); ok {
if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok {
return fmt.Errorf("exit error %d", waitStatus.ExitStatus())
}
return exitError
}
return nil
}
func TestCloneIfRequired(t *testing.T) {
tempDir := t.TempDir()
ctx := context.Background()
t.Run("clone", func(t *testing.T) {
repo, err := CloneIfRequired(ctx, "refs/heads/main", NewGitCloneExecutorInput{
URL: "https://github.com/actions/checkout",
Dir: tempDir,
}, common.Logger(ctx))
assert.NoError(t, err)
assert.NotNil(t, repo)
})
t.Run("clone different remote", func(t *testing.T) {
repo, err := CloneIfRequired(ctx, "refs/heads/main", NewGitCloneExecutorInput{
URL: "https://github.com/actions/setup-go",
Dir: tempDir,
}, common.Logger(ctx))
require.NoError(t, err)
require.NotNil(t, repo)
remote, err := repo.Remote("origin")
require.NoError(t, err)
require.Len(t, remote.Config().URLs, 1)
assert.Equal(t, "https://github.com/actions/setup-go", remote.Config().URLs[0])
})
}

66
pkg/common/job_error.go Normal file
View File

@@ -0,0 +1,66 @@
package common
import (
"context"
)
type jobErrorContextKey string
const jobErrorContextKeyVal = jobErrorContextKey("job.error")
type jobCancelCtx string
const JobCancelCtxVal = jobCancelCtx("job.cancel")
// JobError returns the job error for the current context, if any
func JobError(ctx context.Context) error {
val := ctx.Value(jobErrorContextKeyVal)
if val != nil {
if container, ok := val.(map[string]error); ok {
return container["error"]
}
}
return nil
}
func SetJobError(ctx context.Context, err error) {
ctx.Value(jobErrorContextKeyVal).(map[string]error)["error"] = err
}
// WithJobErrorContainer adds a value to the context as a container for an error
func WithJobErrorContainer(ctx context.Context) context.Context {
container := map[string]error{}
return context.WithValue(ctx, jobErrorContextKeyVal, container)
}
func WithJobCancelContext(ctx context.Context, cancelContext context.Context) context.Context {
return context.WithValue(ctx, JobCancelCtxVal, cancelContext)
}
func JobCancelContext(ctx context.Context) context.Context {
val := ctx.Value(JobCancelCtxVal)
if val != nil {
if container, ok := val.(context.Context); ok {
return container
}
}
return nil
}
// EarlyCancelContext returns a context derived from ctx that is canceled as soon as either ctx or the job cancel context attached to it (if any) is canceled.
func EarlyCancelContext(ctx context.Context) (context.Context, context.CancelFunc) {
val := JobCancelContext(ctx)
if val != nil {
earlyCtx, cancel := context.WithCancel(ctx)
go func() {
defer cancel()
select {
case <-earlyCtx.Done():
case <-ctx.Done():
case <-val.Done():
}
}()
return earlyCtx, cancel
}
return ctx, func() {}
}
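
A minimal sketch of the intended usage of the error-container pattern above: the container must be installed with WithJobErrorContainer before SetJobError is called, because SetJobError assumes the map is already present in the context.

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/actions-oss/act-cli/pkg/common"
)

func main() {
	// Install the mutable error container first; SetJobError panics if it is missing.
	ctx := common.WithJobErrorContainer(context.Background())

	// Somewhere inside job execution, a step records its failure.
	common.SetJobError(ctx, errors.New("step failed"))

	// The caller reads the recorded error back out of the same context.
	if err := common.JobError(ctx); err != nil {
		fmt.Println("job error:", err)
	}
}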

50
pkg/common/line_writer.go Normal file
View File

@@ -0,0 +1,50 @@
package common
import (
"bytes"
"io"
)
// LineHandler is a callback function for handling a line
type LineHandler func(line string) bool
type lineWriter struct {
buffer bytes.Buffer
handlers []LineHandler
}
// NewLineWriter creates a new instance of a line writer
func NewLineWriter(handlers ...LineHandler) io.Writer {
w := new(lineWriter)
w.handlers = handlers
return w
}
func (lw *lineWriter) Write(p []byte) (n int, err error) {
pBuf := bytes.NewBuffer(p)
written := 0
for {
line, err := pBuf.ReadString('\n')
w, _ := lw.buffer.WriteString(line)
written += w
if err == nil {
lw.handleLine(lw.buffer.String())
lw.buffer.Reset()
} else if err == io.EOF {
break
} else {
return written, err
}
}
return written, nil
}
func (lw *lineWriter) handleLine(line string) {
for _, h := range lw.handlers {
ok := h(line)
if !ok {
break
}
}
}
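
As a quick illustration of the handler contract above (a handler returning false stops the remaining handlers for that line), a sketch that tees command output through the line writer; the exec command is illustrative, and the test file below exercises the buffering behavior in more detail.

package main

import (
	"fmt"
	"os/exec"

	"github.com/actions-oss/act-cli/pkg/common"
)

func main() {
	// Each complete line (including its trailing newline) is passed to the handlers in order.
	out := common.NewLineWriter(func(line string) bool {
		fmt.Print("got line: ", line)
		return true // returning false would skip any remaining handlers for this line
	})

	cmd := exec.Command("echo", "hello world") // illustrative command
	cmd.Stdout = out
	_ = cmd.Run()
}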

View File

@@ -0,0 +1,37 @@
package common
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestLineWriter(t *testing.T) {
lines := make([]string, 0)
lineHandler := func(s string) bool {
lines = append(lines, s)
return true
}
lineWriter := NewLineWriter(lineHandler)
assert := assert.New(t)
write := func(s string) {
n, err := lineWriter.Write([]byte(s))
assert.NoError(err)
assert.Equal(len(s), n, s)
}
write("hello")
write(" ")
write("world!!\nextra")
write(" line\n and another\nlast")
write(" line\n")
write("no newline here...")
assert.Len(lines, 4)
assert.Equal("hello world!!\n", lines[0])
assert.Equal("extra line\n", lines[1])
assert.Equal(" and another\n", lines[2])
assert.Equal("last line\n", lines[3])
}

27
pkg/common/logger.go Normal file
View File

@@ -0,0 +1,27 @@
package common
import (
"context"
"github.com/sirupsen/logrus"
)
type loggerContextKey string
const loggerContextKeyVal = loggerContextKey("logrus.FieldLogger")
// Logger returns the appropriate logger for the current context
func Logger(ctx context.Context) logrus.FieldLogger {
val := ctx.Value(loggerContextKeyVal)
if val != nil {
if logger, ok := val.(logrus.FieldLogger); ok {
return logger
}
}
return logrus.StandardLogger()
}
// WithLogger adds a value to the context for the logger
func WithLogger(ctx context.Context, logger logrus.FieldLogger) context.Context {
return context.WithValue(ctx, loggerContextKeyVal, logger)
}
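
A small sketch of the context-logger pattern above: callers attach a logger once near the entry point and downstream code retrieves it without explicit plumbing, falling back to the logrus standard logger when none is set. The field name is illustrative.

package main

import (
	"context"

	log "github.com/sirupsen/logrus"

	"github.com/actions-oss/act-cli/pkg/common"
)

func main() {
	// Attach a field-scoped logger to the context once.
	ctx := common.WithLogger(context.Background(), log.WithField("job", "build"))

	// Deep inside, code pulls the logger back out of the context.
	common.Logger(ctx).Infof("starting job")

	// With no logger attached, Logger falls back to logrus.StandardLogger().
	common.Logger(context.Background()).Debugf("fallback logger")
}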

75
pkg/common/outbound_ip.go Normal file
View File

@@ -0,0 +1,75 @@
package common
import (
"net"
"sort"
"strings"
)
// GetOutboundIP returns an outbound IP address of this machine.
// It tries to access the internet and returns the local IP address of the connection.
// If the machine cannot access the internet, it returns a preferred IP address from network interfaces.
// It returns nil if no IP address is found.
func GetOutboundIP() net.IP {
// See https://stackoverflow.com/a/37382208
conn, err := net.Dial("udp", "8.8.8.8:80")
if err == nil {
defer conn.Close()
return conn.LocalAddr().(*net.UDPAddr).IP
}
// So the machine cannot access the internet. Pick an IP address from network interfaces.
if ifs, err := net.Interfaces(); err == nil {
type IP struct {
net.IP
net.Interface
}
var ips []IP
for _, i := range ifs {
if addrs, err := i.Addrs(); err == nil {
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip.IsGlobalUnicast() {
ips = append(ips, IP{ip, i})
}
}
}
}
if len(ips) > 1 {
sort.Slice(ips, func(i, j int) bool {
ifi := ips[i].Interface
ifj := ips[j].Interface
// ethernet is preferred
if vi, vj := strings.HasPrefix(ifi.Name, "e"), strings.HasPrefix(ifj.Name, "e"); vi != vj {
return vi
}
ipi := ips[i].IP
ipj := ips[j].IP
// IPv4 is preferred
if vi, vj := ipi.To4() != nil, ipj.To4() != nil; vi != vj {
return vi
}
// en0 is preferred to en1
if ifi.Name != ifj.Name {
return ifi.Name < ifj.Name
}
// fallback
return ipi.String() < ipj.String()
})
return ips[0].IP
}
}
return nil
}
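
A trivial sketch of calling the helper above; note that it may return nil when no suitable address is found, so callers should check before using the result.

package main

import (
	"fmt"

	"github.com/actions-oss/act-cli/pkg/common"
)

func main() {
	// Returns the local address of an internet-facing connection, or a preferred
	// interface address when offline, or nil if nothing suitable is found.
	if ip := common.GetOutboundIP(); ip != nil {
		fmt.Println("outbound IP:", ip.String())
	} else {
		fmt.Println("no outbound IP found")
	}
}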

View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,96 @@
package container
import (
"context"
"io"
"os"
"github.com/actions-oss/act-cli/pkg/common"
"github.com/docker/go-connections/nat"
"golang.org/x/term"
)
// NewContainerInput the input for the New function
type NewContainerInput struct {
Image string
Username string
Password string
Entrypoint []string
Cmd []string
WorkingDir string
Env []string
Binds []string
Mounts map[string]string
Name string
Stdout io.Writer
Stderr io.Writer
NetworkMode string
Privileged bool
UsernsMode string
Platform string
Options string
NetworkAliases []string
ExposedPorts nat.PortSet
PortBindings nat.PortMap
}
// FileEntry is a file to copy to a container
type FileEntry struct {
Name string
Mode uint32
Body string
}
// Container for managing docker run containers
type Container interface {
Create(capAdd []string, capDrop []string) common.Executor
Copy(destPath string, files ...*FileEntry) common.Executor
CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error
CopyDir(destPath string, srcPath string, useGitIgnore bool) common.Executor
GetContainerArchive(ctx context.Context, srcPath string) (io.ReadCloser, error)
Pull(forcePull bool) common.Executor
Start(attach bool) common.Executor
Exec(command []string, env map[string]string, user, workdir string) common.Executor
UpdateFromEnv(srcPath string, env *map[string]string) common.Executor
UpdateFromImageEnv(env *map[string]string) common.Executor
Remove() common.Executor
Close() common.Executor
ReplaceLogWriter(io.Writer, io.Writer) (io.Writer, io.Writer)
GetHealth(ctx context.Context) Health
}
// NewDockerBuildExecutorInput the input for the NewDockerBuildExecutor function
type NewDockerBuildExecutorInput struct {
ContextDir string
Dockerfile string
BuildContext io.Reader
ImageTag string
Platform string
}
// NewDockerPullExecutorInput the input for the NewDockerPullExecutor function
type NewDockerPullExecutorInput struct {
Image string
ForcePull bool
Platform string
Username string
Password string
}
type Health int
const (
HealthStarting Health = iota
HealthHealthy
HealthUnHealthy
)
var containerAllocateTerminal bool
func init() {
containerAllocateTerminal = term.IsTerminal(int(os.Stdout.Fd()))
}
func SetContainerAllocateTerminal(val bool) {
containerAllocateTerminal = val
}
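
For orientation, a sketch of assembling the data structures above. The concrete constructor that turns a NewContainerInput into a Container is defined elsewhere in the package and is not shown in this diff, so only the types are exercised here; the import path (pkg/container, as in upstream act), image name, and env values are assumptions for illustration.

package main

import (
	"fmt"
	"os"

	"github.com/actions-oss/act-cli/pkg/container"
)

func main() {
	// Disable TTY allocation explicitly (the init() above defaults it from os.Stdout).
	container.SetContainerAllocateTerminal(false)

	input := container.NewContainerInput{
		Image:      "node:16-buster-slim", // illustrative image
		WorkingDir: "/workspace",
		Env:        []string{"CI=true"},
		Stdout:     os.Stdout,
		Stderr:     os.Stderr,
	}

	// Files copied into a container are described declaratively before the copy.
	entry := container.FileEntry{
		Name: "entrypoint.sh",
		Mode: 0o755,
		Body: "#!/bin/sh\necho hello\n",
	}

	fmt.Printf("container %s will receive %s\n", input.Image, entry.Name)
}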

View File

@@ -0,0 +1,61 @@
//go:build !(WITHOUT_DOCKER || !(linux || darwin || windows || netbsd))
package container
import (
"context"
"strings"
"github.com/actions-oss/act-cli/pkg/common"
"github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/credentials"
"github.com/docker/docker/api/types/registry"
)
func LoadDockerAuthConfig(ctx context.Context, image string) (registry.AuthConfig, error) {
logger := common.Logger(ctx)
config, err := config.Load(config.Dir())
if err != nil {
logger.Warnf("Could not load docker config: %v", err)
return registry.AuthConfig{}, err
}
if !config.ContainsAuth() {
config.CredentialsStore = credentials.DetectDefaultStore(config.CredentialsStore)
}
hostName := "index.docker.io"
index := strings.IndexRune(image, '/')
if index > -1 && (strings.ContainsAny(image[:index], ".:") || image[:index] == "localhost") {
hostName = image[:index]
}
authConfig, err := config.GetAuthConfig(hostName)
if err != nil {
logger.Warnf("Could not get auth config from docker config: %v", err)
return registry.AuthConfig{}, err
}
return registry.AuthConfig(authConfig), nil
}
func LoadDockerAuthConfigs(ctx context.Context) map[string]registry.AuthConfig {
logger := common.Logger(ctx)
config, err := config.Load(config.Dir())
if err != nil {
logger.Warnf("Could not load docker config: %v", err)
return nil
}
if !config.ContainsAuth() {
config.CredentialsStore = credentials.DetectDefaultStore(config.CredentialsStore)
}
creds, _ := config.GetAllCredentials()
authConfigs := make(map[string]registry.AuthConfig, len(creds))
for k, v := range creds {
authConfigs[k] = registry.AuthConfig(v)
}
return authConfigs
}
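
A hedged sketch of how the two loaders above might be called, again assuming the pkg/container import path; the registry host is derived from the image reference, falling back to index.docker.io for bare image names.

package main

import (
	"context"
	"fmt"

	"github.com/actions-oss/act-cli/pkg/container"
)

func main() {
	ctx := context.Background()

	// "ghcr.io/owner/image" resolves credentials for ghcr.io;
	// a bare name like "ubuntu:22.04" would fall back to index.docker.io.
	auth, err := container.LoadDockerAuthConfig(ctx, "ghcr.io/owner/image")
	if err == nil && auth.Username != "" {
		fmt.Println("found credentials for user:", auth.Username)
	}

	// Or load everything the local docker config knows about.
	for host := range container.LoadDockerAuthConfigs(ctx) {
		fmt.Println("credentials available for", host)
	}
}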

Some files were not shown because too many files have changed in this diff