Compare commits


2 Commits

Author          SHA1        Date
Bassem Dghaidi  a649471855  2026-01-29 11:04:18 +01:00
  Merge branch 'main' into alert-autofix-51
Bassem Dghaidi  af0f7d9495  2026-01-29 11:02:50 +01:00
  Potential fix for code scanning alert no. 51: Workflow does not contain permissions
  Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
15 changed files with 146 additions and 480 deletions
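The autofix commit (af0f7d9495) addresses CodeQL's "Workflow does not contain permissions" alert by giving each workflow an explicit `permissions` block, so the `GITHUB_TOKEN` is not handed the repository's default scopes. A minimal sketch of the pattern, assuming a workflow that only needs to read the repository:

```yaml
# Minimal sketch: explicit least-privilege token scope at the workflow level.
# Once a permissions block is declared, every unlisted scope drops to "none".
name: Example hardened workflow
on: [push]
permissions:
  contents: read   # enough for checkout; no write access for the GITHUB_TOKEN
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
```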

View File

@@ -1,4 +1,4 @@
name: Check dist content
name: Check dist/
on:
push:
@@ -11,9 +11,6 @@ on:
- '**.md'
workflow_dispatch:
permissions:
contents: read
jobs:
call-check-dist:
name: Check dist/
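`call-check-dist` invokes a reusable workflow, and the token permissions a called workflow receives can only be narrowed relative to the caller, never broadened, so the workflow-level `contents: read` above is the ceiling for the whole call. A hedged sketch (the `uses:` target is illustrative, not the repository's actual path):

```yaml
permissions:
  contents: read   # also the ceiling for the called workflow
jobs:
  call-check-dist:
    name: Check dist/
    uses: some-org/reusable-workflows/.github/workflows/check-dist.yml@main  # hypothetical path
```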

View File

@@ -1,5 +1,4 @@
name: Close inactive issues
on:
schedule:
- cron: "30 8 * * *"

View File

@@ -1,4 +1,4 @@
name: Code scanning
name: "Code scanning - action"
on:
push:
@@ -6,14 +6,15 @@ on:
schedule:
- cron: '0 19 * * 0'
permissions:
contents: read
security-events: write
jobs:
CodeQL-Build:
# CodeQL runs on ubuntu-latest, windows-latest, and macos-latest
runs-on: ubuntu-latest
permissions:
# required for all workflows
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v5
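This hunk declares permissions at both the workflow level and on the `CodeQL-Build` job. A job-level `permissions` block replaces the workflow-level one entirely for that job, so `security-events: write` has to be restated wherever SARIF results are uploaded. A condensed sketch of the precedence:

```yaml
permissions:
  contents: read              # applies to jobs without their own block
jobs:
  CodeQL-Build:
    runs-on: ubuntu-latest
    permissions:
      contents: read          # restated: the job block replaces the workflow default
      security-events: write  # required to upload code scanning results
    steps:
      - uses: actions/checkout@v5
```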

View File

@@ -1,21 +1,16 @@
name: Assign issue
on:
issues:
types: [opened]
permissions:
issues: write
jobs:
run-action:
runs-on: ubuntu-latest
steps:
- name: Get current oncall
id: oncall
run: |
echo "CURRENT=$(curl --request GET 'https://api.pagerduty.com/oncalls?include[]=users&schedule_ids[]=P5VG2BX&earliest=true' --header 'Authorization: Token token=${{ secrets.PAGERDUTY_TOKEN }}' --header 'Accept: application/vnd.pagerduty+json;version=2' --header 'Content-Type: application/json' | jq -r '.oncalls[].user.name')" >> $GITHUB_OUTPUT
- name: add_assignees
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/issues/${{ github.event.issue.number}}/assignees -d '{"assignees":["${{steps.oncall.outputs.CURRENT}}"]}'
- name: Get current oncall
id: oncall
run: |
echo "CURRENT=$(curl --request GET 'https://api.pagerduty.com/oncalls?include[]=users&schedule_ids[]=P5VG2BX&earliest=true' --header 'Authorization: Token token=${{ secrets.PAGERDUTY_TOKEN }}' --header 'Accept: application/vnd.pagerduty+json;version=2' --header 'Content-Type: application/json' | jq -r '.oncalls[].user.name')" >> $GITHUB_OUTPUT
- name: add_assignees
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/issues/${{ github.event.issue.number}}/assignees -d '{"assignees":["${{steps.oncall.outputs.CURRENT}}"]}'
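The `add_assignees` step interpolates `steps.oncall.outputs.CURRENT` straight into the request body, so a failed PagerDuty lookup would post an empty assignee. A hedged hardening sketch (the same guard would fit the reviewer workflow further down):

```yaml
# Sketch: skip the assignment when the on-call lookup produced no name
- name: add_assignees
  if: steps.oncall.outputs.CURRENT != ''
  run: |
    curl -X POST -H "Accept: application/vnd.github+json" \
      -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
      https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/assignees \
      -d '{"assignees":["${{ steps.oncall.outputs.CURRENT }}"]}'
```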

View File

@@ -1,4 +1,4 @@
name: License check
name: Licensed
on:
push:
@@ -9,9 +9,6 @@ on:
- main
workflow_dispatch:
permissions:
contents: read
jobs:
validate-cached-dependency-records:
runs-on: ubuntu-latest
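`validate-cached-dependency-records` only reads the repository, which is why `contents: read` suffices here. A hedged sketch of such a job, assuming the `licensed` CLI from github/licensed is already on the runner:

```yaml
jobs:
  validate-cached-dependency-records:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@v5
      - run: licensed status   # checks that cached dependency license records are current
```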

View File

@@ -1,25 +1,20 @@
name: Assign pull request reviewer
name: Add Reviewer PR
on:
pull_request_target:
types: [opened]
permissions:
pull-requests: write
jobs:
run-action:
runs-on: ubuntu-latest
steps:
- name: Get current oncall
id: oncall
run: |
echo "CURRENT=$(curl --request GET 'https://api.pagerduty.com/oncalls?include[]=users&schedule_ids[]=P5VG2BX&earliest=true' --header 'Authorization: Token token=${{ secrets.PAGERDUTY_TOKEN }}' --header 'Accept: application/vnd.pagerduty+json;version=2' --header 'Content-Type: application/json' | jq -r '.oncalls[].user.name')" >> $GITHUB_OUTPUT
- name: Request Review
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/pulls/${{ github.event.pull_request.number}}/requested_reviewers -d '{"reviewers":["${{steps.oncall.outputs.CURRENT}}"]}'
- name: Add Assignee
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/issues/${{ github.event.pull_request.number}}/assignees -d '{"assignees":["${{steps.oncall.outputs.CURRENT}}"]}'
- name: Get current oncall
id: oncall
run: |
echo "CURRENT=$(curl --request GET 'https://api.pagerduty.com/oncalls?include[]=users&schedule_ids[]=P5VG2BX&earliest=true' --header 'Authorization: Token token=${{ secrets.PAGERDUTY_TOKEN }}' --header 'Accept: application/vnd.pagerduty+json;version=2' --header 'Content-Type: application/json' | jq -r '.oncalls[].user.name')" >> $GITHUB_OUTPUT
- name: Request Review
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/pulls/${{ github.event.pull_request.number}}/requested_reviewers -d '{"reviewers":["${{steps.oncall.outputs.CURRENT}}"]}'
- name: Add Assignee
run: |
curl -X POST -H "Accept: application/vnd.github+json" -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN}}" https://api.github.com/repos/${{github.repository}}/issues/${{ github.event.pull_request.number}}/assignees -d '{"assignees":["${{steps.oncall.outputs.CURRENT}}"]}'

View File

@@ -1,17 +1,17 @@
name: Publish immutable action
name: 'Publish Immutable Action Version'
on:
release:
types: [released]
permissions:
contents: read
id-token: write
packages: write
jobs:
publish:
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
packages: write
steps:
- name: Checking out
uses: actions/checkout@v5
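The `publish` job pairs `id-token: write` (to mint an OIDC token for signing/attestation) with `packages: write` (to push the action package). A hedged sketch; the publishing action and its pin are assumptions, not taken from this diff:

```yaml
jobs:
  publish:
    runs-on: ubuntu-latest
    permissions:
      contents: read    # check out the tagged source
      id-token: write   # OIDC token for attestation
      packages: write   # push the immutable package
    steps:
      - uses: actions/checkout@v5
      - uses: actions/publish-immutable-action@v0   # hypothetical pin
```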

View File

@@ -1,5 +1,4 @@
name: Release new action version
on:
release:
types: [released]
@@ -11,7 +10,6 @@ on:
env:
TAG_NAME: ${{ github.event.inputs.TAG_NAME || github.event.release.tag_name }}
permissions:
contents: write

View File

@@ -1,5 +1,8 @@
name: Tests
permissions:
contents: read
on:
pull_request:
branches:
@@ -10,9 +13,6 @@ on:
- main
- releases/**
permissions:
contents: read
jobs:
# Build and unit test
build:
@@ -60,7 +60,6 @@ jobs:
path: |
test-cache
~/test-cache
test-restore:
needs: test-save
strategy:
@@ -90,359 +89,44 @@ jobs:
runs-on: ubuntu-latest
container:
image: ubuntu:latest
options: --privileged
options: --dns 127.0.0.1
services:
squid-proxy:
image: wernight/squid
image: ubuntu/squid:latest
ports:
- 3128:3128
env:
https_proxy: http://squid-proxy:3128
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Install dependencies
run: |
apt-get update
apt-get install -y iptables dnsutils curl jq ipset
- name: Fetch GitHub meta and configure firewall
run: |
# Fetch GitHub meta API to get all IP ranges
echo "Fetching GitHub meta API..."
curl -sS https://api.github.com/meta > /tmp/github-meta.json
# Wait for squid-proxy service to be resolvable and accepting connections
echo "Waiting for squid-proxy service..."
for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15; do
PROXY_IP=$(getent hosts squid-proxy | awk '{ print $1 }')
if [ -n "$PROXY_IP" ]; then
echo "squid-proxy resolved to: $PROXY_IP"
# Test that proxy is actually accepting connections
if curl --connect-timeout 2 --max-time 5 -x http://squid-proxy:3128 -sS https://api.github.com/zen 2>/dev/null; then
echo "Proxy is working!"
break
else
echo "Attempt $i: Proxy resolved but not ready yet, waiting..."
fi
else
echo "Attempt $i: squid-proxy not resolvable yet, waiting..."
fi
sleep 2
done
if [ -z "$PROXY_IP" ]; then
echo "ERROR: Could not resolve squid-proxy after 15 attempts"
exit 1
fi
# Verify proxy works before locking down firewall
echo "Final proxy connectivity test..."
if ! curl --connect-timeout 5 --max-time 10 -x http://squid-proxy:3128 -sS https://api.github.com/zen; then
echo "ERROR: Proxy is not working properly"
exit 1
fi
echo "Proxy verified working!"
# Allow established connections
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Allow loopback
iptables -A OUTPUT -o lo -j ACCEPT
# Allow connections to the proxy
iptables -A OUTPUT -d $PROXY_IP -p tcp --dport 3128 -j ACCEPT
# Allow DNS
iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
# Create ipset for GitHub IPs (more efficient than individual rules)
ipset create github-ips hash:net
# Add all GitHub IP ranges from meta API (hooks, web, api, git, actions, etc.)
# EXCLUDING blob storage which must go through proxy
for category in hooks web api git pages importer actions actions_macos codespaces copilot; do
echo "Adding IPs for category: $category"
jq -r ".${category}[]? // empty" /tmp/github-meta.json 2>/dev/null | while read cidr; do
# Skip IPv6 for now (iptables vs ip6tables) - use case for POSIX compatibility
case "$cidr" in
*:*) ;; # IPv6, skip
*) ipset add github-ips "$cidr" 2>/dev/null || true ;;
esac
done
done
# Allow all GitHub IPs
iptables -A OUTPUT -m set --match-set github-ips dst -p tcp --dport 443 -j ACCEPT
iptables -A OUTPUT -m set --match-set github-ips dst -p tcp --dport 80 -j ACCEPT
# CRITICAL: Block direct access to blob storage and results-receiver
# These MUST go through the proxy for cache operations
echo "Blocking direct access to cache-critical endpoints..."
# Block results-receiver.actions.githubusercontent.com
for ip in $(getent ahosts "results-receiver.actions.githubusercontent.com" 2>/dev/null | awk '{print $1}' | sort -u); do
echo "Blocking direct access to results-receiver: $ip"
iptables -I OUTPUT 1 -d "$ip" -p tcp --dport 443 -j REJECT
done
# Block blob.core.windows.net (Azure blob storage used for cache)
for host in productionresultssa0.blob.core.windows.net productionresultssa1.blob.core.windows.net productionresultssa2.blob.core.windows.net productionresultssa3.blob.core.windows.net; do
for ip in $(getent ahosts "$host" 2>/dev/null | awk '{print $1}' | sort -u); do
echo "Blocking direct access to blob storage ($host): $ip"
iptables -I OUTPUT 1 -d "$ip" -p tcp --dport 443 -j REJECT
done
done
# Block all other outbound HTTP/HTTPS traffic
iptables -A OUTPUT -p tcp --dport 80 -j REJECT
iptables -A OUTPUT -p tcp --dport 443 -j REJECT
echo "iptables rules applied:"
iptables -L OUTPUT -n -v
echo ""
echo "ipset github-ips contains $(ipset list github-ips | grep -c '^[0-9]') entries"
- name: Verify proxy enforcement
run: |
echo "=== Testing proxy enforcement ==="
# Test 1: Verify proxy is working by explicitly using it
echo "Test 1: Connection through proxy (should SUCCEED)"
if curl --connect-timeout 10 --max-time 15 -x http://squid-proxy:3128 -sS -o /dev/null -w "%{http_code}" https://api.github.com/zen; then
echo ""
echo "✓ Proxy connection works"
else
echo "✗ ERROR: Proxy is not working!"
exit 1
fi
# Test 2: Direct connection to blob storage should FAIL (blocked by iptables)
echo ""
echo "Test 2: Direct connection to blob storage (should FAIL - blocked by iptables)"
if curl --connect-timeout 5 --max-time 10 --noproxy '*' -sS https://productionresultssa0.blob.core.windows.net 2>/dev/null; then
echo "✗ ERROR: Direct blob storage connection succeeded but should have been blocked!"
exit 1
else
echo "✓ Direct blob storage correctly blocked by iptables"
fi
# Test 3: Connection to blob storage THROUGH proxy should work
echo ""
echo "Test 3: Connection through proxy to blob storage (should SUCCEED)"
HTTP_CODE=$(curl --connect-timeout 10 --max-time 15 -x http://squid-proxy:3128 -sS -o /dev/null -w "%{http_code}" https://productionresultssa0.blob.core.windows.net 2>&1) || true
echo "HTTP response code: $HTTP_CODE"
if [ "$HTTP_CODE" = "400" ] || [ "$HTTP_CODE" = "409" ] || [ "$HTTP_CODE" = "200" ]; then
echo "✓ Proxy successfully forwarded request to blob storage (got HTTP $HTTP_CODE)"
else
echo "✗ ERROR: Proxy failed to forward request (got: $HTTP_CODE)"
exit 1
fi
echo ""
echo "=== All proxy enforcement tests passed ==="
echo "The proxy is working. If cache operations fail, it's because the action doesn't use the proxy."
- name: Generate files
run: __tests__/create-cache-files.sh proxy test-cache
- name: Save cache
env:
http_proxy: http://squid-proxy:3128
https_proxy: http://squid-proxy:3128
uses: ./
with:
key: test-proxy-${{ github.run_id }}
path: test-cache
- name: Verify proxy setup
run: |
echo "## 🔒 Proxy Integration Test - Cache Save" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### ✅ Test Configuration" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- **Proxy**: squid-proxy:3128" >> $GITHUB_STEP_SUMMARY
echo "- **Firewall**: iptables blocking direct access to cache endpoints" >> $GITHUB_STEP_SUMMARY
echo "- **Test**: Cache save operation completed successfully through proxy" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "If the cache save step succeeded, it means:" >> $GITHUB_STEP_SUMMARY
echo "1. Direct access to results-receiver.actions.githubusercontent.com was blocked" >> $GITHUB_STEP_SUMMARY
echo "2. Direct access to *.blob.core.windows.net was blocked" >> $GITHUB_STEP_SUMMARY
echo "3. Cache operations were routed through the squid proxy" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "✅ **SUCCESS**: Proxy integration test passed!" >> $GITHUB_STEP_SUMMARY
test-proxy-restore:
needs: test-proxy-save
runs-on: ubuntu-latest
container:
image: ubuntu:latest
options: --privileged
options: --dns 127.0.0.1
services:
squid-proxy:
image: wernight/squid
image: ubuntu/squid:latest
ports:
- 3128:3128
env:
https_proxy: http://squid-proxy:3128
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Install dependencies
run: |
apt-get update
apt-get install -y iptables dnsutils curl jq ipset
- name: Fetch GitHub meta and configure firewall
run: |
# Fetch GitHub meta API to get all IP ranges
echo "Fetching GitHub meta API..."
curl -sS https://api.github.com/meta > /tmp/github-meta.json
# Wait for squid-proxy service to be resolvable and accepting connections
echo "Waiting for squid-proxy service..."
for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15; do
PROXY_IP=$(getent hosts squid-proxy | awk '{ print $1 }')
if [ -n "$PROXY_IP" ]; then
echo "squid-proxy resolved to: $PROXY_IP"
# Test that proxy is actually accepting connections
if curl --connect-timeout 2 --max-time 5 -x http://squid-proxy:3128 -sS https://api.github.com/zen 2>/dev/null; then
echo "Proxy is working!"
break
else
echo "Attempt $i: Proxy resolved but not ready yet, waiting..."
fi
else
echo "Attempt $i: squid-proxy not resolvable yet, waiting..."
fi
sleep 2
done
if [ -z "$PROXY_IP" ]; then
echo "ERROR: Could not resolve squid-proxy after 15 attempts"
exit 1
fi
# Verify proxy works before locking down firewall
echo "Final proxy connectivity test..."
if ! curl --connect-timeout 5 --max-time 10 -x http://squid-proxy:3128 -sS https://api.github.com/zen; then
echo "ERROR: Proxy is not working properly"
exit 1
fi
echo "Proxy verified working!"
# Allow established connections
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Allow loopback
iptables -A OUTPUT -o lo -j ACCEPT
# Allow connections to the proxy
iptables -A OUTPUT -d $PROXY_IP -p tcp --dport 3128 -j ACCEPT
# Allow DNS
iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT
# Create ipset for GitHub IPs (more efficient than individual rules)
ipset create github-ips hash:net
# Add all GitHub IP ranges from meta API (hooks, web, api, git, actions, etc.)
# EXCLUDING blob storage which must go through proxy
for category in hooks web api git pages importer actions actions_macos codespaces copilot; do
echo "Adding IPs for category: $category"
jq -r ".${category}[]? // empty" /tmp/github-meta.json 2>/dev/null | while read cidr; do
# Skip IPv6 for now (iptables vs ip6tables) - use case for POSIX compatibility
case "$cidr" in
*:*) ;; # IPv6, skip
*) ipset add github-ips "$cidr" 2>/dev/null || true ;;
esac
done
done
# Allow all GitHub IPs
iptables -A OUTPUT -m set --match-set github-ips dst -p tcp --dport 443 -j ACCEPT
iptables -A OUTPUT -m set --match-set github-ips dst -p tcp --dport 80 -j ACCEPT
# CRITICAL: Block direct access to blob storage and results-receiver
# These MUST go through the proxy for cache operations
echo "Blocking direct access to cache-critical endpoints..."
# Block results-receiver.actions.githubusercontent.com
for ip in $(getent ahosts "results-receiver.actions.githubusercontent.com" 2>/dev/null | awk '{print $1}' | sort -u); do
echo "Blocking direct access to results-receiver: $ip"
iptables -I OUTPUT 1 -d "$ip" -p tcp --dport 443 -j REJECT
done
# Block blob.core.windows.net (Azure blob storage used for cache)
for host in productionresultssa0.blob.core.windows.net productionresultssa1.blob.core.windows.net productionresultssa2.blob.core.windows.net productionresultssa3.blob.core.windows.net; do
for ip in $(getent ahosts "$host" 2>/dev/null | awk '{print $1}' | sort -u); do
echo "Blocking direct access to blob storage ($host): $ip"
iptables -I OUTPUT 1 -d "$ip" -p tcp --dport 443 -j REJECT
done
done
# Block all other outbound HTTP/HTTPS traffic
iptables -A OUTPUT -p tcp --dport 80 -j REJECT
iptables -A OUTPUT -p tcp --dport 443 -j REJECT
echo "iptables rules applied:"
iptables -L OUTPUT -n -v
echo ""
echo "ipset github-ips contains $(ipset list github-ips | grep -c '^[0-9]') entries"
- name: Verify proxy enforcement
run: |
echo "=== Testing proxy enforcement ==="
# Test 1: Verify proxy is working by explicitly using it
echo "Test 1: Connection through proxy (should SUCCEED)"
if curl --connect-timeout 10 --max-time 15 -x http://squid-proxy:3128 -sS -o /dev/null -w "%{http_code}" https://api.github.com/zen; then
echo ""
echo "✓ Proxy connection works"
else
echo "✗ ERROR: Proxy is not working!"
exit 1
fi
# Test 2: Direct connection to blob storage should FAIL (blocked by iptables)
echo ""
echo "Test 2: Direct connection to blob storage (should FAIL - blocked by iptables)"
if curl --connect-timeout 5 --max-time 10 --noproxy '*' -sS https://productionresultssa0.blob.core.windows.net 2>/dev/null; then
echo "✗ ERROR: Direct blob storage connection succeeded but should have been blocked!"
exit 1
else
echo "✓ Direct blob storage correctly blocked by iptables"
fi
# Test 3: Connection to blob storage THROUGH proxy should work
echo ""
echo "Test 3: Connection through proxy to blob storage (should SUCCEED)"
HTTP_CODE=$(curl --connect-timeout 10 --max-time 15 -x http://squid-proxy:3128 -sS -o /dev/null -w "%{http_code}" https://productionresultssa0.blob.core.windows.net 2>&1) || true
echo "HTTP response code: $HTTP_CODE"
if [ "$HTTP_CODE" = "400" ] || [ "$HTTP_CODE" = "409" ] || [ "$HTTP_CODE" = "200" ]; then
echo "✓ Proxy successfully forwarded request to blob storage (got HTTP $HTTP_CODE)"
else
echo "✗ ERROR: Proxy failed to forward request (got: $HTTP_CODE)"
exit 1
fi
echo ""
echo "=== All proxy enforcement tests passed ==="
echo "The proxy is working. If cache operations fail, it's because the action doesn't use the proxy."
- name: Restore cache
env:
http_proxy: http://squid-proxy:3128
https_proxy: http://squid-proxy:3128
uses: ./
with:
key: test-proxy-${{ github.run_id }}
path: test-cache
- name: Verify proxy setup
run: |
echo "## 🔒 Proxy Integration Test - Cache Restore" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "### ✅ Test Configuration" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "- **Proxy**: squid-proxy:3128" >> $GITHUB_STEP_SUMMARY
echo "- **Firewall**: iptables blocking direct access to cache endpoints" >> $GITHUB_STEP_SUMMARY
echo "- **Test**: Cache restore operation completed successfully through proxy" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "If the cache restore step succeeded, it means:" >> $GITHUB_STEP_SUMMARY
echo "1. Direct access to results-receiver.actions.githubusercontent.com was blocked" >> $GITHUB_STEP_SUMMARY
echo "2. Direct access to *.blob.core.windows.net was blocked" >> $GITHUB_STEP_SUMMARY
echo "3. Cache operations were routed through the squid proxy" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "✅ **SUCCESS**: Proxy integration test passed!" >> $GITHUB_STEP_SUMMARY
- name: Verify cache
run: __tests__/verify-cache-files.sh proxy test-cache
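Both proxy jobs enforce the same invariant: iptables rejects direct egress to the cache endpoints, so save and restore succeed only if the action honours the proxy variables. The routing itself is just step-level `http_proxy`/`https_proxy` env vars, condensed here as a sketch assuming a Squid service at `squid-proxy:3128`:

```yaml
- name: Save cache through the proxy
  uses: ./   # the local actions/cache checkout under test
  env:
    http_proxy: http://squid-proxy:3128
    https_proxy: http://squid-proxy:3128
  with:
    path: test-cache
    key: test-proxy-${{ github.run_id }}
```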

View File

@@ -123,11 +123,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- name: Cache Primes
id: cache-primes
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: prime-numbers
key: ${{ runner.os }}-primes
@@ -154,11 +154,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- name: Restore cached Primes
id: cache-primes-restore
uses: actions/cache/restore@v5
uses: actions/cache/restore@v4
with:
path: |
path/to/dependencies
@@ -169,7 +169,7 @@ jobs:
.
- name: Save Primes
id: cache-primes-save
uses: actions/cache/save@v5
uses: actions/cache/save@v4
with:
path: |
path/to/dependencies
@@ -224,7 +224,7 @@ A cache key can include any of the contexts, functions, literals, and operators
For example, using the [`hashFiles`](https://docs.github.com/en/actions/learn-github-actions/expressions#hashfiles) function allows you to create a new cache when dependencies change.
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
path/to/dependencies
@@ -242,7 +242,7 @@ Additionally, you can use arbitrary command output in a cache key, such as a dat
echo "date=$(/bin/date -u "+%Y%m%d")" >> $GITHUB_OUTPUT
shell: bash
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: path/to/dependencies
key: ${{ runner.os }}-${{ steps.get-date.outputs.date }}-${{ hashFiles('**/lockfiles') }}
@@ -262,9 +262,9 @@ Example:
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- uses: actions/cache@v5
- uses: actions/cache@v4
id: cache
with:
path: path/to/dependencies
@@ -292,11 +292,11 @@ jobs:
build-linux:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- name: Cache Primes
id: cache-primes
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: prime-numbers
key: primes
@@ -307,7 +307,7 @@ jobs:
- name: Cache Numbers
id: cache-numbers
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: numbers
key: primes
@@ -319,11 +319,11 @@ jobs:
build-windows:
runs-on: windows-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- name: Cache Primes
id: cache-primes
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: prime-numbers
key: primes

View File

@@ -12,7 +12,7 @@ This document lists some of the strategies (and example workflows if possible) w
jobs:
build:
runs-on: ubuntu-latest
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
key: ${{ some-metadata }}-cache
```
@@ -24,7 +24,7 @@ In your workflows, you can use different strategies to name your key depending o
One of the most common use cases is to use the hash of a lockfile as the key. This way, the same cache is restored until the dependencies listed in the lockfile change.
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
path/to/dependencies
@@ -37,7 +37,7 @@ One of the most common use case is to use hash for lockfile as key. This way, sa
If no cache matching the primary key is found, restore keys can be used to download the closest matching cache that was recently created. The build/install step then only needs to fetch a handful of newer dependencies, saving build time.
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
path/to/dependencies
@@ -54,7 +54,7 @@ The restore keys can be provided as a complete name, or a prefix, read more [her
For workflows with a matrix running across multiple operating systems, caches can be stored separately for each OS. This can be combined with `hashFiles` when multiple caches are generated per OS.
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
path/to/dependencies
@@ -73,7 +73,7 @@ Caches scoped to the particular workflow run id or run attempt can be stored and
Along similar lines, the commit SHA can be used to create a very specialized, short-lived cache.
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
path/to/dependencies
@@ -86,7 +86,7 @@ On similar lines, commit sha can be used to create a very specialized and short
A cache key can be formed from a combination of multiple pieces of metadata and evaluated info.
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
path/to/dependencies
@@ -146,9 +146,9 @@ In case you are using a centralized job to create and save your cache that can b
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- uses: actions/cache/restore@v5
- uses: actions/cache/restore@v4
id: cache
with:
path: path/to/dependencies
@@ -171,9 +171,9 @@ You can use the output of this action to exit the workflow on cache miss. This w
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- uses: actions/cache/restore@v5
- uses: actions/cache/restore@v4
id: cache
with:
path: path/to/dependencies
@@ -194,7 +194,7 @@ steps:
If you want to avoid re-computing the cache key in the `save` action, the outputs from the `restore` action can be used as input to the `save` action.
```yaml
- uses: actions/cache/restore@v5
- uses: actions/cache/restore@v4
id: restore-cache
with:
path: |
@@ -204,7 +204,7 @@ If you want to avoid re-computing the cache key again in `save` action, the outp
.
.
.
- uses: actions/cache/save@v5
- uses: actions/cache/save@v4
with:
path: |
path/to/dependencies
@@ -219,7 +219,7 @@ On the other hand, the key can also be explicitly re-computed while executing th
Let's say we have a restore step that computes the key at runtime
```yaml
uses: actions/cache/restore@v5
uses: actions/cache/restore@v4
id: restore-cache
with:
key: cache-${{ hashFiles('**/lockfiles') }}
@@ -228,7 +228,7 @@ with:
Case 1: Where a user would want to reuse the key as it is
```yaml
uses: actions/cache/save@v5
uses: actions/cache/save@v4
with:
key: ${{ steps.restore-cache.outputs.cache-primary-key }}
```
@@ -236,7 +236,7 @@ with:
Case 2: Where the user would want to re-evaluate the key
```yaml
uses: actions/cache/save@v5
uses: actions/cache/save@v4
with:
key: npm-cache-${{ hashFiles('package-lock.json') }}
```
@@ -253,12 +253,12 @@ In case of multi-module projects, where the built artifact of one project needs
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- name: Build
run: ./build-parent-module.sh
- uses: actions/cache/save@v5
- uses: actions/cache/save@v4
id: cache
with:
path: path/to/dependencies
@@ -269,9 +269,9 @@ steps:
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- uses: actions/cache/restore@v5
- uses: actions/cache/restore@v4
id: cache
with:
path: path/to/dependencies
@@ -280,7 +280,7 @@ steps:
- name: Install Dependencies
if: steps.cache.outputs.cache-hit != 'true'
run: ./install.sh
- name: Build
run: ./build-child-module.sh

View File

@@ -45,7 +45,7 @@
## Bun
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~/.bun/install/cache
@@ -55,7 +55,7 @@
### Windows
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~\.bun
@@ -67,7 +67,7 @@
Using [NuGet lock files](https://docs.microsoft.com/nuget/consume-packages/package-references-in-project-files#locking-dependencies):
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: ~/.nuget/packages
key: ${{ runner.os }}-nuget-${{ hashFiles('**/packages.lock.json') }}
@@ -76,10 +76,10 @@ Using [NuGet lock files](https://docs.microsoft.com/nuget/consume-packages/packa
```
Depending on the environment, huge packages might be pre-installed in the global cache folder.
From `actions/cache@v3` onwards, you can now exclude unwanted packages with [exclude pattern](https://github.com/actions/toolkit/tree/main/packages/glob#exclude-patterns)
With `actions/cache@v4` you can now exclude unwanted packages with [exclude pattern](https://github.com/actions/toolkit/tree/main/packages/glob#exclude-patterns)
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~/.nuget/packages
@@ -96,7 +96,7 @@ Or you could move the cache folder like below.
env:
NUGET_PACKAGES: ${{ github.workspace }}/.nuget/packages
steps:
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: ${{ github.workspace }}/.nuget/packages
key: ${{ runner.os }}-nuget-${{ hashFiles('**/packages.lock.json') }}
@@ -108,7 +108,7 @@ steps:
```yaml
- name: Cache lein project dependencies
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: ~/.m2/repository
key: ${{ runner.os }}-clojure-${{ hashFiles('**/project.clj') }}
@@ -122,7 +122,7 @@ steps:
### POSIX
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: ~/.dub
key: ${{ runner.os }}-dub-${{ hashFiles('**/dub.selections.json') }}
@@ -133,7 +133,7 @@ steps:
### Windows
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: ~\AppData\Local\dub
key: ${{ runner.os }}-dub-${{ hashFiles('**/dub.selections.json') }}
@@ -146,7 +146,7 @@ steps:
### Linux
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~/.deno
@@ -157,7 +157,7 @@ steps:
### macOS
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~/.deno
@@ -168,7 +168,7 @@ steps:
### Windows
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~\.deno
@@ -179,7 +179,7 @@ steps:
## Elixir - Mix
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
deps
@@ -191,7 +191,7 @@ steps:
## Erlang - Rebar3
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v2
with:
path: |
~/.cache/rebar3
@@ -206,7 +206,7 @@ steps:
### Linux
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~/.cache/go-build
@@ -219,7 +219,7 @@ steps:
### macOS
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~/Library/Caches/go-build
@@ -232,7 +232,7 @@ steps:
### Windows
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~\AppData\Local\go-build
@@ -248,7 +248,7 @@ We cache the elements of the Cabal store separately, as the entirety of `~/.caba
```yaml
- name: Cache ~/.cabal/packages, ~/.cabal/store and dist-newstyle
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.cabal/packages
@@ -263,14 +263,14 @@ We cache the elements of the Cabal store separately, as the entirety of `~/.caba
### Linux or macOS
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
name: Cache ~/.stack
with:
path: ~/.stack
key: ${{ runner.os }}-stack-global-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}
restore-keys: |
${{ runner.os }}-stack-global-
- uses: actions/cache@v5
- uses: actions/cache@v4
name: Cache .stack-work
with:
path: .stack-work
@@ -282,16 +282,16 @@ We cache the elements of the Cabal store separately, as the entirety of `~/.caba
### Windows
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
name: Cache %APPDATA%\stack %LOCALAPPDATA%\Programs\stack
with:
path: |
~\AppData\Roaming\stack
~\AppData\Local\Programs\stack
~\AppData\Local\Programs\stack
key: ${{ runner.os }}-stack-global-${{ hashFiles('stack.yaml') }}-${{ hashFiles('package.yaml') }}
restore-keys: |
${{ runner.os }}-stack-global-
- uses: actions/cache@v5
- uses: actions/cache@v4
name: Cache .stack-work
with:
path: .stack-work
@@ -305,7 +305,7 @@ We cache the elements of the Cabal store separately, as the entirety of `~/.caba
> **Note** Ensure no Gradle daemons are still running when your workflow completes. Creating the cache package might fail due to locks held by Gradle. Refer to the [Gradle Daemon documentation](https://docs.gradle.org/current/userguide/gradle_daemon.html) for how to disable or stop them.
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~/.gradle/caches
@@ -319,7 +319,7 @@ We cache the elements of the Cabal store separately, as the entirety of `~/.caba
```yaml
- name: Cache local Maven repository
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
@@ -355,7 +355,7 @@ After [deprecation](https://github.blog/changelog/2022-10-11-github-actions-depr
`Get npm cache directory` step can then be used with `actions/cache` as shown below
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
id: npm-cache # use this to check for `cache-hit` ==> if: steps.npm-cache.outputs.cache-hit != 'true'
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
@@ -368,7 +368,7 @@ After [deprecation](https://github.blog/changelog/2022-10-11-github-actions-depr
```yaml
- name: restore lerna
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: '**/node_modules'
key: ${{ runner.os }}-${{ hashFiles('**/yarn.lock') }}
@@ -382,7 +382,7 @@ The yarn cache directory will depend on your operating system and version of `ya
id: yarn-cache-dir-path
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- uses: actions/cache@v5
- uses: actions/cache@v4
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
@@ -400,7 +400,7 @@ The yarn 2 cache directory will depend on your config. See https://yarnpkg.com/c
id: yarn-cache-dir-path
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
- uses: actions/cache@v5
- uses: actions/cache@v4
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
@@ -415,7 +415,7 @@ Esy allows you to export built dependencies and import pre-built dependencies.
```yaml
- name: Restore Cache
id: restore-cache
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: _export
key: ${{ runner.os }}-esy-${{ hashFiles('esy.lock/index.json') }}
@@ -444,7 +444,7 @@ Esy allows you to export built dependencies and import pre-built dependencies.
id: composer-cache
run: |
echo "dir=$(composer config cache-files-dir)" >> $GITHUB_OUTPUT
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: ${{ steps.composer-cache.outputs.dir }}
key: ${{ runner.os }}-composer-${{ hashFiles('**/composer.lock') }}
@@ -465,7 +465,7 @@ Locations:
### Simple example
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
@@ -478,7 +478,7 @@ Replace `~/.cache/pip` with the correct `path` if not using Ubuntu.
### Multiple OS's in a workflow
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
if: startsWith(runner.os, 'Linux')
with:
path: ~/.cache/pip
@@ -486,7 +486,7 @@ Replace `~/.cache/pip` with the correct `path` if not using Ubuntu.
restore-keys: |
${{ runner.os }}-pip-
- uses: actions/cache@v5
- uses: actions/cache@v4
if: startsWith(runner.os, 'macOS')
with:
path: ~/Library/Caches/pip
@@ -494,7 +494,7 @@ Replace `~/.cache/pip` with the correct `path` if not using Ubuntu.
restore-keys: |
${{ runner.os }}-pip-
- uses: actions/cache@v5
- uses: actions/cache@v4
if: startsWith(runner.os, 'Windows')
with:
path: ~\AppData\Local\pip\Cache
@@ -520,7 +520,7 @@ jobs:
- os: windows-latest
path: ~\AppData\Local\pip\Cache
steps:
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: ${{ matrix.path }}
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
@@ -539,7 +539,7 @@ jobs:
echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
- name: pip cache
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: ${{ steps.pip-cache.outputs.dir }}
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
@@ -553,11 +553,11 @@ jobs:
- name: Set up Python
# The actions/cache step below uses this id to get the exact python version
id: setup-python
uses: actions/setup-python@v6
uses: actions/setup-python@v2
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: ~/.local/share/virtualenvs
key: ${{ runner.os }}-python-${{ steps.setup-python.outputs.python-version }}-pipenv-${{ hashFiles('Pipfile.lock') }}
@@ -584,7 +584,7 @@ For renv, the cache directory will vary by OS. The `RENV_PATHS_ROOT` environment
cat("##[set-output name=r-version;]", R.Version()$version.string, sep = "")
shell: Rscript {0}
- name: Restore Renv package cache
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: ${{ env.RENV_PATHS_ROOT }}
key: ${{ steps.get-version.outputs.os-version }}-${{ steps.get-version.outputs.r-version }}-${{ inputs.cache-version }}-${{ hashFiles('renv.lock') }}
@@ -610,7 +610,7 @@ whenever possible:
## Rust - Cargo
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: |
~/.cargo/bin/
@@ -625,7 +625,7 @@ whenever possible:
```yaml
- name: Cache SBT
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.ivy2/cache
@@ -636,7 +636,7 @@ whenever possible:
## Swift, Objective-C - Carthage
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: Carthage
key: ${{ runner.os }}-carthage-${{ hashFiles('**/Cartfile.resolved') }}
@@ -647,7 +647,7 @@ whenever possible:
## Swift, Objective-C - CocoaPods
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: Pods
key: ${{ runner.os }}-pods-${{ hashFiles('**/Podfile.lock') }}
@@ -658,7 +658,7 @@ whenever possible:
## Swift - Swift Package Manager
```yaml
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: .build
key: ${{ runner.os }}-spm-${{ hashFiles('**/Package.resolved') }}
@@ -673,7 +673,7 @@ env:
MINT_PATH: .mint/lib
MINT_LINK_PATH: .mint/bin
steps:
- uses: actions/cache@v5
- uses: actions/cache@v4
with:
path: .mint
key: ${{ runner.os }}-mint-${{ hashFiles('**/Mintfile') }}
@@ -689,7 +689,7 @@ steps:
```yaml
- name: Cache Bazel
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
~/.cache/bazel
@@ -703,7 +703,7 @@ steps:
```yaml
- name: Cache Bazel
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: |
/private/var/tmp/_bazel_runner/

View File

@@ -35,9 +35,9 @@ If you are using separate jobs to create and save your cache(s) to be reused by
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- uses: actions/cache/restore@v5
- uses: actions/cache/restore@v4
id: cache
with:
path: path/to/dependencies
@@ -64,12 +64,12 @@ In case of multi-module projects, where the built artifact of one project needs
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- name: Build
run: /build-parent-module.sh
- uses: actions/cache/save@v5
- uses: actions/cache/save@v4
id: cache
with:
path: path/to/dependencies
@@ -80,9 +80,9 @@ steps:
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- uses: actions/cache/restore@v5
- uses: actions/cache/restore@v4
id: cache
with:
path: path/to/dependencies
@@ -107,9 +107,9 @@ To fail if there is no cache hit for the primary key, leave `restore-keys` empty
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- uses: actions/cache/restore@v5
- uses: actions/cache/restore@v4
id: cache
with:
path: path/to/dependencies

View File

@@ -23,7 +23,7 @@ If you are using separate jobs for generating common artifacts and sharing them
```yaml
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- name: Install Dependencies
run: /install.sh
@@ -31,7 +31,7 @@ steps:
- name: Build artifacts
run: /build.sh
- uses: actions/cache/save@v5
- uses: actions/cache/save@v4
id: cache
with:
path: path/to/dependencies
@@ -47,7 +47,7 @@ Let's say we have a restore step that computes a key at runtime.
#### Restore a cache
```yaml
uses: actions/cache/restore@v5
uses: actions/cache/restore@v4
id: restore-cache
with:
key: cache-${{ hashFiles('**/lockfiles') }}
@@ -55,7 +55,7 @@ with:
#### Case 1 - Where a user would want to reuse the key as it is
```yaml
uses: actions/cache/save@v5
uses: actions/cache/save@v4
with:
key: ${{ steps.restore-cache.outputs.cache-primary-key }}
```
@@ -63,7 +63,7 @@ with:
#### Case 2 - Where the user would want to re-evaluate the key
```yaml
uses: actions/cache/save@v5
uses: actions/cache/save@v4
with:
key: npm-cache-${{ hashFiles('package-lock.json') }}
```
@@ -91,11 +91,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
- name: Restore cached Prime Numbers
id: cache-prime-numbers-restore
uses: actions/cache/restore@v5
uses: actions/cache/restore@v4
with:
key: ${{ runner.os }}-prime-numbers
path: |
@@ -107,7 +107,7 @@ jobs:
- name: Always Save Prime Numbers
id: cache-prime-numbers-save
if: always() && steps.cache-prime-numbers-restore.outputs.cache-hit != 'true'
uses: actions/cache/save@v5
uses: actions/cache/save@v4
with:
key: ${{ steps.cache-prime-numbers-restore.outputs.cache-primary-key }}
path: |

View File

@@ -12,7 +12,7 @@ A cache today is immutable and cannot be updated. But some use cases require the
```yaml
- name: update cache on every commit
uses: actions/cache@v5
uses: actions/cache@v4
with:
path: prime-numbers
key: primes-${{ runner.os }}-${{ github.run_id }} # Can use time based key as well
@@ -21,7 +21,7 @@ A cache today is immutable and cannot be updated. But some use cases require the
```
Please note that this will create a new cache on every run and hence will consume the cache [quota](./README.md#cache-limits).
## Use cache across feature branches
Reusing a cache across feature branches is not allowed today, to provide cache [isolation](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache). However, if both feature branches are created from the default branch, a good workaround is to ensure that the default branch has a cache; that cache is then consumable by both feature branches, as in the sketch below.
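A minimal sketch, assuming the default branch is `main` and the feature branches' workflows use the same key scheme:

```yaml
# Runs on the default branch, so the saved cache is visible to feature branches
on:
  push:
    branches: [main]
jobs:
  seed-cache:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/cache@v4
        with:
          path: path/to/dependencies
          key: deps-${{ hashFiles('**/lockfiles') }}
```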