# e2e-aks-ci.yml
name: E2E on AKS

on:
  schedule:
    - cron: '5 10 * * *'
  workflow_dispatch:
    inputs:
      ref:
        description: "checkout git branch/tag"
        required: true
        default: "main"
      keep_cluster:
        description: "Keep the cluster afterwards?"
        required: false
        default: "no"

env:
  GOARCH: amd64
  CGO_ENABLED: 0
  GINKGO_NODES: 1
  FLAKE_ATTEMPTS: 1
  AKS_MACHINE_TYPE: 'Standard_D3_v2'

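# Note: GOARCH and CGO_ENABLED are presumably picked up by the build script
# below; GINKGO_NODES and FLAKE_ATTEMPTS appear to configure the ginkgo runs
# (a single parallel node, no retries for flaky specs).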
jobs:
  aks-fleet-examples:
    runs-on: ubuntu-latest
    if: >
      github.repository == 'rancher/fleet'
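    # Only run in the upstream rancher/fleet repository, not in forks.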
    steps:
      -
        name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
          fetch-depth: 0
      -
        name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          check-latest: true
      -
        name: Setup Ginkgo Test Framework
        run: go install github.com/onsi/ginkgo/v2/ginkgo
      # Follow https://github.com/marketplace/actions/azure-login#configure-deployment-credentials
      # az group create --name fleetCI --location eastus2
      # az ad sp create-for-rbac --name "fleetCI" --sdk-auth --role contributor \
      #   --scopes /subscriptions/{id}/resourceGroups/fleetCI
      -
        name: Login to Azure
        uses: azure/login@v2
        with:
          creds: '{"clientId":"${{ secrets.AZURE_CLIENT_ID }}","clientSecret":"${{ secrets.AZURE_CLIENT_SECRET }}","subscriptionId":"${{ secrets.AZURE_SUBSCRIPTION_ID }}","tenantId":"${{ secrets.AZURE_TENANT_ID }}"}'
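      # The random suffix keeps cluster names unique across runs; the ID output
      # is read back by the "Delete AKS cluster" step at the end of the job.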
      -
        name: Create AKS cluster
        id: create-cluster
        # Bash must be specified explicitly when a job runs on a Windows runner
        shell: bash
        run: |
          id=$RANDOM
          echo "ID=$id" >> $GITHUB_OUTPUT
          az aks create --resource-group fleetCI \
            --node-vm-size ${{ env.AKS_MACHINE_TYPE }} \
            --name fleetCI$id \
            --node-count 2 \
            --generate-ssh-keys
          az aks get-credentials --resource-group fleetCI \
            --name fleetCI$id \
            --file kubeconfig-fleet-ci
          # List existing clusters
          az aks list | jq '.[] | .name + " " + (.powerState|tostring)'
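      # The kubeconfig written above is used by the deploy and test steps below,
      # which export it as KUBECONFIG from $GITHUB_WORKSPACE/kubeconfig-fleet-ci.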
      -
        name: Build fleet binaries
        run: |
          ./.github/scripts/build-fleet-binaries.sh
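      # QEMU and Buildx set up the container image builder used by the
      # docker/build-push-action steps below.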
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      -
        name: Get UUID
        id: uuid
        run: echo "uuid=$(uuidgen)" >> $GITHUB_OUTPUT
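      # Images are pushed to ttl.sh, an anonymous ephemeral registry; the UUID
      # makes the image names unique per run and the "1h" tag sets the expiry.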
      -
        id: meta-fleet
        uses: docker/metadata-action@v5
        with:
          images: |
            ttl.sh/rancher/fleet-${{ steps.uuid.outputs.uuid }}
          tags: type=raw,value=1h
      -
        uses: docker/build-push-action@v6
        with:
          context: .
          file: package/Dockerfile
          build-args: |
            ARCH=${{ env.GOARCH }}
          push: true
          tags: ${{ steps.meta-fleet.outputs.tags }}
          labels: ${{ steps.meta-fleet.outputs.labels }}
      -
        id: meta-fleet-agent
        uses: docker/metadata-action@v5
        with:
          images: |
            ttl.sh/rancher/fleet-agent-${{ steps.uuid.outputs.uuid }}
          tags: type=raw,value=1h
      -
        uses: docker/build-push-action@v6
        with:
          context: .
          file: package/Dockerfile.agent
          build-args: |
            ARCH=${{ env.GOARCH }}
          push: true
          tags: ${{ steps.meta-fleet-agent.outputs.tags }}
          labels: ${{ steps.meta-fleet-agent.outputs.labels }}
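      # deploy-fleet.sh presumably installs Fleet from the freshly pushed images;
      # NODE and SHARDS appear to pin the controllers to one node and enable
      # three controller shards.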
      -
        name: Deploy Fleet
        run: |
          export KUBECONFIG="$GITHUB_WORKSPACE/kubeconfig-fleet-ci"
          kc_node=$(kubectl get nodes --no-headers -o name | head -n 1)
          node=${kc_node#node/}
          echo "${{ steps.meta-fleet.outputs.tags }} ${{ steps.meta-fleet-agent.outputs.tags }}"
          NODE=$node SHARDS='[{"id":"shard0"},{"id":"shard1"},{"id":"shard2"}]' ./.github/scripts/deploy-fleet.sh \
            ${{ steps.meta-fleet.outputs.tags }} \
            ${{ steps.meta-fleet-agent.outputs.tags }}
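      # Run the single-cluster e2e suites against the fleet-local namespace; the
      # label filter skips specs labelled "infra-setup", which presumably
      # provision their own infrastructure.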
      -
        name: Fleet E2E Tests
        env:
          FLEET_E2E_NS: fleet-local
        run: |
          export KUBECONFIG="$GITHUB_WORKSPACE/kubeconfig-fleet-ci"
          ginkgo --github-output --label-filter='!infra-setup' e2e/single-cluster e2e/keep-resources
      -
        name: Acceptance Tests for Examples
        env:
          FLEET_E2E_NS: fleet-local
        run: |
          export KUBECONFIG="$GITHUB_WORKSPACE/kubeconfig-fleet-ci"
          ginkgo --github-output e2e/acceptance/single-cluster-examples
      -
        name: Fleet Tests Requiring Github Secrets
        # These tests can't run on PRs, because PRs don't have access to the secrets
        env:
          FLEET_E2E_NS: fleet-local
          GIT_REPO_URL: "git@github.com:fleetrepoci/testaks.git"
          GIT_REPO_HOST: "github.com"
          GIT_REPO_USER: "git"
          CI_OCI_USERNAME: ${{ secrets.CI_OCI_USERNAME }}
          CI_OCI_PASSWORD: ${{ secrets.CI_OCI_PASSWORD }}
        run: |
          export KUBECONFIG="$GITHUB_WORKSPACE/kubeconfig-fleet-ci"
          export GIT_SSH_KEY="$GITHUB_WORKSPACE/id_ecdsa"
          export GIT_SSH_PUBKEY="$GITHUB_WORKSPACE/id_ecdsa.pub"
          echo "${{ secrets.CI_AKS_SSH_KEY }}" > "$GIT_SSH_KEY"
          echo "${{ secrets.CI_AKS_SSH_PUBKEY }}" > "$GIT_SSH_PUBKEY"
          ginkgo --github-output e2e/require-secrets
      -
        name: Delete AKS cluster
        # Always tear down the cluster to avoid costs, unless the workflow was
        # run manually with keep_cluster set to "yes".
        if: ${{ always() && github.event.inputs.keep_cluster != 'yes' }}
        shell: bash
        run: |
          id="${{ steps.create-cluster.outputs.ID }}"
          az aks delete --resource-group fleetCI --name fleetCI$id --yes