- Install kubectl if not available
- Setup kubeconfig from K3s
- Verify cluster access
name: Build Docker Image

on:
  push:
    branches: [main, develop]
    tags:
      - 'v*'
  workflow_dispatch:

env:
  REGISTRY: gitea0213.kro.kr
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-24.04-arm
    permissions:
      contents: write
      packages: write

    outputs:
      image-tag: ${{ steps.meta.outputs.tags }}
      image-digest: ${{ steps.build.outputs.digest }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

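      # The two steps below assume a self-hosted runner on the K3s server itself:
      # kubectl is installed if missing, and the kubeconfig is copied from
      # /etc/rancher/k3s/k3s.yaml (hence the sudo). Later steps invoke `sudo kubectl`
      # directly, which reads root's K3s config.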
      - name: Install kubectl
        run: |
          if ! command -v kubectl &> /dev/null; then
            curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"
            chmod +x kubectl
            sudo mv kubectl /usr/local/bin/
          fi
          kubectl version --client

      - name: Setup kubeconfig
        run: |
          mkdir -p $HOME/.kube
          sudo cat /etc/rancher/k3s/k3s.yaml > $HOME/.kube/config
          chmod 600 $HOME/.kube/config
          export KUBECONFIG=$HOME/.kube/config
          kubectl get nodes

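      # Registry image references must be lowercase, so normalize the repository
      # name before it is used to compose image names.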
      - name: Lowercase repository name
        id: lowercase
        run: |
          echo "repo=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

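      # Generates the tag list consumed below: branch name, PR ref, semver tags for
      # v* releases, <branch>-sha-<full-40-char-sha> (format=long), and "latest" on
      # the default branch.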
      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha,prefix={{branch}}-sha-,format=long
            type=raw,value=latest,enable={{is_default_branch}}

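      # Prepare the in-cluster build: ensure the kaniko-builds namespace exists and
      # (re)create the registry credentials secret. Note that the tar.gz context
      # written to /tmp is not referenced by the Kaniko Job below, which clones the
      # repository instead.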
      - name: Create Kaniko build context
        run: |
          # Create tar.gz of build context
          tar czf /tmp/build-context.tar.gz -C services/nextjs .

          # Create namespace if not exists
          sudo kubectl get namespace kaniko-builds 2>/dev/null || sudo kubectl create namespace kaniko-builds

          # Create/update registry credentials secret
          sudo kubectl create secret docker-registry kaniko-registry-creds \
            --docker-server=${{ env.REGISTRY }} \
            --docker-username=bluemayne \
            --docker-password=${{ secrets.GITEAREGISTRY }} \
            --namespace=kaniko-builds \
            --dry-run=client -o yaml | sudo kubectl apply -f -

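      # Renders a ConfigMap (carrying the production Dockerfile) plus a one-shot Job:
      # an init container clones the repository at the pushed commit and stages
      # services/nextjs into a shared emptyDir (this assumes the repo is clonable
      # from inside the cluster without credentials), then the Kaniko container
      # builds the image and pushes every tag produced by the metadata step.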
      - name: Build and push with Kaniko on Kubernetes
        id: build
        run: |
          TAGS="${{ steps.meta.outputs.tags }}"

          # Prepare destination arguments: one "- --destination=<tag>" YAML list item
          # per tag, indented (12 spaces) to line up with the Kaniko args list below
          DESTINATIONS=""
          while IFS= read -r tag; do
            [ -n "$tag" ] || continue
            DESTINATIONS="${DESTINATIONS}$(printf '\n            - --destination=%s' "$tag")"
          done <<< "$TAGS"

          # Create temporary pod name
          POD_NAME="kaniko-build-${{ github.run_number }}-$(date +%s)"

          # Create Kaniko Job
          cat <<EOF | sudo kubectl apply -f -
          apiVersion: v1
          kind: ConfigMap
          metadata:
            name: ${POD_NAME}-context
            namespace: kaniko-builds
          data:
            Dockerfile: |
          $(sed 's/^/    /' deploy/docker/Dockerfile.prod)
          ---
          apiVersion: batch/v1
          kind: Job
          metadata:
            name: ${POD_NAME}
            namespace: kaniko-builds
          spec:
            ttlSecondsAfterFinished: 600
            backoffLimit: 0
            template:
              spec:
                restartPolicy: Never
                initContainers:
                  - name: prepare-context
                    image: alpine:latest
                    command: ["/bin/sh", "-c"]
                    args:
                      - |
                        apk add --no-cache git
                        git clone https://gitea0213.kro.kr/${{ github.repository }}.git /workspace/repo
                        cd /workspace/repo
                        git checkout ${{ github.sha }}
                        mkdir -p /workspace/build
                        cp -r services/nextjs/* /workspace/build/
                    volumeMounts:
                      - name: workspace
                        mountPath: /workspace
                containers:
                  - name: kaniko
                    image: gcr.io/kaniko-project/executor:latest
                    args:
                      - --context=/workspace/build
                      - --dockerfile=/workspace/Dockerfile
                      - --cache=true
                      - --cache-repo=${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}/cache
                      - --compressed-caching=false
                      - --snapshot-mode=redo
                      - --use-new-run
          $DESTINATIONS
                    volumeMounts:
                      - name: workspace
                        mountPath: /workspace
                      - name: dockerfile
                        mountPath: /workspace/Dockerfile
                        subPath: Dockerfile
                      - name: docker-config
                        mountPath: /kaniko/.docker
                volumes:
                  - name: workspace
                    emptyDir: {}
                  - name: dockerfile
                    configMap:
                      name: ${POD_NAME}-context
                  - name: docker-config
                    secret:
                      secretName: kaniko-registry-creds
                      items:
                        - key: .dockerconfigjson
                          path: config.json
          EOF

          # Wait for job to complete
          echo "Waiting for Kaniko job to complete..."
          sudo kubectl wait --for=condition=complete --timeout=600s job/${POD_NAME} -n kaniko-builds || {
            echo "Job failed or timed out. Showing logs:"
            POD=$(sudo kubectl get pods -n kaniko-builds -l job-name=${POD_NAME} -o jsonpath='{.items[0].metadata.name}')
            sudo kubectl logs -n kaniko-builds ${POD} --all-containers=true || true
            sudo kubectl delete job ${POD_NAME} -n kaniko-builds || true
            sudo kubectl delete configmap ${POD_NAME}-context -n kaniko-builds || true
            exit 1
          }

          echo "✅ Image built successfully"
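          # Kaniko runs inside the cluster, so its pushed image digest is not
          # captured here; publish a placeholder as this step's digest output.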
echo "digest=unknown" >> $GITHUB_OUTPUT
|
||
|
||
# Cleanup
|
||
sudo kubectl delete job ${POD_NAME} -n kaniko-builds || true
|
||
sudo kubectl delete configmap ${POD_NAME}-context -n kaniko-builds || true
|
||
|
||
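      # Recover the <branch>-sha-<sha> tag for the GitOps update below: prefer the
      # full 40-character tag from the metadata output, then any shorter match, and
      # finally rebuild it from github.sha.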
      - name: Extract SHA tag
        id: extract-tag
        run: |
          # Extract the SHA-based tag from the tags list
          TAGS="${{ steps.meta.outputs.tags }}"
          echo "All tags:"
          echo "$TAGS"
          echo "---"

          # Get commit SHA (full 40 characters)
          COMMIT_SHA="${{ github.sha }}"

          # Get current branch name
          BRANCH_NAME="${{ github.ref_name }}"
          echo "Branch: $BRANCH_NAME"

          # Method 1: Extract the full SHA tag from the docker/metadata-action output
          # (docker/metadata-action creates: <branch>-sha-<full-40-char-sha>)
          SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]{40}" | head -n 1)

          # Method 2: If not found, try to extract any branch-sha- tag (fallback)
          if [ -z "$SHA_TAG" ]; then
            SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]+" | head -n 1)
            if [ -n "$SHA_TAG" ]; then
              echo "⚠️ Found SHA tag (may not be full 40 chars): $SHA_TAG"
            fi
          fi

          # Method 3: Fall back to constructing the tag from the commit SHA directly
          if [ -z "$SHA_TAG" ]; then
            SHA_TAG="${BRANCH_NAME}-sha-$COMMIT_SHA"
            echo "⚠️ Could not extract from tags, using commit SHA: $SHA_TAG"
          fi

          if [ -z "$SHA_TAG" ]; then
            echo "❌ ERROR: Failed to extract SHA tag"
            exit 1
          fi

          echo "sha-tag=$SHA_TAG" >> $GITHUB_OUTPUT
          echo "✅ Extracted SHA tag: $SHA_TAG"

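      # GitOps handoff: bump newTag in the overlay that matches the branch
      # (main -> prod, develop -> dev) and push the change so ArgoCD rolls out the
      # new image. The push assumes the checkout credentials (or GITEA_TOKEN) have
      # write access to the repository.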
      - name: Update kustomization with new image tag
        env:
          GITEA_TOKEN: ${{ secrets.GITEAREGISTRYTOKEN }}
        run: |
          git config --global user.name "gitea-actions[bot]"
          git config --global user.email "gitea-actions[bot]@users.noreply.gitea.com"

          # Validate that SHA_TAG is not empty
          SHA_TAG="${{ steps.extract-tag.outputs.sha-tag }}"
          if [ -z "$SHA_TAG" ]; then
            echo "❌ ERROR: SHA_TAG is empty, cannot update kustomization"
            exit 1
          fi

          # Determine overlay based on branch
          BRANCH_NAME="${{ github.ref_name }}"
          if [ "$BRANCH_NAME" = "main" ]; then
            OVERLAY="prod"
          elif [ "$BRANCH_NAME" = "develop" ]; then
            OVERLAY="dev"
          else
            echo "⚠️ Unknown branch: $BRANCH_NAME, skipping kustomization update"
            exit 0
          fi

          KUSTOMIZATION_FILE="deploy/k8s/overlays/$OVERLAY/kustomization.yaml"

          # Check if the kustomization file has an images section
          if grep -q "images:" "$KUSTOMIZATION_FILE"; then
            echo "📝 Updating $KUSTOMIZATION_FILE with tag: $SHA_TAG"

            # Update kustomization.yaml with the new image tag
            # (handles both a populated and an empty newTag value)
            sed -i.bak "s|newTag:.*|newTag: $SHA_TAG|" "$KUSTOMIZATION_FILE"

            # Verify the update was successful
            if grep -q "newTag: $SHA_TAG" "$KUSTOMIZATION_FILE"; then
              echo "✅ Successfully updated kustomization.yaml"
              rm -f "$KUSTOMIZATION_FILE.bak"
            else
              echo "❌ ERROR: Failed to update kustomization.yaml"
              cat "$KUSTOMIZATION_FILE"
              exit 1
            fi

            # Commit and push if there are changes
            if git diff --quiet; then
              echo "No changes to commit"
            else
              git add "$KUSTOMIZATION_FILE"
              git commit -m "Update $OVERLAY image to $SHA_TAG"
              git push
              echo "✅ Kustomization updated with new image tag: $SHA_TAG"
            fi
          else
            echo "ℹ️ $OVERLAY overlay uses base image (latest tag), skipping kustomization update"
            echo "   Image built with tag: $SHA_TAG"
          fi

      - name: Display image information
        run: |
          echo "✅ Image built and pushed successfully!"
          echo "📦 Image tags:"
          echo "${{ steps.meta.outputs.tags }}"
          echo "🔖 SHA tag: ${{ steps.extract-tag.outputs.sha-tag }}"
          echo "🔖 Digest: ${{ steps.build.outputs.digest }}"
          echo ""
          echo "🚀 Kustomization updated with new image tag"
          echo "   ArgoCD will automatically detect and deploy this new image"
          echo "   Monitor deployment at your ArgoCD dashboard"