jovies/.gitea/workflows/build.yml
Mayne0213 bf62077841 FIX(config): kubeconfig setup with fallback logic
- Add checks for kubeconfig file existence
- Try multiple methods to locate kubeconfig
- Add debugging output for troubleshooting
- Test kubectl connection with fallback to sudo
2025-12-28 17:06:43 +09:00

name: Build Docker Image

on:
  push:
    branches: [main, develop]
    tags:
      - 'v*'
  workflow_dispatch:

env:
  REGISTRY: gitea0213.kro.kr
  IMAGE_NAME: ${{ github.repository }}
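
# Note: REGISTRY above presumably points at the Gitea instance's built-in
# container registry; IMAGE_NAME mirrors the repository path and is lowercased
# in a later step because image references must be lowercase.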

jobs:
  build-and-push:
    runs-on: ubuntu-24.04-arm
    permissions:
      contents: write
      packages: write
    outputs:
      image-tag: ${{ steps.meta.outputs.tags }}
      image-digest: ${{ steps.build.outputs.digest }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install kubectl
        run: |
          if ! command -v kubectl &> /dev/null; then
            curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"
            chmod +x kubectl
            sudo mv kubectl /usr/local/bin/
          fi
          kubectl version --client
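
      # Kubeconfig resolution (see the commit message above): prefer the local
      # K3s config at /etc/rancher/k3s/k3s.yaml, fall back to copying it via
      # sudo, and fail with a hint to configure a KUBECONFIG secret when
      # neither is readable. The final connectivity check retries with sudo.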
      - name: Setup kubeconfig
        run: |
          mkdir -p $HOME/.kube
          # Check if K3s config exists locally
          if [ -f /etc/rancher/k3s/k3s.yaml ]; then
            sudo cat /etc/rancher/k3s/k3s.yaml > $HOME/.kube/config
          else
            # If not found, log where the runner is executing, then try the master node's config
            echo "K3s config not found locally, checking runner location..."
            hostname
            pwd
            whoami
            # Try to copy from the master node via sudo (if the runner has access)
            if sudo test -f /etc/rancher/k3s/k3s.yaml; then
              sudo cp /etc/rancher/k3s/k3s.yaml $HOME/.kube/config
              sudo chown $(whoami):$(whoami) $HOME/.kube/config
            else
              echo "❌ ERROR: Cannot find kubeconfig. Please configure KUBECONFIG secret."
              exit 1
            fi
          fi
          chmod 600 $HOME/.kube/config
          # Test connection
          kubectl get nodes || sudo kubectl get nodes

      - name: Lowercase repository name
        id: lowercase
        run: |
          echo "repo=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha,prefix={{branch}}-sha-,format=long
            type=raw,value=latest,enable={{is_default_branch}}
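
      # The image itself is built by a Kaniko Job inside the cluster (namespace
      # kaniko-builds) rather than with a local docker build, so the runner only
      # needs kubectl access and registry credentials. For a push to main, the
      # tag rules above are expected to yield roughly (illustrative, not verified):
      #   gitea0213.kro.kr/<owner>/<repo>:main
      #   gitea0213.kro.kr/<owner>/<repo>:main-sha-<full-40-char-sha>
      #   gitea0213.kro.kr/<owner>/<repo>:latest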
      - name: Create Kaniko build context
        run: |
          # Create tar.gz of build context
          tar czf /tmp/build-context.tar.gz -C services/nextjs .
          # Create namespace if not exists
          sudo kubectl get namespace kaniko-builds 2>/dev/null || sudo kubectl create namespace kaniko-builds
          # Create/update registry credentials secret
          sudo kubectl create secret docker-registry kaniko-registry-creds \
            --docker-server=${{ env.REGISTRY }} \
            --docker-username=bluemayne \
            --docker-password=${{ secrets.GITEAREGISTRY }} \
            --namespace=kaniko-builds \
            --dry-run=client -o yaml | sudo kubectl apply -f -
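
      # The build step below renders deploy/kaniko/job.yaml by replacing the
      # placeholder "# DESTINATIONS will be added here" with one
      # "- --destination=<tag>" line per tag. This relies on GNU sed expanding
      # "\n" in the replacement text into a real newline; each injected line
      # starts with the single space encoded in DESTINATIONS, so the template's
      # args list is presumably indented to match.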
      - name: Build and push with Kaniko on Kubernetes
        id: build
        run: |
          TAGS="${{ steps.meta.outputs.tags }}"
          # Prepare destination arguments
          DESTINATIONS=""
          while IFS= read -r tag; do
            DESTINATIONS="$DESTINATIONS\n - --destination=$tag"
          done <<< "$TAGS"
          # Create unique build name
          BUILD_NAME="kaniko-build-${{ github.run_number }}-$(date +%s)"
          # Prepare Kaniko Job manifest from template
          sed -e "s|KANIKO_BUILD_NAME|${BUILD_NAME}|g" \
            -e "s|GIT_REPO_URL|https://gitea0213.kro.kr/${{ github.repository }}.git|g" \
            -e "s|GIT_SHA|${{ github.sha }}|g" \
            -e "s|CACHE_REPO|${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}/cache|g" \
            -e "s|# DESTINATIONS will be added here|${DESTINATIONS}|g" \
            deploy/kaniko/job.yaml > /tmp/kaniko-job.yaml
          echo "📋 Generated Kaniko Job manifest:"
          cat /tmp/kaniko-job.yaml
          # Apply the Job
          sudo kubectl apply -f /tmp/kaniko-job.yaml
          # Wait for job to complete
          echo "⏳ Waiting for Kaniko job to complete..."
          sudo kubectl wait --for=condition=complete --timeout=600s job/${BUILD_NAME} -n kaniko-builds || {
            echo "❌ Job failed or timed out. Showing logs:"
            POD=$(sudo kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
            sudo kubectl logs -n kaniko-builds ${POD} --all-containers=true || true
            sudo kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
            sudo kubectl delete configmap ${BUILD_NAME}-dockerfile -n kaniko-builds || true
            exit 1
          }
          echo "✅ Image built successfully"
          # Get digest from logs
          POD=$(sudo kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
          DIGEST=$(sudo kubectl logs -n kaniko-builds ${POD} -c kaniko 2>/dev/null | grep -oP 'digest: \K[a-zA-Z0-9:]+' | tail -1 || echo "unknown")
          echo "digest=${DIGEST}" >> $GITHUB_OUTPUT
          # Cleanup
          sudo kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
          sudo kubectl delete configmap ${BUILD_NAME}-dockerfile -n kaniko-builds || true
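
      # The extraction below assumes the metadata output contains a
      # "<branch>-sha-<full-40-char-sha>" tag (from the type=sha,format=long
      # rule above); if no such tag is found, it is reconstructed directly
      # from github.sha.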
      - name: Extract SHA tag
        id: extract-tag
        run: |
          # Extract the SHA-based tag from the tags list
          TAGS="${{ steps.meta.outputs.tags }}"
          echo "All tags:"
          echo "$TAGS"
          echo "---"
          # Get commit SHA (full 40 characters)
          COMMIT_SHA="${{ github.sha }}"
          # Get current branch name
          BRANCH_NAME="${{ github.ref_name }}"
          echo "Branch: $BRANCH_NAME"
          # Method 1: Extract the full SHA tag from docker/metadata-action output
          # docker/metadata-action creates: <branch>-sha-<full-40-char-sha>
          SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]{40}" | head -n 1)
          # Method 2: If not found, try to extract any branch-sha- tag (fallback)
          if [ -z "$SHA_TAG" ]; then
            SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]+" | head -n 1)
            if [ -n "$SHA_TAG" ]; then
              echo "⚠️ Found SHA tag (may not be full 40 chars): $SHA_TAG"
            fi
          fi
          # Method 3: Fallback to commit SHA directly (construct the tag)
          if [ -z "$SHA_TAG" ]; then
            SHA_TAG="${BRANCH_NAME}-sha-$COMMIT_SHA"
            echo "⚠️ Could not extract from tags, using commit SHA: $SHA_TAG"
          fi
          if [ -z "$SHA_TAG" ]; then
            echo "❌ ERROR: Failed to extract SHA tag"
            exit 1
          fi
          echo "sha-tag=$SHA_TAG" >> $GITHUB_OUTPUT
          echo "✅ Extracted SHA tag: $SHA_TAG"
      - name: Update kustomization with new image tag
        env:
          GITEA_TOKEN: ${{ secrets.GITEAREGISTRYTOKEN }}
        run: |
          git config --global user.name "gitea-actions[bot]"
          git config --global user.email "gitea-actions[bot]@users.noreply.gitea.com"
          # Validate that SHA_TAG is not empty
          SHA_TAG="${{ steps.extract-tag.outputs.sha-tag }}"
          if [ -z "$SHA_TAG" ]; then
            echo "❌ ERROR: SHA_TAG is empty, cannot update kustomization"
            exit 1
          fi
          # Determine overlay based on branch
          BRANCH_NAME="${{ github.ref_name }}"
          if [ "$BRANCH_NAME" = "main" ]; then
            OVERLAY="prod"
          elif [ "$BRANCH_NAME" = "develop" ]; then
            OVERLAY="dev"
          else
            echo "⚠️ Unknown branch: $BRANCH_NAME, skipping kustomization update"
            exit 0
          fi
          KUSTOMIZATION_FILE="deploy/k8s/overlays/$OVERLAY/kustomization.yaml"
          # Check if kustomization file has images section
          if grep -q "images:" "$KUSTOMIZATION_FILE"; then
            echo "📝 Updating $KUSTOMIZATION_FILE with tag: $SHA_TAG"
            # Update kustomization.yaml with new image tag
            # Handle both cases: newTag: (with value) and newTag: (empty)
            sed -i.bak "s|newTag:.*|newTag: $SHA_TAG|" "$KUSTOMIZATION_FILE"
            # Verify the update was successful
            if grep -q "newTag: $SHA_TAG" "$KUSTOMIZATION_FILE"; then
              echo "✅ Successfully updated kustomization.yaml"
              rm -f "$KUSTOMIZATION_FILE.bak"
            else
              echo "❌ ERROR: Failed to update kustomization.yaml"
              cat "$KUSTOMIZATION_FILE"
              exit 1
            fi
            # Commit and push if there are changes
            if git diff --quiet; then
              echo "No changes to commit"
            else
              git add "$KUSTOMIZATION_FILE"
              git commit -m "Update $OVERLAY image to $SHA_TAG"
              git push
              echo "✅ Kustomization updated with new image tag: $SHA_TAG"
            fi
          else
            echo " $OVERLAY overlay uses base image (latest tag), skipping kustomization update"
            echo " Image built with tag: $SHA_TAG"
          fi

      - name: Display image information
        run: |
          echo "✅ Image built and pushed successfully!"
          echo "📦 Image tags:"
          echo "${{ steps.meta.outputs.tags }}"
          echo "🔖 SHA tag: ${{ steps.extract-tag.outputs.sha-tag }}"
          echo "🔖 Digest: ${{ steps.build.outputs.digest }}"
          echo ""
          echo "🚀 Kustomization updated with new image tag"
          echo " ArgoCD will automatically detect and deploy this new image"
          echo " Monitor deployment at your ArgoCD dashboard"