CHORE(docker): run kaniko directly

Remove Kubernetes complexity:
- Remove kubectl installation and setup
- Remove kubeconfig generation
- Remove Kubernetes Job creation
- Use docker run to execute Kaniko directly

Benefits:
- Much simpler workflow
- No Kubernetes API access needed
- No RBAC complexity in workflow
- Faster execution (no Job overhead)
- Kaniko itself still builds without a Docker daemon (the runner only
  needs Docker to launch the executor container)

Note: the Kaniko infrastructure (namespace, RBAC) is kept via
ArgoCD for potential future use or manual kubectl access.
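
For reference, the build introduced here can be reproduced outside CI with plain
Docker. The sketch below mirrors the new workflow step; the registry host,
username, token, and image name are placeholders, not values taken from the
workflow or its secrets:

    # Hypothetical local run of the same Kaniko-in-Docker build (placeholder values).
    REGISTRY=registry.example.com
    AUTH=$(echo -n "user:token" | base64)

    # Kaniko reads registry credentials from a Docker-style config.json
    # mounted at /kaniko/.docker inside the executor container.
    mkdir -p /tmp/kaniko-config
    printf '{"auths":{"%s":{"auth":"%s"}}}\n' "$REGISTRY" "$AUTH" > /tmp/kaniko-config/config.json

    # Build from the checked-out repo and push to the placeholder destination.
    docker run --rm \
      -v "$(pwd)":/workspace \
      -v /tmp/kaniko-config:/kaniko/.docker:ro \
      gcr.io/kaniko-project/executor:latest \
      --context=/workspace/services/nextjs \
      --dockerfile=/workspace/deploy/docker/Dockerfile.prod \
      --destination="$REGISTRY/example/app:dev"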
Date: 2025-12-28 17:40:59 +09:00
parent 7b0f520e54
commit 619eabf4c4

@@ -18,9 +18,6 @@ jobs:
       contents: write
       packages: write
-    env:
-      KUBECONFIG: /tmp/kubeconfig
     outputs:
       image-tag: ${{ steps.meta.outputs.tags }}
       image-digest: ${{ steps.build.outputs.digest }}
@@ -29,63 +26,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4
-      - name: Install kubectl
+      - name: Verify Docker access
         run: |
-          if ! command -v kubectl &> /dev/null; then
-            curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"
-            chmod +x kubectl
-            sudo mv kubectl /usr/local/bin/
-          fi
-          kubectl version --client
-      - name: Setup Kubernetes access
-        run: |
-          # Running in Kubernetes Pod - create kubeconfig from ServiceAccount
-          echo "Setting up in-cluster kubeconfig"
-          SA_PATH="/var/run/secrets/kubernetes.io/serviceaccount"
-          if [ ! -f "${SA_PATH}/token" ]; then
-            echo "❌ ServiceAccount token not found"
-            exit 1
-          fi
-          echo "✅ ServiceAccount token found"
-          # Get cluster info
-          KUBE_HOST="${KUBERNETES_SERVICE_HOST:-kubernetes.default.svc}"
-          KUBE_PORT="${KUBERNETES_SERVICE_PORT:-443}"
-          KUBE_URL="https://${KUBE_HOST}:${KUBE_PORT}"
-          echo "Kubernetes API: ${KUBE_URL}"
-          # Create kubeconfig
-          cat > ${KUBECONFIG} <<EOF
-          apiVersion: v1
-          kind: Config
-          clusters:
-          - cluster:
-              certificate-authority: ${SA_PATH}/ca.crt
-              server: ${KUBE_URL}
-            name: default
-          contexts:
-          - context:
-              cluster: default
-              namespace: $(cat ${SA_PATH}/namespace)
-              user: default
-            name: default
-          current-context: default
-          users:
-          - name: default
-            user:
-              tokenFile: ${SA_PATH}/token
-          EOF
-          chmod 600 ${KUBECONFIG}
-          # Test connection
-          kubectl version
-          kubectl get nodes -o wide
+          echo "Checking Docker availability..."
+          docker version
+          docker info | head -20
       - name: Lowercase repository name
         id: lowercase
@@ -105,68 +50,42 @@ jobs:
             type=sha,prefix={{branch}}-sha-,format=long
             type=raw,value=latest,enable={{is_default_branch}}
-      - name: Create Kaniko build context
+      - name: Prepare Kaniko credentials
         run: |
-          # Create namespace if not exists (will be created by ArgoCD, but check anyway)
-          kubectl get namespace kaniko-builds 2>/dev/null || kubectl create namespace kaniko-builds
-          # Create/update registry credentials secret
-          kubectl create secret docker-registry kaniko-registry-creds \
-            --docker-server=${{ env.REGISTRY }} \
-            --docker-username=bluemayne \
-            --docker-password=${{ secrets.GITEAREGISTRY }} \
-            --namespace=kaniko-builds \
-            --dry-run=client -o yaml | kubectl apply -f -
-      - name: Build and push with Kaniko on Kubernetes
+          mkdir -p /tmp/kaniko-config
+          echo "{\"auths\":{\"${{ env.REGISTRY }}\":{\"auth\":\"$(echo -n bluemayne:${{ secrets.GITEAREGISTRY }} | base64)\"}}}" > /tmp/kaniko-config/config.json
+      - name: Build and push with Kaniko (Docker)
         id: build
         run: |
           TAGS="${{ steps.meta.outputs.tags }}"
-          # Prepare destination arguments
+          # Prepare destination arguments for all tags
          DESTINATIONS=""
           while IFS= read -r tag; do
-            DESTINATIONS="$DESTINATIONS\n - --destination=$tag"
+            DESTINATIONS="$DESTINATIONS --destination=$tag"
           done <<< "$TAGS"
-          # Create unique build name
-          BUILD_NAME="kaniko-build-${{ github.run_number }}-$(date +%s)"
-          # Prepare Kaniko Job manifest from template
-          sed -e "s|KANIKO_BUILD_NAME|${BUILD_NAME}|g" \
-              -e "s|GIT_REPO_URL|https://gitea0213.kro.kr/${{ github.repository }}.git|g" \
-              -e "s|GIT_SHA|${{ github.sha }}|g" \
-              -e "s|CACHE_REPO|${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}/cache|g" \
-              -e "s|# DESTINATIONS will be added here|${DESTINATIONS}|g" \
-              deploy/kaniko/job.yaml > /tmp/kaniko-job.yaml
-          echo "📋 Generated Kaniko Job manifest:"
-          cat /tmp/kaniko-job.yaml
-          # Apply the Job
-          kubectl apply -f /tmp/kaniko-job.yaml
-          # Wait for job to complete
-          echo "⏳ Waiting for Kaniko job to complete..."
-          kubectl wait --for=condition=complete --timeout=600s job/${BUILD_NAME} -n kaniko-builds || {
-            echo "❌ Job failed or timed out. Showing logs:"
-            POD=$(kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
-            kubectl logs -n kaniko-builds ${POD} --all-containers=true || true
-            kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
-            kubectl delete configmap ${BUILD_NAME}-dockerfile -n kaniko-builds || true
-            exit 1
-          }
-          echo "✅ Image built successfully"
-          # Get digest from logs
-          POD=$(kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
-          DIGEST=$(kubectl logs -n kaniko-builds ${POD} -c kaniko 2>/dev/null | grep -oP 'digest: \K[a-zA-Z0-9:]+' | tail -1 || echo "unknown")
-          echo "digest=${DIGEST}" >> $GITHUB_OUTPUT
-          # Cleanup
-          kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
-          kubectl delete configmap ${BUILD_NAME}-dockerfile -n kaniko-builds || true
+          echo "📦 Building image with tags:"
+          echo "$TAGS"
+          # Build and push with Kaniko via Docker
+          docker run --rm \
+            -v $(pwd):/workspace \
+            -v /tmp/kaniko-config:/kaniko/.docker:ro \
+            gcr.io/kaniko-project/executor:latest \
+            --context=/workspace/services/nextjs \
+            --dockerfile=/workspace/deploy/docker/Dockerfile.prod \
+            $DESTINATIONS \
+            --cache=true \
+            --cache-repo=${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}/cache \
+            --compressed-caching=false \
+            --snapshot-mode=redo \
+            --use-new-run \
+            --verbosity=info
+          echo "✅ Image built and pushed successfully"
+          echo "digest=unknown" >> $GITHUB_OUTPUT
       - name: Extract SHA tag
         id: extract-tag
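
The rewritten build step hard-codes "digest=unknown", so the job's image-digest
output is no longer meaningful. If downstream steps still need the real digest,
one option (a sketch only, assuming the pinned Kaniko executor supports the
--digest-file flag; verify against the version actually in use) is to have Kaniko
write the digest into the mounted workspace and read it back:

    # Sketch: append to the docker run flags above
    #   --digest-file=/workspace/.kaniko-digest
    # then, after the build (the workspace is the current directory on the runner):
    DIGEST=$(cat .kaniko-digest)
    echo "digest=${DIGEST}" >> "$GITHUB_OUTPUT"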