chore(deploy): use ArgoCD for Kaniko

Infrastructure as Code:
- Add Kaniko namespace and RBAC manifests
- Create ArgoCD Application for Kaniko infrastructure
- Add Kustomize configuration for Kaniko resources

Workflow improvements:
- Remove kubeconfig dependency
- Use in-cluster ServiceAccount (runner runs in K8s)
- Remove all sudo commands
- Simplify Kubernetes access

GitOps workflow:
1. Push manifests to Git
2. ArgoCD auto-syncs infrastructure
3. Gitea runner uses ServiceAccount permissions
4. Kaniko builds run in kaniko-builds namespace
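
A minimal sketch of checking each step of that loop from the runner's shell (the Application and namespace names match the manifests in this commit; the commands themselves are standard git/kubectl, nothing repo-specific):

    git push origin main                                      # 1. manifests reach Git
    kubectl get application kaniko-infrastructure -n argocd   # 2. ArgoCD has picked them up
    kubectl auth can-i create jobs -n kaniko-builds           # 3. runner SA permissions in effect
    kubectl get jobs -n kaniko-builds                         # 4. Kaniko builds live here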

Benefits:
- True GitOps approach
- No kubeconfig secret to manage
- Declarative infrastructure
- ArgoCD handles reconciliation
- Audit trail in Git
commit 37a7dbd561
parent 363f71d4a6
Date: 2025-12-28 17:36:10 +09:00
5 changed files with 129 additions and 34 deletions


@@ -35,24 +35,11 @@ jobs:
           fi
           kubectl version --client
-      - name: Setup kubeconfig
-        env:
-          KUBECONFIG_CONTENT: ${{ secrets.KUBECONFIG }}
+      - name: Setup Kubernetes access
         run: |
-          mkdir -p $HOME/.kube
-          if [ -z "$KUBECONFIG_CONTENT" ]; then
-            echo "❌ ERROR: KUBECONFIG secret is not set."
-            echo "Please add kubeconfig to Gitea Secrets with name 'KUBECONFIG'"
-            exit 1
-          fi
-          # Decode and save kubeconfig
-          echo "$KUBECONFIG_CONTENT" | base64 -d > $HOME/.kube/config
-          chmod 600 $HOME/.kube/config
-          # Test connection
-          echo "Testing Kubernetes connection..."
+          # Running in Kubernetes Pod - use in-cluster config
+          echo "Running in Kubernetes - using ServiceAccount"
+          kubectl version
           kubectl get nodes -o wide
       - name: Lowercase repository name
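
"In-cluster config" here is the standard kubectl fallback: when no kubeconfig is present, it authenticates with the ServiceAccount credentials the kubelet mounts into every Pod. A quick sketch of what the new step relies on (the paths and env vars below are Kubernetes defaults, not anything this repo defines):

    # Token, CA bundle, and namespace mounted into the runner Pod
    ls /var/run/secrets/kubernetes.io/serviceaccount/   # ca.crt  namespace  token
    # API server endpoint injected by the kubelet
    echo "${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"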
@@ -75,19 +62,16 @@ jobs:
       - name: Create Kaniko build context
         run: |
-          # Create tar.gz of build context
-          tar czf /tmp/build-context.tar.gz -C services/nextjs .
-          # Create namespace if not exists
-          sudo kubectl get namespace kaniko-builds 2>/dev/null || sudo kubectl create namespace kaniko-builds
+          # Create namespace if not exists (will be created by ArgoCD, but check anyway)
+          kubectl get namespace kaniko-builds 2>/dev/null || kubectl create namespace kaniko-builds
           # Create/update registry credentials secret
-          sudo kubectl create secret docker-registry kaniko-registry-creds \
+          kubectl create secret docker-registry kaniko-registry-creds \
             --docker-server=${{ env.REGISTRY }} \
             --docker-username=bluemayne \
             --docker-password=${{ secrets.GITEAREGISTRY }} \
             --namespace=kaniko-builds \
-            --dry-run=client -o yaml | sudo kubectl apply -f -
+            --dry-run=client -o yaml | kubectl apply -f -
       - name: Build and push with Kaniko on Kubernetes
         id: build
@@ -115,29 +99,29 @@ jobs:
           cat /tmp/kaniko-job.yaml
           # Apply the Job
-          sudo kubectl apply -f /tmp/kaniko-job.yaml
+          kubectl apply -f /tmp/kaniko-job.yaml
           # Wait for job to complete
           echo "⏳ Waiting for Kaniko job to complete..."
-          sudo kubectl wait --for=condition=complete --timeout=600s job/${BUILD_NAME} -n kaniko-builds || {
+          kubectl wait --for=condition=complete --timeout=600s job/${BUILD_NAME} -n kaniko-builds || {
             echo "❌ Job failed or timed out. Showing logs:"
-            POD=$(sudo kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
-            sudo kubectl logs -n kaniko-builds ${POD} --all-containers=true || true
-            sudo kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
-            sudo kubectl delete configmap ${BUILD_NAME}-dockerfile -n kaniko-builds || true
+            POD=$(kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
+            kubectl logs -n kaniko-builds ${POD} --all-containers=true || true
+            kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
+            kubectl delete configmap ${BUILD_NAME}-dockerfile -n kaniko-builds || true
             exit 1
           }
           echo "✅ Image built successfully"
           # Get digest from logs
-          POD=$(sudo kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
-          DIGEST=$(sudo kubectl logs -n kaniko-builds ${POD} -c kaniko 2>/dev/null | grep -oP 'digest: \K[a-zA-Z0-9:]+' | tail -1 || echo "unknown")
+          POD=$(kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
+          DIGEST=$(kubectl logs -n kaniko-builds ${POD} -c kaniko 2>/dev/null | grep -oP 'digest: \K[a-zA-Z0-9:]+' | tail -1 || echo "unknown")
           echo "digest=${DIGEST}" >> $GITHUB_OUTPUT
           # Cleanup
-          sudo kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
-          sudo kubectl delete configmap ${BUILD_NAME}-dockerfile -n kaniko-builds || true
+          kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
+          kubectl delete configmap ${BUILD_NAME}-dockerfile -n kaniko-builds || true
       - name: Extract SHA tag
         id: extract-tag
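
One caveat in this step: `kubectl wait --for=condition=complete` returns early only on success, so a failed build still burns the full 600s timeout before the error branch runs. A hedged alternative (assuming the Job is created with backoffLimit: 0, so a single failed Pod means a failed build) is to poll both status counters:

    # Poll Job status instead of blocking on the "complete" condition alone
    for _ in $(seq 1 120); do
      SUCCEEDED=$(kubectl get job "${BUILD_NAME}" -n kaniko-builds -o jsonpath='{.status.succeeded}')
      FAILED=$(kubectl get job "${BUILD_NAME}" -n kaniko-builds -o jsonpath='{.status.failed}')
      if [ "${SUCCEEDED:-0}" -ge 1 ]; then break; fi   # image built
      if [ "${FAILED:-0}" -ge 1 ]; then exit 1; fi     # fail fast, skip the timeout wait
      sleep 5
    done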


@@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kaniko-infrastructure
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea0213.kro.kr/bluemayne/jovies.git
    targetRevision: main
    path: deploy/kaniko
  destination:
    server: https://kubernetes.default.svc
    namespace: kaniko-builds
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
      allowEmpty: false
    syncOptions:
      - CreateNamespace=true
    retry:
      limit: 5
      backoff:
        duration: 5s
        factor: 2
        maxDuration: 3m
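
Once this Application is in the cluster, sync state is readable straight off the resource; the field paths below are the stock Argo CD Application CRD status (the CLI equivalent is `argocd app get kaniko-infrastructure`):

    kubectl get application kaniko-infrastructure -n argocd \
      -o jsonpath='{.status.sync.status}{"  "}{.status.health.status}{"\n"}'
    # Expected once reconciled: Synced  Healthy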


@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- rbac.yaml
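
The overlay can be rendered locally before ArgoCD ever sees it, since kubectl bundles kustomize (the path matches spec.source.path in the Application above):

    kubectl kustomize deploy/kaniko                     # print namespace.yaml + rbac.yaml, merged
    kubectl apply -k deploy/kaniko --dry-run=server     # optional: server-side validation only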


@@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kaniko-builds
  labels:
    app.kubernetes.io/name: kaniko-builds
    app.kubernetes.io/component: build-infrastructure

deploy/kaniko/rbac.yaml (new file)

@@ -0,0 +1,69 @@
---
# ServiceAccount for Gitea runner (optional, if you want dedicated SA)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: gitea-runner
  namespace: gitea
---
# Role to manage Kaniko builds
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: kaniko-builder
  namespace: kaniko-builds
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create", "get", "list", "delete"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get"]
  - apiGroups: ["batch"]
    resources: ["jobs"]
    verbs: ["create", "get", "list", "watch", "delete"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "create", "update", "patch"]
---
# RoleBinding for default ServiceAccount in gitea namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gitea-runner-kaniko-builder
  namespace: kaniko-builds
subjects:
  - kind: ServiceAccount
    name: default
    namespace: gitea
roleRef:
  kind: Role
  name: kaniko-builder
  apiGroup: rbac.authorization.k8s.io
---
# ClusterRole to create namespaces (if needed)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: namespace-creator
rules:
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["create", "get", "list"]
---
# ClusterRoleBinding for default ServiceAccount in gitea namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gitea-runner-namespace-creator
subjects:
  - kind: ServiceAccount
    name: default
    namespace: gitea
roleRef:
  kind: ClusterRole
  name: namespace-creator
  apiGroup: rbac.authorization.k8s.io
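
These grants can be sanity-checked without running a build, via kubectl impersonation of the bound ServiceAccount (verification commands only, not part of the manifests):

    # Role in kaniko-builds: all of these should print "yes"
    kubectl auth can-i create jobs -n kaniko-builds --as=system:serviceaccount:gitea:default
    kubectl auth can-i get pods/log -n kaniko-builds --as=system:serviceaccount:gitea:default
    # ClusterRole: namespace creation
    kubectl auth can-i create namespaces --as=system:serviceaccount:gitea:default
    # Not granted by the Role, expected "no"
    kubectl auth can-i delete secrets -n kaniko-builds --as=system:serviceaccount:gitea:default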