REFACTOR(repo): simplify project structure

- Move services/nextjs/ to nextjs/
- Move Dockerfile.prod to Dockerfile at root
- Remove deploy/ folder (K8s manifests moved to K3S-HOME/web-apps)
- Remove .gitea/ workflows
- Update GitHub Actions for new structure
- Remove develop branch triggers
2026-01-05 02:00:36 +09:00
parent 8f6595a74a
commit 1fbd0467bd
53 changed files with 17 additions and 1050 deletions
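The moves described above correspond roughly to the following commands (an illustrative sketch based on the paths in the commit message and the diffs below; the exact commands used are not recorded here):

git mv services/nextjs nextjs                      # promote the Next.js app to the repo root
git mv deploy/docker/Dockerfile.prod Dockerfile    # single production Dockerfile at the root
git rm -r deploy .gitea                            # K8s manifests now live in K3S-HOME/web-apps
# .github/workflows/*.yml are edited in place for the new paths and main-only triggers (see diffs below)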

View File

@@ -1,289 +0,0 @@
name: Build Docker Image
on:
push:
branches: [main, develop]
tags:
- 'v*'
workflow_dispatch:
env:
REGISTRY: gitea0213.kro.kr
IMAGE_NAME: ${{ github.repository }}
jobs:
build-and-push:
runs-on: ubuntu-24.04-arm
permissions:
contents: write
packages: write
outputs:
image-tag: ${{ steps.meta.outputs.tags }}
image-digest: ${{ steps.build.outputs.digest }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup kubectl
run: |
if command -v kubectl &> /dev/null; then
echo "✅ kubectl already installed"
kubectl version --client
else
echo "📥 Installing kubectl..."
# Use specific version to avoid querying stable.txt
KUBECTL_VERSION="v1.31.0"
curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/arm64/kubectl" &
DOWNLOAD_PID=$!
# Show progress
while kill -0 $DOWNLOAD_PID 2>/dev/null; do
echo -n "."
sleep 1
done
wait $DOWNLOAD_PID
echo ""
chmod +x kubectl
sudo mv kubectl /usr/local/bin/
kubectl version --client
fi
- name: Setup kubeconfig from Secret
env:
KUBECONFIG_B64: ${{ secrets.KUBECONFIG }}
run: |
if [ -z "$KUBECONFIG_B64" ]; then
echo "❌ KUBECONFIG secret not set"
echo "Please add KUBECONFIG to Gitea repository secrets"
exit 1
fi
echo "📝 KUBECONFIG secret found (length: ${#KUBECONFIG_B64})"
mkdir -p $HOME/.kube
# Clean up the base64 string (remove all whitespace and newlines)
CLEAN_B64=$(echo "$KUBECONFIG_B64" | tr -d '[:space:]')
echo "After cleanup: ${#CLEAN_B64} chars"
# Try decoding
if echo "$CLEAN_B64" | base64 -d > $HOME/.kube/config 2>/dev/null; then
echo "✅ Successfully decoded kubeconfig"
elif echo "$CLEAN_B64" | base64 --decode > $HOME/.kube/config 2>/dev/null; then
echo "✅ Successfully decoded kubeconfig (with --decode)"
else
echo "❌ Both base64 decode methods failed"
echo "Trying to save as-is (maybe already decoded)..."
echo "$KUBECONFIG_B64" > $HOME/.kube/config
fi
chmod 600 $HOME/.kube/config
# Verify it's valid YAML
if head -1 $HOME/.kube/config | grep -q "apiVersion"; then
echo "✅ Kubeconfig appears valid"
else
echo "⚠️ Kubeconfig may not be valid, first line:"
head -1 $HOME/.kube/config
fi
# Test connection
kubectl cluster-info
kubectl get nodes -o wide
- name: Lowercase repository name
id: lowercase
run: |
echo "repo=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Extract metadata (tags, labels)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha,prefix={{branch}}-sha-,format=long
type=raw,value=latest,enable={{is_default_branch}}
- name: Create registry credentials in Kubernetes
run: |
# Ensure namespace exists
kubectl get namespace kaniko-builds 2>/dev/null || kubectl create namespace kaniko-builds
# Create/update registry secret
kubectl create secret docker-registry kaniko-registry-creds \
--docker-server=${{ env.REGISTRY }} \
--docker-username=bluemayne \
--docker-password=${{ secrets.GITEAREGISTRY }} \
--namespace=kaniko-builds \
--dry-run=client -o yaml | kubectl apply -f -
- name: Build and push with Kaniko Job
id: build
run: |
TAGS="${{ steps.meta.outputs.tags }}"
# Prepare destination arguments
DESTINATIONS=""
while IFS= read -r tag; do
DESTINATIONS="$DESTINATIONS\n - --destination=$tag"
done <<< "$TAGS"
# Create unique build name
BUILD_NAME="kaniko-build-${{ github.run_number }}-$(date +%s)"
echo "📦 Building image: ${BUILD_NAME}"
echo "Tags: $TAGS"
# Generate Kaniko Job from template
sed -e "s|KANIKO_BUILD_NAME|${BUILD_NAME}|g" \
-e "s|GIT_REPO_URL|https://gitea0213.kro.kr/${{ github.repository }}.git|g" \
-e "s|GIT_SHA|${{ github.sha }}|g" \
-e "s|CACHE_REPO|${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}/cache|g" \
-e "s|# DESTINATIONS will be added here|${DESTINATIONS}|g" \
deploy/kaniko/job.yaml > /tmp/kaniko-job.yaml
# Apply Job
kubectl apply -f /tmp/kaniko-job.yaml
# Wait for completion
echo "⏳ Waiting for Kaniko Job to complete..."
kubectl wait --for=condition=complete --timeout=600s job/${BUILD_NAME} -n kaniko-builds || {
echo "❌ Job failed or timed out"
POD=$(kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
echo "📋 Logs:"
kubectl logs -n kaniko-builds ${POD} --all-containers=true || true
kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
exit 1
}
echo "✅ Image built successfully"
# Get digest
POD=$(kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
DIGEST=$(kubectl logs -n kaniko-builds ${POD} -c kaniko 2>/dev/null | grep -oP 'digest: \K[a-zA-Z0-9:]+' | tail -1 || echo "unknown")
echo "digest=${DIGEST}" >> $GITHUB_OUTPUT
# Cleanup
kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
- name: Extract SHA tag
id: extract-tag
run: |
# Extract the SHA-based tag from the tags list
TAGS="${{ steps.meta.outputs.tags }}"
echo "All tags:"
echo "$TAGS"
echo "---"
# Get commit SHA (full 40 characters)
COMMIT_SHA="${{ github.sha }}"
# Get current branch name
BRANCH_NAME="${{ github.ref_name }}"
echo "Branch: $BRANCH_NAME"
# Method 1: Extract the full SHA tag from docker/metadata-action output
# docker/metadata-action creates: <branch>-sha-<full-40-char-sha>
SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]{40}" | head -n 1)
# Method 2: If not found, try to extract any branch-sha- tag (fallback)
if [ -z "$SHA_TAG" ]; then
SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]+" | head -n 1)
if [ -n "$SHA_TAG" ]; then
echo "⚠️ Found SHA tag (may not be full 40 chars): $SHA_TAG"
fi
fi
# Method 3: Fallback to commit SHA directly (construct the tag)
if [ -z "$SHA_TAG" ]; then
SHA_TAG="${BRANCH_NAME}-sha-$COMMIT_SHA"
echo "⚠️ Could not extract from tags, using commit SHA: $SHA_TAG"
fi
if [ -z "$SHA_TAG" ]; then
echo "❌ ERROR: Failed to extract SHA tag"
exit 1
fi
echo "sha-tag=$SHA_TAG" >> $GITHUB_OUTPUT
echo "✅ Extracted SHA tag: $SHA_TAG"
- name: Update kustomization with new image tag
env:
GITEA_TOKEN: ${{ secrets.GITEAREGISTRYTOKEN }}
run: |
git config --global user.name "gitea-actions[bot]"
git config --global user.email "gitea-actions[bot]@users.noreply.gitea.com"
# Validate that SHA_TAG is not empty
SHA_TAG="${{ steps.extract-tag.outputs.sha-tag }}"
if [ -z "$SHA_TAG" ]; then
echo "❌ ERROR: SHA_TAG is empty, cannot update kustomization"
exit 1
fi
# Determine overlay based on branch
BRANCH_NAME="${{ github.ref_name }}"
if [ "$BRANCH_NAME" = "main" ]; then
OVERLAY="prod"
elif [ "$BRANCH_NAME" = "develop" ]; then
OVERLAY="dev"
else
echo "⚠️ Unknown branch: $BRANCH_NAME, skipping kustomization update"
exit 0
fi
KUSTOMIZATION_FILE="deploy/k8s/overlays/$OVERLAY/kustomization.yaml"
# Check if kustomization file has images section
if grep -q "images:" "$KUSTOMIZATION_FILE"; then
echo "📝 Updating $KUSTOMIZATION_FILE with tag: $SHA_TAG"
# Update kustomization.yaml with new image tag
# Handle both cases: newTag: (with value) and newTag: (empty)
sed -i.bak "s|newTag:.*|newTag: $SHA_TAG|" "$KUSTOMIZATION_FILE"
# Verify the update was successful
if grep -q "newTag: $SHA_TAG" "$KUSTOMIZATION_FILE"; then
echo "✅ Successfully updated kustomization.yaml"
rm -f "$KUSTOMIZATION_FILE.bak"
else
echo "❌ ERROR: Failed to update kustomization.yaml"
cat "$KUSTOMIZATION_FILE"
exit 1
fi
# Commit and push if there are changes
if git diff --quiet; then
echo "No changes to commit"
else
git add "$KUSTOMIZATION_FILE"
git commit -m "Update $OVERLAY image to $SHA_TAG"
git push
echo "✅ Kustomization updated with new image tag: $SHA_TAG"
fi
else
echo " $OVERLAY overlay uses base image (latest tag), skipping kustomization update"
echo " Image built with tag: $SHA_TAG"
fi
- name: Display image information
run: |
echo "✅ Image built and pushed successfully!"
echo "📦 Image tags:"
echo "${{ steps.meta.outputs.tags }}"
echo "🔖 SHA tag: ${{ steps.extract-tag.outputs.sha-tag }}"
echo "🔖 Digest: ${{ steps.build.outputs.digest }}"
echo ""
echo "🚀 Kustomization updated with new image tag"
echo " ArgoCD will automatically detect and deploy this new image"
echo " Monitor deployment at your ArgoCD dashboard"

View File

@@ -1,45 +0,0 @@
name: CI
on:
push:
branches: [main, develop]
pull_request:
branches: [main, develop]
jobs:
lint-and-build:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: services/nextjs/package-lock.json
- name: Install dependencies
working-directory: services/nextjs
run: npm ci
- name: Run ESLint
working-directory: services/nextjs
run: npm run lint
- name: Build Next.js application
working-directory: services/nextjs
run: npm run build
env:
NEXT_TELEMETRY_DISABLED: 1
- name: Check build output
working-directory: services/nextjs
run: |
if [ ! -d ".next" ]; then
echo "Build failed: .next directory not found"
exit 1
fi
echo "✅ Build completed successfully"

View File

@@ -2,7 +2,7 @@ name: Build Docker Image
 on:
   push:
-    branches: [main, develop]
+    branches: [main]
     tags:
       - 'v*'
   workflow_dispatch:
@@ -15,7 +15,7 @@ jobs:
   build-and-push:
     runs-on: ubuntu-24.04-arm
     permissions:
-      contents: write
+      contents: read
       packages: write
     outputs:
@@ -47,19 +47,17 @@ jobs:
         with:
           images: ${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}
           tags: |
-            type=ref,event=branch
-            type=ref,event=pr
+            type=sha,prefix=sha-,format=long
+            type=raw,value=latest,enable={{is_default_branch}}
             type=semver,pattern={{version}}
             type=semver,pattern={{major}}.{{minor}}
-            type=sha,prefix={{branch}}-sha-,format=long
-            type=raw,value=latest,enable={{is_default_branch}}
       - name: Build and push Docker image
         id: build
         uses: docker/build-push-action@v5
         with:
-          context: ./services/nextjs
-          file: ./deploy/docker/Dockerfile.prod
+          context: ./nextjs
+          file: ./Dockerfile
           push: true
           platforms: linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
@@ -67,115 +65,9 @@ jobs:
           cache-from: type=gha
           cache-to: type=gha,mode=max
- name: Extract SHA tag
id: extract-tag
run: |
# Extract the SHA-based tag from the tags list
TAGS="${{ steps.meta.outputs.tags }}"
echo "All tags:"
echo "$TAGS"
echo "---"
# Get commit SHA (full 40 characters)
COMMIT_SHA="${{ github.sha }}"
# Get current branch name
BRANCH_NAME="${{ github.ref_name }}"
echo "Branch: $BRANCH_NAME"
# Method 1: Extract the full SHA tag from docker/metadata-action output
# docker/metadata-action creates: <branch>-sha-<full-40-char-sha>
SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]{40}" | head -n 1)
# Method 2: If not found, try to extract any branch-sha- tag (fallback)
if [ -z "$SHA_TAG" ]; then
SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]+" | head -n 1)
if [ -n "$SHA_TAG" ]; then
echo "⚠️ Found SHA tag (may not be full 40 chars): $SHA_TAG"
fi
fi
# Method 3: Fallback to commit SHA directly (construct the tag)
if [ -z "$SHA_TAG" ]; then
SHA_TAG="${BRANCH_NAME}-sha-$COMMIT_SHA"
echo "⚠️ Could not extract from tags, using commit SHA: $SHA_TAG"
fi
if [ -z "$SHA_TAG" ]; then
echo "❌ ERROR: Failed to extract SHA tag"
exit 1
fi
echo "sha-tag=$SHA_TAG" >> $GITHUB_OUTPUT
echo "✅ Extracted SHA tag: $SHA_TAG"
- name: Update kustomization with new image tag
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
git config --global user.name "github-actions[bot]"
git config --global user.email "github-actions[bot]@users.noreply.github.com"
# Validate that SHA_TAG is not empty
SHA_TAG="${{ steps.extract-tag.outputs.sha-tag }}"
if [ -z "$SHA_TAG" ]; then
echo "❌ ERROR: SHA_TAG is empty, cannot update kustomization"
exit 1
fi
# Determine overlay based on branch
BRANCH_NAME="${{ github.ref_name }}"
if [ "$BRANCH_NAME" = "main" ]; then
OVERLAY="prod"
elif [ "$BRANCH_NAME" = "develop" ]; then
OVERLAY="dev"
else
echo "⚠️ Unknown branch: $BRANCH_NAME, skipping kustomization update"
exit 0
fi
KUSTOMIZATION_FILE="deploy/k8s/overlays/$OVERLAY/kustomization.yaml"
# Check if kustomization file has images section
if grep -q "images:" "$KUSTOMIZATION_FILE"; then
echo "📝 Updating $KUSTOMIZATION_FILE with tag: $SHA_TAG"
# Update kustomization.yaml with new image tag
# Handle both cases: newTag: (with value) and newTag: (empty)
sed -i.bak "s|newTag:.*|newTag: $SHA_TAG|" "$KUSTOMIZATION_FILE"
# Verify the update was successful
if grep -q "newTag: $SHA_TAG" "$KUSTOMIZATION_FILE"; then
echo "✅ Successfully updated kustomization.yaml"
rm -f "$KUSTOMIZATION_FILE.bak"
else
echo "❌ ERROR: Failed to update kustomization.yaml"
cat "$KUSTOMIZATION_FILE"
exit 1
fi
# Commit and push if there are changes
if git diff --quiet; then
echo "No changes to commit"
else
git add "$KUSTOMIZATION_FILE"
git commit -m "Update $OVERLAY image to $SHA_TAG"
git push
echo "✅ Kustomization updated with new image tag: $SHA_TAG"
fi
else
echo " $OVERLAY overlay uses base image (latest tag), skipping kustomization update"
echo " Image built with tag: $SHA_TAG"
fi
       - name: Display image information
         run: |
           echo "Image built and pushed successfully!"
-          echo "📦 Image tags:"
+          echo "Image tags:"
           echo "${{ steps.meta.outputs.tags }}"
-          echo "🔖 SHA tag: ${{ steps.extract-tag.outputs.sha-tag }}"
-          echo "🔖 Digest: ${{ steps.build.outputs.digest }}"
-          echo ""
-          echo "🚀 Kustomization updated with new image tag"
-          echo "   ArgoCD will automatically detect and deploy this new image"
-          echo "   Monitor deployment at your ArgoCD dashboard"
+          echo "Digest: ${{ steps.build.outputs.digest }}"

View File

@@ -2,9 +2,9 @@ name: CI
 on:
   push:
-    branches: [main, develop]
+    branches: [main]
   pull_request:
-    branches: [main, develop]
+    branches: [main]
 jobs:
   lint-and-build:
@@ -19,27 +19,27 @@ jobs:
         with:
           node-version: '20'
           cache: 'npm'
-          cache-dependency-path: services/nextjs/package-lock.json
+          cache-dependency-path: nextjs/package-lock.json
       - name: Install dependencies
-        working-directory: services/nextjs
+        working-directory: nextjs
         run: npm ci
       - name: Run ESLint
-        working-directory: services/nextjs
+        working-directory: nextjs
         run: npm run lint
       - name: Build Next.js application
-        working-directory: services/nextjs
+        working-directory: nextjs
         run: npm run build
         env:
           NEXT_TELEMETRY_DISABLED: 1
       - name: Check build output
-        working-directory: services/nextjs
+        working-directory: nextjs
         run: |
           if [ ! -d ".next" ]; then
             echo "Build failed: .next directory not found"
             exit 1
           fi
           echo "Build completed successfully"

View File

@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: jovies-dev
namespace: argocd
spec:
project: default
source:
repoURL: https://github.com/Mayne0213/jovies.git
targetRevision: develop
path: deploy/k8s/overlays/dev
destination:
server: https://kubernetes.default.svc
namespace: jovies-dev
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
retry:
limit: 5
backoff:
duration: 5s
factor: 2
maxDuration: 3m

View File

@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: jovies-prod
namespace: argocd
spec:
project: default
source:
repoURL: https://github.com/Mayne0213/jovies.git
targetRevision: main
path: deploy/k8s/overlays/prod
destination:
server: https://kubernetes.default.svc
namespace: jovies
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
retry:
limit: 5
backoff:
duration: 5s
factor: 2
maxDuration: 3m

View File

@@ -1,36 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: jovies
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://github.com/Mayne0213/jovies.git
targetRevision: main
path: deploy/argocd
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
retry:
limit: 5
backoff:
duration: 5s
factor: 2
maxDuration: 3m
revisionHistoryLimit: 10

View File

@@ -1,10 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
# App of Apps Application (self-managing)
- application.yaml
# Application deployments (prod and dev)
- application-prod.yaml
- application-dev.yaml

View File

@@ -1,26 +0,0 @@
# Development Dockerfile for Movie Next.js application
FROM node:20-alpine AS base
# Install dependencies for development
RUN apk add --no-cache libc6-compat curl
WORKDIR /app
# Copy package files
COPY package.json package-lock.json* ./
# Install all dependencies (including dev dependencies)
RUN npm ci
# Copy source code
COPY . .
# Expose port
EXPOSE 3000
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:3000 || exit 1
# Default command for development
CMD ["npm", "run", "dev"]

View File

@@ -1,27 +0,0 @@
services:
# Development Jovies Next.js Application
app:
build:
context: ../../services
dockerfile: ../deploy/docker/Dockerfile.dev
container_name: jovies-app-dev
restart: unless-stopped
labels:
kompose.namespace: jovies
ports:
- 3003:3000
environment:
- NODE_ENV=development
- WATCHPACK_POLLING=true
networks:
- jovies-network
volumes:
- ../../services:/app
- /app/node_modules
- /app/.next
command: npm run dev
networks:
jovies-network:
driver: bridge
name: jovies-network-dev

View File

@@ -1,22 +0,0 @@
services:
# Production Jovies Next.js Application
app:
image: jovies-app
build:
context: ../../services/nextjs
dockerfile: ../../deploy/docker/Dockerfile.prod
container_name: jovies-app-prod
restart: unless-stopped
labels:
kompose.namespace: jovies
ports:
- 3003:3000
environment:
- NODE_ENV=production
networks:
- jovies-network
networks:
jovies-network:
driver: bridge
name: jovies-network-prod

View File

@@ -1,51 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jovies-app
labels:
app: jovies-app
spec:
replicas: 1
selector:
matchLabels:
app: jovies-app
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 1
revisionHistoryLimit: 1
template:
metadata:
labels:
app: jovies-app
spec:
containers:
- name: jovies-app
image: ghcr.io/mayne0213/jovies:latest
imagePullPolicy: Always
ports:
- containerPort: 3000
protocol: TCP
env:
- name: NODE_ENV
value: production
resources:
requests:
memory: "100Mi"
cpu: "50m"
limits:
memory: "200Mi"
livenessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 5
periodSeconds: 5
restartPolicy: Always

View File

@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
commonLabels:
app.kubernetes.io/name: jovies
app.kubernetes.io/component: web
images:
- name: github.com/Mayne0213/jovies
newTag: latest

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: jovies-service
labels:
app: jovies-app
spec:
type: ClusterIP
ports:
- name: http
port: 80
targetPort: 3000
protocol: TCP
selector:
app: jovies-app

View File

@@ -1,18 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jovies-app
labels:
environment: development
spec:
replicas: 1
template:
spec:
containers:
- name: jovies-app
resources:
requests:
memory: "40Mi"
cpu: "10m"
limits:
memory: "100Mi"

View File

@@ -1,28 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jovies-dev-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
# Automatically redirect HTTP to HTTPS
nginx.ingress.kubernetes.io/ssl-redirect: "true"
# Let cert-manager issue the certificate automatically
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: traefik
# TLS configuration
tls:
- hosts:
- dev.jovies.kro.kr
secretName: jovies-dev-tls
rules:
- host: dev.jovies.kro.kr
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jovies-service
port:
number: 80

View File

@@ -1,21 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: jovies-dev
resources:
- ../../base
- resourcequota.yaml
- namespace.yaml
- ingress.yaml
commonLabels:
environment: development
# Image tag configuration
images:
- name: github.com/Mayne0213/jovies
newTag: develop-sha-bed2d09069c0a4a2f83dfd74a8489dcf67625a7d
patchesStrategicMerge:
- deployment-patch.yaml

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: jovies-dev
labels:
environment: development
app: jovies

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: ResourceQuota
metadata:
name: jovies-dev-quota
namespace: jovies-dev
spec:
hard:
requests.memory: "150Mi"
requests.cpu: "50m"
limits.memory: "300Mi"
pods: "6"

View File

@@ -1,18 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: jovies-app
labels:
environment: production
spec:
replicas: 2
template:
spec:
containers:
- name: jovies-app
resources:
requests:
memory: "40Mi"
cpu: "5m" # Reduced from 15m based on actual usage (1-2m)
limits:
memory: "100Mi"

View File

@@ -1,40 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jovies-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
# Automatically redirect HTTP to HTTPS
nginx.ingress.kubernetes.io/ssl-redirect: "true"
# Let cert-manager issue the certificate automatically
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: traefik
# TLS configuration
tls:
- hosts:
- jovies.kro.kr
- www.jovies.kro.kr
# cert-manager automatically creates the Secret under this name
secretName: jovies-tls
rules:
- host: jovies.kro.kr
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jovies-service
port:
number: 80
- host: www.jovies.kro.kr
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jovies-service
port:
number: 80

View File

@@ -1,20 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: jovies
resources:
- ../../base
- resourcequota.yaml
- ingress.yaml
commonLabels:
environment: production
# Image tag configuration
images:
- name: ghcr.io/mayne0213/jovies
newTag: main-sha-aa99ab652cbf9aef0d705b18c64d2cd9441a60a7
patchesStrategicMerge:
- deployment-patch.yaml
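Before their removal, these overlays could be rendered or applied with kustomize in the usual way (a sketch; in practice ArgoCD applied them automatically from the Application manifests above):

# Preview the rendered production manifests
kubectl kustomize deploy/k8s/overlays/prod
# Apply directly, bypassing ArgoCD, if ever needed
kubectl apply -k deploy/k8s/overlays/prod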

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: ResourceQuota
metadata:
name: jovies-quota
namespace: jovies
spec:
hard:
requests.memory: "200Mi"
requests.cpu: "100m"
limits.memory: "400Mi"
pods: "9"

View File

@@ -1,80 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: KANIKO_BUILD_NAME-dockerfile
namespace: kaniko-builds
data:
Dockerfile: |
# This will be replaced by the actual Dockerfile content
DOCKERFILE_CONTENT
---
apiVersion: batch/v1
kind: Job
metadata:
name: KANIKO_BUILD_NAME
namespace: kaniko-builds
spec:
ttlSecondsAfterFinished: 600
backoffLimit: 0
template:
metadata:
labels:
app: kaniko-build
spec:
restartPolicy: Never
initContainers:
- name: prepare-context
image: alpine/git:latest
command: ["/bin/sh", "-c"]
args:
- |
set -e
echo "Cloning repository..."
git clone GIT_REPO_URL /workspace/repo
cd /workspace/repo
git checkout GIT_SHA
echo "Preparing build context..."
mkdir -p /workspace/build
cp -r services/nextjs/* /workspace/build/
cp deploy/docker/Dockerfile.prod /workspace/build/Dockerfile
echo "Build context ready:"
ls -la /workspace/build/
volumeMounts:
- name: workspace
mountPath: /workspace
containers:
- name: kaniko
image: gcr.io/kaniko-project/executor:latest
args:
- --context=/workspace/build
- --dockerfile=/workspace/build/Dockerfile
- --cache=true
- --cache-repo=CACHE_REPO
- --compressed-caching=false
- --snapshot-mode=redo
- --use-new-run
- --verbosity=info
# DESTINATIONS will be added here
volumeMounts:
- name: workspace
mountPath: /workspace
- name: docker-config
mountPath: /kaniko/.docker
resources:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "2Gi"
cpu: "2000m"
volumes:
- name: workspace
emptyDir: {}
- name: docker-config
secret:
secretName: kaniko-registry-creds
items:
- key: .dockerconfigjson
path: config.json
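For reference, the "# DESTINATIONS will be added here" placeholder above is replaced by the build workflow's sed step with one --destination argument per tag, ending up roughly like this (tag values illustrative):

- --destination=gitea0213.kro.kr/mayne0213/jovies:main
- --destination=gitea0213.kro.kr/mayne0213/jovies:main-sha-<full-40-char-sha>
- --destination=gitea0213.kro.kr/mayne0213/jovies:latest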

View File

@@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- rbac.yaml

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: kaniko-builds
labels:
app.kubernetes.io/name: kaniko-builds
app.kubernetes.io/component: build-infrastructure

View File

@@ -1,69 +0,0 @@
---
# ServiceAccount for Gitea runner (optional, if you want dedicated SA)
apiVersion: v1
kind: ServiceAccount
metadata:
name: gitea-runner
namespace: gitea
---
# Role to manage Kaniko builds
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kaniko-builder
namespace: kaniko-builds
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "get", "list", "delete"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "get", "list", "watch", "delete"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "update", "patch"]
---
# RoleBinding for default ServiceAccount in gitea namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: gitea-runner-kaniko-builder
namespace: kaniko-builds
subjects:
- kind: ServiceAccount
name: default
namespace: gitea
roleRef:
kind: Role
name: kaniko-builder
apiGroup: rbac.authorization.k8s.io
---
# ClusterRole to create namespaces (if needed)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: namespace-creator
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["create", "get", "list"]
---
# ClusterRoleBinding for default ServiceAccount in gitea namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitea-runner-namespace-creator
subjects:
- kind: ServiceAccount
name: default
namespace: gitea
roleRef:
kind: ClusterRole
name: namespace-creator
apiGroup: rbac.authorization.k8s.io

View File

Binary image file changed: 12 KiB before, 12 KiB after (dimensions not rendered).