Compare commits

..

10 Commits

Author SHA1 Message Date
3b4d837a9e CHORE(test): update about-us page text
Some checks failed
Build Docker Image / build-and-push (push) Has been cancelled
CI / lint-and-build (push) Has been cancelled
- Modify text for ArgoCD image updater test
- Verify automated deployment pipeline
2026-01-09 21:48:51 +09:00
1fbd0467bd REFACTOR(repo): simplify project structure
- Move services/nextjs/ to nextjs/
- Move Dockerfile.prod to Dockerfile at root
- Remove deploy/ folder (K8s manifests moved to K3S-HOME/web-apps)
- Remove .gitea/ workflows
- Update GitHub Actions for new structure
- Remove develop branch triggers
2026-01-05 02:00:36 +09:00
8f6595a74a CHORE(app): reduce replicas to 1
- Scale down deployment replicas
- Optimize resource usage
2025-12-30 20:10:52 +09:00
25546df4f3 FEAT(app): add prod yaml to kustomization
- Add production ArgoCD Application to kustomization
- Enable prod deployment management
2025-12-30 01:35:28 +09:00
f9a585cfe5 FIX(deploy): base deployment image url to ghcr.io
- Change github.com/Mayne0213/jovies to ghcr.io/mayne0213/jovies
- Ensures kustomize image replacement works correctly
2025-12-30 01:30:19 +09:00
a04ab84435 CHORE(ci): update ArgoCD URLs to GitHub
- Update repository URLs from Gitea to GitHub
- Change source control references
2025-12-30 01:08:45 +09:00
0101b58bd0 CHORE(app): switch dev ingress to Traefik
- Change ingress controller from HAProxy to Traefik
- Update ingress annotations
2025-12-30 00:37:49 +09:00
2edb888008 REFACTOR(auth): use CR_PAT for ghcr.io
- Use CR_PAT instead of GITHUB_TOKEN for registry login
- Fix authentication for private container registry
2025-12-30 00:04:17 +09:00
67fa839f46 REFACTOR(ci): migrate repoURL from Gitea to GitHub
- Update repository URL to GitHub
- Change source control provider
2025-12-29 20:38:23 +09:00
52712a4d32 PERF(app): optimize kubectl installation
- Check if kubectl already exists first
- Use specific version instead of querying stable.txt
- Show download progress
- Skip installation if already present

This should significantly reduce setup time if kubectl
is already included in the runner image
2025-12-28 18:04:14 +09:00
56 changed files with 21 additions and 1359 deletions

View File

@@ -1,273 +0,0 @@
name: Build Docker Image
on:
  push:
    branches: [main, develop]
    tags:
      - 'v*'
  workflow_dispatch:
env:
  REGISTRY: gitea0213.kro.kr
  IMAGE_NAME: ${{ github.repository }}
jobs:
  build-and-push:
    runs-on: ubuntu-24.04-arm
    permissions:
      contents: write
      packages: write
    outputs:
      image-tag: ${{ steps.meta.outputs.tags }}
      image-digest: ${{ steps.build.outputs.digest }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup kubectl
        run: |
          if ! command -v kubectl &> /dev/null; then
            curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"
            chmod +x kubectl
            sudo mv kubectl /usr/local/bin/
          fi
          kubectl version --client
      - name: Setup kubeconfig from Secret
        env:
          KUBECONFIG_B64: ${{ secrets.KUBECONFIG }}
        run: |
          if [ -z "$KUBECONFIG_B64" ]; then
            echo "❌ KUBECONFIG secret not set"
            echo "Please add KUBECONFIG to Gitea repository secrets"
            exit 1
          fi
          echo "📝 KUBECONFIG secret found (length: ${#KUBECONFIG_B64})"
          mkdir -p $HOME/.kube
          # Clean up the base64 string (remove all whitespace and newlines)
          CLEAN_B64=$(echo "$KUBECONFIG_B64" | tr -d '[:space:]')
          echo "After cleanup: ${#CLEAN_B64} chars"
          # Try decoding
          if echo "$CLEAN_B64" | base64 -d > $HOME/.kube/config 2>/dev/null; then
            echo "✅ Successfully decoded kubeconfig"
          elif echo "$CLEAN_B64" | base64 --decode > $HOME/.kube/config 2>/dev/null; then
            echo "✅ Successfully decoded kubeconfig (with --decode)"
          else
            echo "❌ Both base64 decode methods failed"
            echo "Trying to save as-is (maybe already decoded)..."
            echo "$KUBECONFIG_B64" > $HOME/.kube/config
          fi
          chmod 600 $HOME/.kube/config
          # Verify it's valid YAML
          if head -1 $HOME/.kube/config | grep -q "apiVersion"; then
            echo "✅ Kubeconfig appears valid"
          else
            echo "⚠️ Kubeconfig may not be valid, first line:"
            head -1 $HOME/.kube/config
          fi
          # Test connection
          kubectl cluster-info
          kubectl get nodes -o wide
      - name: Lowercase repository name
        id: lowercase
        run: |
          echo "repo=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}
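          # For a push to "develop", the templates below produce tags such as
          # "develop" and "develop-sha-<full-40-char-sha>" (plus "latest" on the default branch)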
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha,prefix={{branch}}-sha-,format=long
            type=raw,value=latest,enable={{is_default_branch}}
      - name: Create registry credentials in Kubernetes
        run: |
          # Ensure namespace exists
          kubectl get namespace kaniko-builds 2>/dev/null || kubectl create namespace kaniko-builds
          # Create/update registry secret
          kubectl create secret docker-registry kaniko-registry-creds \
            --docker-server=${{ env.REGISTRY }} \
            --docker-username=bluemayne \
            --docker-password=${{ secrets.GITEAREGISTRY }} \
            --namespace=kaniko-builds \
            --dry-run=client -o yaml | kubectl apply -f -
      - name: Build and push with Kaniko Job
        id: build
        run: |
          TAGS="${{ steps.meta.outputs.tags }}"
          # Prepare destination arguments
          DESTINATIONS=""
          while IFS= read -r tag; do
            DESTINATIONS="$DESTINATIONS\n        - --destination=$tag"
          done <<< "$TAGS"
          # Create unique build name
          BUILD_NAME="kaniko-build-${{ github.run_number }}-$(date +%s)"
          echo "📦 Building image: ${BUILD_NAME}"
          echo "Tags: $TAGS"
          # Generate Kaniko Job from template
          sed -e "s|KANIKO_BUILD_NAME|${BUILD_NAME}|g" \
              -e "s|GIT_REPO_URL|https://gitea0213.kro.kr/${{ github.repository }}.git|g" \
              -e "s|GIT_SHA|${{ github.sha }}|g" \
              -e "s|CACHE_REPO|${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}/cache|g" \
              -e "s|# DESTINATIONS will be added here|${DESTINATIONS}|g" \
              deploy/kaniko/job.yaml > /tmp/kaniko-job.yaml
          # Apply Job
          kubectl apply -f /tmp/kaniko-job.yaml
          # Wait for completion
          echo "⏳ Waiting for Kaniko Job to complete..."
          kubectl wait --for=condition=complete --timeout=600s job/${BUILD_NAME} -n kaniko-builds || {
            echo "❌ Job failed or timed out"
            POD=$(kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
            echo "📋 Logs:"
            kubectl logs -n kaniko-builds ${POD} --all-containers=true || true
            kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
            exit 1
          }
          echo "✅ Image built successfully"
          # Get digest
          POD=$(kubectl get pods -n kaniko-builds -l job-name=${BUILD_NAME} -o jsonpath='{.items[0].metadata.name}')
          DIGEST=$(kubectl logs -n kaniko-builds ${POD} -c kaniko 2>/dev/null | grep -oP 'digest: \K[a-zA-Z0-9:]+' | tail -1 || echo "unknown")
          echo "digest=${DIGEST}" >> $GITHUB_OUTPUT
          # Cleanup
          kubectl delete job ${BUILD_NAME} -n kaniko-builds || true
      - name: Extract SHA tag
        id: extract-tag
        run: |
          # Extract the SHA-based tag from the tags list
          TAGS="${{ steps.meta.outputs.tags }}"
          echo "All tags:"
          echo "$TAGS"
          echo "---"
          # Get commit SHA (full 40 characters)
          COMMIT_SHA="${{ github.sha }}"
          # Get current branch name
          BRANCH_NAME="${{ github.ref_name }}"
          echo "Branch: $BRANCH_NAME"
          # Method 1: Extract the full SHA tag from docker/metadata-action output
          # docker/metadata-action creates: <branch>-sha-<full-40-char-sha>
          SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]{40}" | head -n 1)
          # Method 2: If not found, try to extract any branch-sha- tag (fallback)
          if [ -z "$SHA_TAG" ]; then
            SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]+" | head -n 1)
            if [ -n "$SHA_TAG" ]; then
              echo "⚠️ Found SHA tag (may not be full 40 chars): $SHA_TAG"
            fi
          fi
          # Method 3: Fallback to commit SHA directly (construct the tag)
          if [ -z "$SHA_TAG" ]; then
            SHA_TAG="${BRANCH_NAME}-sha-$COMMIT_SHA"
            echo "⚠️ Could not extract from tags, using commit SHA: $SHA_TAG"
          fi
          if [ -z "$SHA_TAG" ]; then
            echo "❌ ERROR: Failed to extract SHA tag"
            exit 1
          fi
          echo "sha-tag=$SHA_TAG" >> $GITHUB_OUTPUT
          echo "✅ Extracted SHA tag: $SHA_TAG"
      - name: Update kustomization with new image tag
        env:
          GITEA_TOKEN: ${{ secrets.GITEAREGISTRYTOKEN }}
        run: |
          git config --global user.name "gitea-actions[bot]"
          git config --global user.email "gitea-actions[bot]@users.noreply.gitea.com"
          # Validate that SHA_TAG is not empty
          SHA_TAG="${{ steps.extract-tag.outputs.sha-tag }}"
          if [ -z "$SHA_TAG" ]; then
            echo "❌ ERROR: SHA_TAG is empty, cannot update kustomization"
            exit 1
          fi
          # Determine overlay based on branch
          BRANCH_NAME="${{ github.ref_name }}"
          if [ "$BRANCH_NAME" = "main" ]; then
            OVERLAY="prod"
          elif [ "$BRANCH_NAME" = "develop" ]; then
            OVERLAY="dev"
          else
            echo "⚠️ Unknown branch: $BRANCH_NAME, skipping kustomization update"
            exit 0
          fi
          KUSTOMIZATION_FILE="deploy/k8s/overlays/$OVERLAY/kustomization.yaml"
          # Check if kustomization file has images section
          if grep -q "images:" "$KUSTOMIZATION_FILE"; then
            echo "📝 Updating $KUSTOMIZATION_FILE with tag: $SHA_TAG"
            # Update kustomization.yaml with new image tag
            # Handle both cases: newTag: (with value) and newTag: (empty)
            sed -i.bak "s|newTag:.*|newTag: $SHA_TAG|" "$KUSTOMIZATION_FILE"
            # Verify the update was successful
            if grep -q "newTag: $SHA_TAG" "$KUSTOMIZATION_FILE"; then
              echo "✅ Successfully updated kustomization.yaml"
              rm -f "$KUSTOMIZATION_FILE.bak"
            else
              echo "❌ ERROR: Failed to update kustomization.yaml"
              cat "$KUSTOMIZATION_FILE"
              exit 1
            fi
            # Commit and push if there are changes
            if git diff --quiet; then
              echo "No changes to commit"
            else
              git add "$KUSTOMIZATION_FILE"
              git commit -m "Update $OVERLAY image to $SHA_TAG"
              git push
              echo "✅ Kustomization updated with new image tag: $SHA_TAG"
            fi
          else
            echo "  $OVERLAY overlay uses base image (latest tag), skipping kustomization update"
            echo "  Image built with tag: $SHA_TAG"
          fi
      - name: Display image information
        run: |
          echo "✅ Image built and pushed successfully!"
          echo "📦 Image tags:"
          echo "${{ steps.meta.outputs.tags }}"
          echo "🔖 SHA tag: ${{ steps.extract-tag.outputs.sha-tag }}"
          echo "🔖 Digest: ${{ steps.build.outputs.digest }}"
          echo ""
          echo "🚀 Kustomization updated with new image tag"
          echo "   ArgoCD will automatically detect and deploy this new image"
          echo "   Monitor deployment at your ArgoCD dashboard"

View File

@@ -1,45 +0,0 @@
name: CI
on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]
jobs:
  lint-and-build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
          cache-dependency-path: services/nextjs/package-lock.json
      - name: Install dependencies
        working-directory: services/nextjs
        run: npm ci
      - name: Run ESLint
        working-directory: services/nextjs
        run: npm run lint
      - name: Build Next.js application
        working-directory: services/nextjs
        run: npm run build
        env:
          NEXT_TELEMETRY_DISABLED: 1
      - name: Check build output
        working-directory: services/nextjs
        run: |
          if [ ! -d ".next" ]; then
            echo "Build failed: .next directory not found"
            exit 1
          fi
          echo "✅ Build completed successfully"

View File

@@ -2,7 +2,7 @@ name: Build Docker Image
 on:
   push:
-    branches: [main, develop]
+    branches: [main]
     tags:
       - 'v*'
   workflow_dispatch:
@@ -15,7 +15,7 @@ jobs:
   build-and-push:
     runs-on: ubuntu-24.04-arm
     permissions:
-      contents: write
+      contents: read
       packages: write
     outputs:
@@ -34,7 +34,7 @@ jobs:
         with:
           registry: ${{ env.REGISTRY }}
           username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
+          password: ${{ secrets.CR_PAT }}
       - name: Lowercase repository name
         id: lowercase
@@ -47,19 +47,17 @@ jobs:
         with:
           images: ${{ env.REGISTRY }}/${{ steps.lowercase.outputs.repo }}
           tags: |
-            type=ref,event=branch
-            type=ref,event=pr
+            type=sha,prefix=sha-,format=long
+            type=raw,value=latest,enable={{is_default_branch}}
             type=semver,pattern={{version}}
             type=semver,pattern={{major}}.{{minor}}
-            type=sha,prefix={{branch}}-sha-,format=long
-            type=raw,value=latest,enable={{is_default_branch}}
       - name: Build and push Docker image
         id: build
         uses: docker/build-push-action@v5
         with:
-          context: ./services/nextjs
-          file: ./deploy/docker/Dockerfile.prod
+          context: ./nextjs
+          file: ./Dockerfile
           push: true
           platforms: linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
@@ -67,115 +65,9 @@ jobs:
           cache-from: type=gha
           cache-to: type=gha,mode=max
-      - name: Extract SHA tag
-        id: extract-tag
-        run: |
-          # Extract the SHA-based tag from the tags list
-          TAGS="${{ steps.meta.outputs.tags }}"
-          echo "All tags:"
-          echo "$TAGS"
-          echo "---"
-          # Get commit SHA (full 40 characters)
-          COMMIT_SHA="${{ github.sha }}"
-          # Get current branch name
-          BRANCH_NAME="${{ github.ref_name }}"
-          echo "Branch: $BRANCH_NAME"
-          # Method 1: Extract the full SHA tag from docker/metadata-action output
-          # docker/metadata-action creates: <branch>-sha-<full-40-char-sha>
-          SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]{40}" | head -n 1)
-          # Method 2: If not found, try to extract any branch-sha- tag (fallback)
-          if [ -z "$SHA_TAG" ]; then
-            SHA_TAG=$(echo "$TAGS" | grep -oE "${BRANCH_NAME}-sha-[a-f0-9]+" | head -n 1)
-            if [ -n "$SHA_TAG" ]; then
-              echo "⚠️ Found SHA tag (may not be full 40 chars): $SHA_TAG"
-            fi
-          fi
-          # Method 3: Fallback to commit SHA directly (construct the tag)
-          if [ -z "$SHA_TAG" ]; then
-            SHA_TAG="${BRANCH_NAME}-sha-$COMMIT_SHA"
-            echo "⚠️ Could not extract from tags, using commit SHA: $SHA_TAG"
-          fi
-          if [ -z "$SHA_TAG" ]; then
-            echo "❌ ERROR: Failed to extract SHA tag"
-            exit 1
-          fi
-          echo "sha-tag=$SHA_TAG" >> $GITHUB_OUTPUT
-          echo "✅ Extracted SHA tag: $SHA_TAG"
-      - name: Update kustomization with new image tag
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          git config --global user.name "github-actions[bot]"
-          git config --global user.email "github-actions[bot]@users.noreply.github.com"
-          # Validate that SHA_TAG is not empty
-          SHA_TAG="${{ steps.extract-tag.outputs.sha-tag }}"
-          if [ -z "$SHA_TAG" ]; then
-            echo "❌ ERROR: SHA_TAG is empty, cannot update kustomization"
-            exit 1
-          fi
-          # Determine overlay based on branch
-          BRANCH_NAME="${{ github.ref_name }}"
-          if [ "$BRANCH_NAME" = "main" ]; then
-            OVERLAY="prod"
-          elif [ "$BRANCH_NAME" = "develop" ]; then
-            OVERLAY="dev"
-          else
-            echo "⚠️ Unknown branch: $BRANCH_NAME, skipping kustomization update"
-            exit 0
-          fi
-          KUSTOMIZATION_FILE="deploy/k8s/overlays/$OVERLAY/kustomization.yaml"
-          # Check if kustomization file has images section
-          if grep -q "images:" "$KUSTOMIZATION_FILE"; then
-            echo "📝 Updating $KUSTOMIZATION_FILE with tag: $SHA_TAG"
-            # Update kustomization.yaml with new image tag
-            # Handle both cases: newTag: (with value) and newTag: (empty)
-            sed -i.bak "s|newTag:.*|newTag: $SHA_TAG|" "$KUSTOMIZATION_FILE"
-            # Verify the update was successful
-            if grep -q "newTag: $SHA_TAG" "$KUSTOMIZATION_FILE"; then
-              echo "✅ Successfully updated kustomization.yaml"
-              rm -f "$KUSTOMIZATION_FILE.bak"
-            else
-              echo "❌ ERROR: Failed to update kustomization.yaml"
-              cat "$KUSTOMIZATION_FILE"
-              exit 1
-            fi
-            # Commit and push if there are changes
-            if git diff --quiet; then
-              echo "No changes to commit"
-            else
-              git add "$KUSTOMIZATION_FILE"
-              git commit -m "Update $OVERLAY image to $SHA_TAG"
-              git push
-              echo "✅ Kustomization updated with new image tag: $SHA_TAG"
-            fi
-          else
-            echo "  $OVERLAY overlay uses base image (latest tag), skipping kustomization update"
-            echo "  Image built with tag: $SHA_TAG"
-          fi
       - name: Display image information
         run: |
           echo "Image built and pushed successfully!"
-          echo "📦 Image tags:"
+          echo "Image tags:"
           echo "${{ steps.meta.outputs.tags }}"
-          echo "🔖 SHA tag: ${{ steps.extract-tag.outputs.sha-tag }}"
-          echo "🔖 Digest: ${{ steps.build.outputs.digest }}"
-          echo ""
-          echo "🚀 Kustomization updated with new image tag"
-          echo "   ArgoCD will automatically detect and deploy this new image"
-          echo "   Monitor deployment at your ArgoCD dashboard"
+          echo "Digest: ${{ steps.build.outputs.digest }}"

View File

@@ -2,9 +2,9 @@ name: CI
 on:
   push:
-    branches: [main, develop]
+    branches: [main]
   pull_request:
-    branches: [main, develop]
+    branches: [main]
 jobs:
   lint-and-build:
@@ -19,27 +19,27 @@ jobs:
         with:
           node-version: '20'
           cache: 'npm'
-          cache-dependency-path: services/nextjs/package-lock.json
+          cache-dependency-path: nextjs/package-lock.json
       - name: Install dependencies
-        working-directory: services/nextjs
+        working-directory: nextjs
         run: npm ci
       - name: Run ESLint
-        working-directory: services/nextjs
+        working-directory: nextjs
         run: npm run lint
       - name: Build Next.js application
-        working-directory: services/nextjs
+        working-directory: nextjs
         run: npm run build
         env:
           NEXT_TELEMETRY_DISABLED: 1
       - name: Check build output
-        working-directory: services/nextjs
+        working-directory: nextjs
         run: |
           if [ ! -d ".next" ]; then
             echo "Build failed: .next directory not found"
             exit 1
           fi
           echo "Build completed successfully"

View File

@@ -1,292 +0,0 @@
# Jovies ArgoCD Deployment Guide (2GB RAM Environment)
## Prerequisites
- ✅ ArgoCD installed
- ✅ K3s cluster running
- ✅ Push access to GHCR
## Current Resource Settings (optimized for 2GB)
```yaml
Jovies Pod:
  replicas: 1
  resources:
    requests:
      memory: 100Mi
      cpu: 50m
    limits:
      memory: 200Mi
      cpu: 150m
```
### Expected Memory Usage
```
System:   ~300Mi
K3s:      ~300Mi
ArgoCD:   ~800Mi
Traefik:  ~50Mi
Jovies:   ~100Mi (up to 200Mi)
────────────────────────────
Total:    ~1,550Mi
Headroom: ~500Mi ✅
```
## Deployment Steps
### 1. Register the Application with ArgoCD
```bash
cd /Users/minjo/home/mayne/projects/jovies
# Create the Application
kubectl apply -f deploy/argocd/application.yaml
```
### 2. Check Deployment Status
```bash
# Check Application status
kubectl get application jovies -n argocd
# Example output:
# NAME     SYNC STATUS   HEALTH STATUS
# jovies   Synced        Healthy
```
### 3. Check Pod Status
```bash
# List Pods in the jovies namespace
kubectl get pods -n jovies
# Follow detailed logs
kubectl logs -n jovies -l app=jovies-app -f
```
### 4. Check the Service
```bash
# Inspect the Service
kubectl get svc -n jovies
# Output:
# NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)
# jovies-service   ClusterIP   10.43.xxx.xxx   <none>        80/TCP
```
## Accessing the ArgoCD UI
### Access via Port-Forwarding
```bash
# Port-forward the ArgoCD server
kubectl port-forward svc/argocd-server -n argocd 8080:443
# Open https://localhost:8080 in a browser
```
### Retrieve the Initial Password
```bash
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
echo
```
- **Username**: `admin`
- **Password**: the output of the command above
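
With these credentials you can also log in from the ArgoCD CLI (a minimal sketch; assumes the CLI is installed and the port-forward above is running):
```bash
argocd login localhost:8080 --username admin --insecure
```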
## Deployment Workflow
```
1. Developer pushes code changes to the main branch
2. GitHub Actions builds the Docker image & pushes it to GHCR
   (tags: latest, main, main-{sha})
3. ArgoCD detects the manifest change (polls every 3 minutes)
4. Deploys to Kubernetes automatically (selfHeal: true)
5. Zero-downtime deployment via rolling update
```
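Steps 3–5 are driven by the Application's sync policy (a minimal excerpt; the full manifests live under `deploy/argocd/`):
```yaml
syncPolicy:
  automated:
    prune: true     # remove resources that were deleted from Git
    selfHeal: true  # revert manual drift in the cluster
```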
## Manual Sync
Trigger a sync manually when needed:
```bash
# Using kubectl
kubectl patch app jovies -n argocd --type merge -p '{"operation":{"sync":{}}}'
# Or with the ArgoCD CLI (if installed)
argocd app sync jovies
```
## Image Updates
### Method 1: GitHub Actions (automatic)
```yaml
# .github/workflows/build.yml automatically:
# 1. Builds the image
# 2. Pushes it to GHCR (latest tag)
# 3. ArgoCD detects the change and deploys
```
### Method 2: Manual image tag change
```bash
# Edit kustomization.yaml
cd deploy/k8s/overlays/prod
vi kustomization.yaml
# Change newTag to the desired version
images:
  - name: ghcr.io/mayne0213/jovies
    newTag: main-abc1234  # ← change this
# Commit & push to Git
git add .
git commit -m "Update jovies to main-abc1234"
git push
# ArgoCD detects the change and deploys automatically
```
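Before pushing, you can preview the manifests the overlay renders (a quick check; uses kubectl's built-in kustomize support):
```bash
kubectl kustomize deploy/k8s/overlays/prod | grep "image:"
```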
## Troubleshooting
### Pod in CrashLoopBackOff
```bash
# Check Pod logs
kubectl logs -n jovies -l app=jovies-app --tail=100
# Pod details
kubectl describe pod -n jovies -l app=jovies-app
# Common causes:
# 1. Image not found → check GHCR permissions
# 2. Missing environment variables → check ConfigMap/Secret
# 3. Failing health checks → check port 3000
```
### ArgoCD Sync Failure
```bash
# Check Application state
kubectl get app jovies -n argocd -o yaml
# Check ArgoCD logs
kubectl logs -n argocd -l app.kubernetes.io/name=argocd-application-controller --tail=100
```
### Out of Memory (OOMKilled)
```bash
# Check Pod status
kubectl get pod -n jovies -l app=jovies-app
# Check the last termination reason
kubectl describe pod -n jovies -l app=jovies-app | grep -A 5 "Last State"
# If you see OOMKilled, increase the resources:
# edit deploy/k8s/overlays/prod/deployment-patch.yaml
resources:
  limits:
    memory: "300Mi"  # increase from 200Mi to 300Mi
```
## Rollback
### Rollback from the ArgoCD UI
1. Open the ArgoCD UI
2. Click the `jovies` application
3. Open `HISTORY`
4. Select a previous version and click `ROLLBACK`
### Rollback via CLI
```bash
# Check the history
kubectl get app jovies -n argocd -o yaml | grep -A 10 "history:"
# Roll back to a specific revision (e.g. revision 3)
argocd app rollback jovies 3
```
## Monitoring
### Check Resource Usage
```bash
# Pod resource usage
kubectl top pod -n jovies
# Node-wide resource usage
kubectl top node
# Example output:
# NAME             CPU(cores)   MEMORY(bytes)
# jovies-app-xxx   10m          120Mi
```
### Check Memory Pressure
```bash
# Monitor overall memory usage
watch -n 5 'kubectl top node && echo "---" && kubectl top pod -A | head -20'
```
### Log Streaming
```bash
# Stream logs in real time
kubectl logs -n jovies -l app=jovies-app -f --tail=100
# View logs from all containers of matching Pods
kubectl logs -n jovies -l app=jovies-app --all-containers=true -f
```
## Cleanup (Deletion)
```bash
# Delete the ArgoCD Application (its resources are deleted with it)
kubectl delete -f deploy/argocd/application.yaml
# Or delete it directly
kubectl delete app jovies -n argocd
# To delete the namespace as well
kubectl delete namespace jovies
```
## Next Steps
Once memory allows (4GB+):
1. **Increase replicas**
   ```yaml
   spec:
     replicas: 2  # high availability
   ```
2. **Increase resources**
   ```yaml
   resources:
     requests:
       memory: "256Mi"
     limits:
       memory: "512Mi"
   ```
3. **Add ArgoCD Image Updater**
   - Detects new images automatically
   - Can deploy without a Git commit (see the annotation sketch after this list)
4. **Add a monitoring stack**
   - Prometheus
   - Grafana
   - Alertmanager
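
ArgoCD Image Updater (item 3) is configured through annotations on the Application resource. A minimal sketch using the stock annotation keys; the `jovies` alias and the update strategy here are illustrative assumptions, not settings from this repo:
```yaml
metadata:
  annotations:
    # watch this image for new tags (alias=image)
    argocd-image-updater.argoproj.io/image-list: jovies=ghcr.io/mayne0213/jovies
    # strategy is an assumption; pick one that matches your tagging scheme
    argocd-image-updater.argoproj.io/jovies.update-strategy: newest-build
```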

View File

@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jovies-dev
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://gitea0213.kro.kr/bluemayne/jovies.git
    targetRevision: develop
    path: deploy/k8s/overlays/dev
  destination:
    server: https://kubernetes.default.svc
    namespace: jovies-dev
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
      allowEmpty: false
    syncOptions:
      - CreateNamespace=true
    retry:
      limit: 5
      backoff:
        duration: 5s
        factor: 2
        maxDuration: 3m

View File

@@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kaniko-infrastructure
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea0213.kro.kr/bluemayne/jovies.git
    targetRevision: main
    path: deploy/kaniko
  destination:
    server: https://kubernetes.default.svc
    namespace: kaniko-builds
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
      allowEmpty: false
    syncOptions:
      - CreateNamespace=true
    retry:
      limit: 5
      backoff:
        duration: 5s
        factor: 2
        maxDuration: 3m

View File

@@ -1,27 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jovies-prod
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://gitea0213.kro.kr/bluemayne/jovies.git
    targetRevision: main
    path: deploy/k8s/overlays/prod
  destination:
    server: https://kubernetes.default.svc
    namespace: jovies
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
      allowEmpty: false
    syncOptions:
      - CreateNamespace=true
    retry:
      limit: 5
      backoff:
        duration: 5s
        factor: 2
        maxDuration: 3m

View File

@@ -1,36 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: jovies
  namespace: argocd
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default
  source:
    repoURL: https://gitea0213.kro.kr/bluemayne/jovies.git
    targetRevision: main
    path: deploy/argocd
  destination:
    server: https://kubernetes.default.svc
    namespace: argocd
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
      allowEmpty: false
    syncOptions:
      - CreateNamespace=true
    retry:
      limit: 5
      backoff:
        duration: 5s
        factor: 2
        maxDuration: 3m
  revisionHistoryLimit: 10

View File

@@ -1,10 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # App of Apps Application (self-managing)
  - application.yaml
  # Application deployments (prod and dev)
  - application-prod.yaml
  - application-dev.yaml

View File

@@ -1,26 +0,0 @@
# Development Dockerfile for Movie Next.js application
FROM node:20-alpine AS base
# Install dependencies for development
RUN apk add --no-cache libc6-compat curl
WORKDIR /app
# Copy package files
COPY package.json package-lock.json* ./
# Install all dependencies (including dev dependencies)
RUN npm ci
# Copy source code
COPY . .
# Expose port
EXPOSE 3000
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:3000 || exit 1
# Default command for development
CMD ["npm", "run", "dev"]

View File

@@ -1,27 +0,0 @@
services:
  # Development Jovies Next.js Application
  app:
    build:
      context: ../../services
      dockerfile: ../deploy/docker/Dockerfile.dev
    container_name: jovies-app-dev
    restart: unless-stopped
    labels:
      kompose.namespace: jovies
    ports:
      - 3003:3000
    environment:
      - NODE_ENV=development
      - WATCHPACK_POLLING=true
    networks:
      - jovies-network
    volumes:
      - ../../services:/app
      - /app/node_modules
      - /app/.next
    command: npm run dev
networks:
  jovies-network:
    driver: bridge
    name: jovies-network-dev

View File

@@ -1,22 +0,0 @@
services:
  # Production Jovies Next.js Application
  app:
    image: jovies-app
    build:
      context: ../../services/nextjs
      dockerfile: ../../deploy/docker/Dockerfile.prod
    container_name: jovies-app-prod
    restart: unless-stopped
    labels:
      kompose.namespace: jovies
    ports:
      - 3003:3000
    environment:
      - NODE_ENV=production
    networks:
      - jovies-network
networks:
  jovies-network:
    driver: bridge
    name: jovies-network-prod

View File

@@ -1,51 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jovies-app
  labels:
    app: jovies-app
spec:
  replicas: 2
  selector:
    matchLabels:
      app: jovies-app
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  revisionHistoryLimit: 1
  template:
    metadata:
      labels:
        app: jovies-app
    spec:
      containers:
        - name: jovies-app
          image: gitea0213.kro.kr/bluemayne/jovies:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 3000
              protocol: TCP
          env:
            - name: NODE_ENV
              value: production
          resources:
            requests:
              memory: "100Mi"
              cpu: "50m"
            limits:
              memory: "200Mi"
          livenessProbe:
            httpGet:
              path: /
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
      restartPolicy: Always

View File

@@ -1,14 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - deployment.yaml
  - service.yaml
commonLabels:
  app.kubernetes.io/name: jovies
  app.kubernetes.io/component: web
images:
  - name: gitea0213.kro.kr/bluemayne/jovies
    newTag: latest

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: jovies-service
  labels:
    app: jovies-app
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 80
      targetPort: 3000
      protocol: TCP
  selector:
    app: jovies-app

View File

@@ -1,18 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jovies-app
  labels:
    environment: development
spec:
  replicas: 1
  template:
    spec:
      containers:
        - name: jovies-app
          resources:
            requests:
              memory: "40Mi"
              cpu: "10m"
            limits:
              memory: "100Mi"

View File

@@ -1,28 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jovies-dev-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    # Automatically redirect HTTP to HTTPS
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    # Let cert-manager issue the certificate automatically
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  ingressClassName: haproxy
  # TLS configuration
  tls:
    - hosts:
        - dev.jovies.kro.kr
      secretName: jovies-dev-tls
  rules:
    - host: dev.jovies.kro.kr
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: jovies-service
                port:
                  number: 80

View File

@@ -1,21 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: jovies-dev
resources:
  - ../../base
  - resourcequota.yaml
  - namespace.yaml
  - ingress.yaml
commonLabels:
  environment: development
# Image tag settings
images:
  - name: gitea0213.kro.kr/bluemayne/jovies
    newTag: develop-sha-bed2d09069c0a4a2f83dfd74a8489dcf67625a7d
patchesStrategicMerge:
  - deployment-patch.yaml

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: jovies-dev
  labels:
    environment: development
    app: jovies

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: ResourceQuota
metadata:
  name: jovies-dev-quota
  namespace: jovies-dev
spec:
  hard:
    requests.memory: "150Mi"
    requests.cpu: "50m"
    limits.memory: "300Mi"
    pods: "6"

View File

@@ -1,18 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jovies-app
  labels:
    environment: production
spec:
  replicas: 2
  template:
    spec:
      containers:
        - name: jovies-app
          resources:
            requests:
              memory: "40Mi"
              cpu: "5m"  # Reduced from 15m based on actual usage (1-2m)
            limits:
              memory: "100Mi"

View File

@@ -1,40 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jovies-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    # Automatically redirect HTTP to HTTPS
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    # Let cert-manager issue the certificate automatically
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  ingressClassName: haproxy
  # TLS configuration
  tls:
    - hosts:
        - jovies.kro.kr
        - www.jovies.kro.kr
      # cert-manager auto-creates the Secret under this name
      secretName: jovies-tls
  rules:
    - host: jovies.kro.kr
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: jovies-service
                port:
                  number: 80
    - host: www.jovies.kro.kr
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: jovies-service
                port:
                  number: 80

View File

@@ -1,20 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: jovies
resources:
  - ../../base
  - resourcequota.yaml
  - ingress.yaml
commonLabels:
  environment: production
# Image tag settings
images:
  - name: ghcr.io/mayne0213/jovies
    newTag: main-sha-beff07e89351858d72b29bc48cd27fb40fce97dd
patchesStrategicMerge:
  - deployment-patch.yaml

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: ResourceQuota
metadata:
  name: jovies-quota
  namespace: jovies
spec:
  hard:
    requests.memory: "200Mi"
    requests.cpu: "100m"
    limits.memory: "400Mi"
    pods: "9"

View File

@@ -1,80 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: KANIKO_BUILD_NAME-dockerfile
  namespace: kaniko-builds
data:
  Dockerfile: |
    # This will be replaced by the actual Dockerfile content
    DOCKERFILE_CONTENT
---
apiVersion: batch/v1
kind: Job
metadata:
  name: KANIKO_BUILD_NAME
  namespace: kaniko-builds
spec:
  ttlSecondsAfterFinished: 600
  backoffLimit: 0
  template:
    metadata:
      labels:
        app: kaniko-build
    spec:
      restartPolicy: Never
      initContainers:
      - name: prepare-context
        image: alpine/git:latest
        command: ["/bin/sh", "-c"]
        args:
        - |
          set -e
          echo "Cloning repository..."
          git clone GIT_REPO_URL /workspace/repo
          cd /workspace/repo
          git checkout GIT_SHA
          echo "Preparing build context..."
          mkdir -p /workspace/build
          cp -r services/nextjs/* /workspace/build/
          cp deploy/docker/Dockerfile.prod /workspace/build/Dockerfile
          echo "Build context ready:"
          ls -la /workspace/build/
        volumeMounts:
        - name: workspace
          mountPath: /workspace
      containers:
      - name: kaniko
        image: gcr.io/kaniko-project/executor:latest
        args:
        - --context=/workspace/build
        - --dockerfile=/workspace/build/Dockerfile
        - --cache=true
        - --cache-repo=CACHE_REPO
        - --compressed-caching=false
        - --snapshot-mode=redo
        - --use-new-run
        - --verbosity=info
        # DESTINATIONS will be added here
        volumeMounts:
        - name: workspace
          mountPath: /workspace
        - name: docker-config
          mountPath: /kaniko/.docker
        resources:
          requests:
            memory: "512Mi"
            cpu: "500m"
          limits:
            memory: "2Gi"
            cpu: "2000m"
      volumes:
      - name: workspace
        emptyDir: {}
      - name: docker-config
        secret:
          secretName: kaniko-registry-creds
          items:
          - key: .dockerconfigjson
            path: config.json

View File

@@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - rbac.yaml

View File

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kaniko-builds
  labels:
    app.kubernetes.io/name: kaniko-builds
    app.kubernetes.io/component: build-infrastructure

View File

@@ -1,69 +0,0 @@
---
# ServiceAccount for Gitea runner (optional, if you want dedicated SA)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: gitea-runner
  namespace: gitea
---
# Role to manage Kaniko builds
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: kaniko-builder
  namespace: kaniko-builds
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create", "get", "list", "delete"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["pods/log"]
  verbs: ["get"]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["create", "get", "list", "watch", "delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "create", "update", "patch"]
---
# RoleBinding for default ServiceAccount in gitea namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gitea-runner-kaniko-builder
  namespace: kaniko-builds
subjects:
- kind: ServiceAccount
  name: default
  namespace: gitea
roleRef:
  kind: Role
  name: kaniko-builder
  apiGroup: rbac.authorization.k8s.io
---
# ClusterRole to create namespaces (if needed)
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: namespace-creator
rules:
- apiGroups: [""]
  resources: ["namespaces"]
  verbs: ["create", "get", "list"]
---
# ClusterRoleBinding for default ServiceAccount in gitea namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gitea-runner-namespace-creator
subjects:
- kind: ServiceAccount
  name: default
  namespace: gitea
roleRef:
  kind: ClusterRole
  name: namespace-creator
  apiGroup: rbac.authorization.k8s.io
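# Quick check (editor's sketch) that the binding grants what the workflow needs:
#   kubectl auth can-i create jobs -n kaniko-builds --as=system:serviceaccount:gitea:default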

View File

@@ -0,0 +1,3 @@
export default function AboutUs(){
return <div>Practice app for K8s/argoCD/Grafana - Updated at 2026-01-09 16:02 KST</div>
}

View File

(binary image file changed: 12 KiB before, 12 KiB after)

View File

@@ -1,3 +0,0 @@
export default function AboutUs(){
return <div>Practice app for K8s/argoCD/Grafana</div>
}