CHORE(app): initial configuration
- Add initial app settings
- Configure base deployment
30
deploy/docker/Dockerfile.dev
Normal file
@@ -0,0 +1,30 @@
# trunk-ignore-all(checkov/CKV_DOCKER_3)
FROM node:20-alpine AS base

# Install dependencies for development
RUN apk add --no-cache libc6-compat curl

WORKDIR /app

# Copy package files
COPY package.json package-lock.json* ./

# Install all dependencies (including dev dependencies)
RUN npm ci

# Copy source code
COPY . .

# Generate Prisma Client
RUN npx prisma generate

# Expose port
EXPOSE 3000

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:3000/api/health || exit 1

# Default command (can be overridden in docker-compose)
CMD ["npm", "run", "dev"]
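For local testing outside of Compose, a minimal sketch of building and running this dev image, assuming the commands run from the repository root and that the Next.js app lives under services/nextjs as the compose files below indicate:

# Build the dev image against the Next.js source directory
docker build -f deploy/docker/Dockerfile.dev -t todo-app-dev services/nextjs
# Run it, forwarding the same host port the dev compose file uses
docker run --rm -p 3002:3000 --env-file .env todo-app-dev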
65
deploy/docker/Dockerfile.prod
Normal file
@@ -0,0 +1,65 @@
# Multi-stage build for Next.js application
FROM node:20-alpine AS base

# Install dependencies only when needed
FROM base AS deps
# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
RUN apk add --no-cache libc6-compat
WORKDIR /app

# Install dependencies based on the preferred package manager
COPY package.json package-lock.json* ./
RUN npm ci

# Rebuild the source code only when needed
FROM base AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .

# Generate Prisma Client
RUN npx prisma generate

# Build the application
ENV NEXT_TELEMETRY_DISABLED=1
RUN npm run build

# Production image, copy all the files and run next
FROM base AS runner
WORKDIR /app

RUN apk add --no-cache curl

ENV NODE_ENV=production
ENV NEXT_TELEMETRY_DISABLED=1

RUN addgroup --system --gid 1001 nodejs && adduser --system --uid 1001 nextjs

# Copy built application
COPY --from=builder /app/public ./public

# Set the correct permission for prerender cache
RUN mkdir .next && chown nextjs:nodejs .next

# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static

# Copy Prisma files
COPY --from=builder /app/prisma ./prisma
COPY --from=builder /app/node_modules/.prisma ./node_modules/.prisma

USER nextjs

EXPOSE 3000

ENV PORT=3000
ENV HOSTNAME=0.0.0.0

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:3000/api/health || exit 1

CMD ["node", "server.js"]
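Note that copying /app/.next/standalone only exists when the Next.js config enables the standalone output (output: 'standalone' in next.config.js); that setting is not part of this commit and is assumed here. A minimal sketch of building and running the production image from the repository root:

docker build -f deploy/docker/Dockerfile.prod -t todo-app services/nextjs
docker run --rm -p 3002:3000 --env-file .env todo-app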
64
deploy/docker/docker-compose.dev.yml
Normal file
@@ -0,0 +1,64 @@
services:
  # Next.js Application (Development) - Using External Database (same as Jotion)
  app:
    image: todo-app-dev
    build:
      context: ../../services/nextjs
      dockerfile: ../../deploy/docker/Dockerfile.dev
    container_name: todo-app-dev
    restart: unless-stopped
    labels:
      kompose.namespace: todo-dev
    ports:
      - "3002:3000"
    env_file:
      - ../../.env
    environment:
      - NODE_ENV=development
    networks:
      - todo-network-dev
    volumes:
      - ../../services/nextjs:/app
      - /app/node_modules
      - /app/.next
      - app_logs_dev:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    command: >
      sh -lc "npx prisma db push --skip-generate && npm run dev"

  # Prisma Studio - Connects to External Database
  prisma-studio:
    image: todo-app-dev
    container_name: todo-prisma-studio
    restart: unless-stopped
    labels:
      kompose.namespace: todo-dev
    ports:
      - "5556:5555"
    env_file:
      - ../../.env
    environment:
      - NODE_ENV=development
    networks:
      - todo-network-dev
    volumes:
      - ../../services/nextjs:/app
      - /app/node_modules
    command: npx prisma studio --port 5555 --hostname 0.0.0.0

volumes:
  # Named volumes for data persistence
  app_logs_dev:
    driver: local

networks:
  todo-network-dev:
    driver: bridge
    ipam:
      config:
        - subnet: 172.22.0.0/16
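A minimal usage sketch for the dev stack, assuming a .env file exists at the repository root (the compose file reads ../../.env relative to deploy/docker):

docker compose -f deploy/docker/docker-compose.dev.yml up --build
# App: http://localhost:3002, Prisma Studio: http://localhost:5556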
41
deploy/docker/docker-compose.yml
Normal file
@@ -0,0 +1,41 @@
services:
  # Next.js Application - Using External Database (same as Jotion)
  app:
    image: todo-app
    build:
      context: ../../services/nextjs
      dockerfile: ../../deploy/docker/Dockerfile.prod
    container_name: todo-app
    restart: unless-stopped
    labels:
      kompose.namespace: todo
    ports:
      - 3002:3000
    env_file:
      - ../../.env
    environment:
      - NODE_ENV=production
    networks:
      - todo-network
    volumes:
      - app_logs:/app/logs
    healthcheck:
      test: [CMD, curl, -f, http://localhost:3000/api/health]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    command: >
      sh -lc "npx prisma db push --skip-generate && node server.js"

volumes:
  # Named volumes for data persistence
  app_logs:
    driver: local

networks:
  todo-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.21.0.0/16
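A minimal usage sketch for the production stack, under the same root-level .env assumption as the dev file:

docker compose -f deploy/docker/docker-compose.yml up --build -d
docker compose -f deploy/docker/docker-compose.yml logs -f app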
57
deploy/k8s/base/deployment.yaml
Normal file
@@ -0,0 +1,57 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: todo-app
  labels:
    app: todo-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: todo-app
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  template:
    metadata:
      labels:
        app: todo-app
    spec:
      containers:
        - name: todo-app
          image: ghcr.io/mayne0213/todo:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 3000
              protocol: TCP
          env:
            - name: NODE_ENV
              value: production
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: todo-secrets
                  key: database-url
          resources:
            requests:
              memory: "100Mi"
              cpu: "50m"
            limits:
              memory: "200Mi"
              cpu: "150m"
          livenessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
      restartPolicy: Always
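The Deployment reads DATABASE_URL from a Secret named todo-secrets that is not created by this commit. A sketch of creating it by hand, with a placeholder connection string:

# Namespace and connection string are placeholders; adjust to the target cluster
kubectl create namespace todo
kubectl -n todo create secret generic todo-secrets \
  --from-literal=database-url='postgresql://USER:PASSWORD@HOST:5432/todo'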
15
deploy/k8s/base/kustomization.yaml
Normal file
@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - deployment.yaml
  - service.yaml

commonLabels:
  app.kubernetes.io/name: todo
  app.kubernetes.io/component: web

images:
  - name: ghcr.io/mayne0213/todo
    newTag: latest
16
deploy/k8s/base/service.yaml
Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: todo-service
  labels:
    app: todo-app
spec:
  type: ClusterIP
  ports:
    - name: http
      port: 80
      targetPort: 3000
      protocol: TCP
  selector:
    app: todo-app
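A sketch of rendering and applying the base manifests; the base sets no namespace, so this targets the current kubectl context's namespace, where the todo-secrets Secret must already exist:

kubectl kustomize deploy/k8s/base   # render only
kubectl apply -k deploy/k8s/base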
20
deploy/k8s/overlays/prod/deployment-patch.yaml
Normal file
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: todo-app
  labels:
    environment: production
spec:
  replicas: 1
  template:
    spec:
      containers:
        - name: todo-app
          resources:
            requests:
              memory: "120Mi"
              cpu: "50m"
            limits:
              memory: "230Mi"
              cpu: "150m"
20
deploy/k8s/overlays/prod/kustomization.yaml
Normal file
@@ -0,0 +1,20 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: todo

resources:
  - ../../base
  - resourcequota.yaml

commonLabels:
  environment: production

# Set the image tag
images:
  - name: ghcr.io/mayne0213/todo
    newTag: latest

patchesStrategicMerge:
  - deployment-patch.yaml
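A sketch of deploying the production overlay, assuming the todo namespace and the todo-secrets Secret already exist:

kubectl apply -k deploy/k8s/overlays/prod
kubectl -n todo rollout status deployment/todo-app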
13
deploy/k8s/overlays/prod/resourcequota.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
apiVersion: v1
|
||||
kind: ResourceQuota
|
||||
metadata:
|
||||
name: todo-quota
|
||||
namespace: todo
|
||||
spec:
|
||||
hard:
|
||||
requests.memory: "150Mi"
|
||||
requests.cpu: "100m"
|
||||
limits.memory: "250Mi"
|
||||
limits.cpu: "200m"
|
||||
pods: "2"
|
||||
|
||||
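To check consumption against this quota after deployment:

kubectl -n todo describe resourcequota todo-quota

Note that with the 120Mi memory request from the prod patch, a second (surge) pod would push total requests past the 150Mi cap, so the quota effectively leaves room for only one pod at a time despite pods: "2".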