Files
observability/opentelemetry-operator/helm-values.yaml
Mayne0213 b4b48c6e89 FIX(opentelemetry-operator): restore memory to 256Mi
- VPA recommended 75Mi was too low causing informer sync timeout
- Restore original memory value for stability
2026-01-10 14:52:24 +09:00

56 lines
1.6 KiB
YAML

# OpenTelemetry Operator Helm Values
# Chart: https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-operator

# Manager (Operator) configuration
manager:
  collectorImage:
    repository: otel/opentelemetry-collector-contrib
  targetAllocatorImage:
    repository: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator
  # Default images injected for auto-instrumented workloads, per language
  autoInstrumentationImage:
    java:
      repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java
    nodejs:
      repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs
    python:
      repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python
    dotnet:
      repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet
    go:
      repository: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go
  resources:
    limits:
      cpu: null  # Disable chart default CPU limits
      # NOTE: keep at 256Mi — VPA-recommended 75Mi was too low and caused
      # informer cache sync timeouts (see commit message)
      memory: 256Mi
    requests:
      cpu: 10m
      memory: 256Mi
  # ServiceMonitor configuration
  serviceMonitor:
    enabled: false  # Disable ServiceMonitor creation to prevent conflicts

# Admission webhooks (uses cert-manager self-signed CA)
admissionWebhooks:
  certManager:
    enabled: true

# Kube RBAC Proxy sidecar protecting the operator metrics endpoint
kubeRBACProxy:
  enabled: true
  resources:
    limits:
      cpu: null  # Disable chart default CPU limits
      memory: 64Mi
    requests:
      cpu: 5m
      memory: 64Mi

# Schedule on master node
tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
nodeSelector:
  # Label value is the string "true" — quoted so YAML does not retype it as a boolean
  node-role.kubernetes.io/control-plane: "true"