observability/opentelemetry-collector/manifests/collector.yaml
Mayne0213 1fdbb5e1dd FEAT(otel): enable Target Allocator for metrics
- Enable Target Allocator with consistent-hashing strategy
- Configure prometheus receiver to use Target Allocator
- Add RBAC permissions for secrets and events
- Use prometheusCR for ServiceMonitor/PodMonitor discovery
2026-01-09 23:30:41 +09:00


# OpenTelemetry Collector with Target Allocator
# Managed by OpenTelemetry Operator
#
# Architecture:
# - DaemonSet mode: one collector per node for log collection
# - Target Allocator (consistent-hashing): distributes scrape targets across collectors
# - Filelog receiver for container logs
# - Prometheus receiver with Target Allocator for metrics (replaces Prometheus scraping)
# - Exports to: Tempo (traces), Prometheus (metrics), Loki (logs)
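#
# For reference, a minimal ServiceMonitor that prometheusCR discovery would
# pick up (a sketch only; the name, namespace, labels, and port are
# hypothetical placeholders, not resources defined in this repo):
#
#   apiVersion: monitoring.coreos.com/v1
#   kind: ServiceMonitor
#   metadata:
#     name: example-app
#     namespace: default
#   spec:
#     selector:
#       matchLabels:
#         app: example-app
#     endpoints:
#       - port: metrics
#         interval: 30s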
apiVersion: opentelemetry.io/v1beta1
kind: OpenTelemetryCollector
metadata:
  name: otel-collector
  namespace: opentelemetry
spec:
  mode: daemonset
  image: otel/opentelemetry-collector-contrib:0.113.0
  serviceAccount: otel-collector
  # Target Allocator - distributes Prometheus scrape targets across collectors
  # Using consistent-hashing strategy (not per-node, due to a collector-node mapping bug)
  targetAllocator:
    enabled: true
    serviceAccount: otel-collector-targetallocator
    image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:0.113.0
    allocationStrategy: consistent-hashing
    filterStrategy: relabel-config
    prometheusCR:
      enabled: true
      serviceMonitorSelector: {}
      podMonitorSelector: {}
      scrapeInterval: 30s
    resources:
      requests:
        cpu: 10m
        memory: 64Mi
      limits:
        memory: 128Mi
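  # To verify target distribution, the Target Allocator exposes an HTTP API
  # (paths per the upstream target-allocator; the service name below is the
  # one the operator derives from this collector's name):
  #   kubectl -n opentelemetry port-forward svc/otel-collector-targetallocator 8080:80
  #   curl -s localhost:8080/scrape_configs   # all discovered scrape configs
  #   curl -s localhost:8080/jobs             # jobs with per-job target links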
  resources:
    requests:
      cpu: 50m
      memory: 256Mi
    limits:
      memory: 512Mi
  tolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
  volumeMounts:
    - name: varlogpods
      mountPath: /var/log/pods
      readOnly: true
    - name: varlibdockercontainers
      mountPath: /var/lib/docker/containers
      readOnly: true
  volumes:
    - name: varlogpods
      hostPath:
        path: /var/log/pods
    - name: varlibdockercontainers
      hostPath:
        path: /var/lib/docker/containers
  ports:
    - name: otlp-grpc
      port: 4317
      protocol: TCP
      targetPort: 4317
    - name: otlp-http
      port: 4318
      protocol: TCP
      targetPort: 4318
    - name: metrics
      port: 8888
      protocol: TCP
      targetPort: 8888
  env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    - name: K8S_POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: K8S_POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
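  # Note: K8S_POD_NAME feeds collector_id (prometheus receiver) and the
  # remote-write external_labels below; K8S_NODE_NAME and K8S_POD_IP are not
  # referenced in this config and appear to be exposed for future use.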
  config:
    receivers:
      otlp:
        protocols:
          grpc:
            endpoint: 0.0.0.0:4317
          http:
            endpoint: 0.0.0.0:4318
      # Filelog receiver for container logs
      filelog:
        include:
          - /var/log/pods/*/*/*.log
        exclude:
          - /var/log/pods/opentelemetry_otel-collector*/*/*.log
        start_at: end
        include_file_path: true
        include_file_name: false
        operators:
          - type: router
            id: get-format
            routes:
              - output: parser-docker
                expr: 'body matches "^\\{"'
              - output: parser-containerd
                expr: 'body matches "^[^ Z]+Z"'
            default: parser-containerd
          - type: json_parser
            id: parser-docker
            output: extract-metadata-from-filepath
            timestamp:
              parse_from: attributes.time
              layout: '%Y-%m-%dT%H:%M:%S.%LZ'
          - type: regex_parser
            id: parser-containerd
            regex: '^(?P<time>[^ ^Z]+Z) (?P<stream>stdout|stderr) (?P<logtag>[^ ]*) ?(?P<log>.*)$'
            output: extract-metadata-from-filepath
            timestamp:
              parse_from: attributes.time
              layout: '%Y-%m-%dT%H:%M:%S.%LZ'
          - type: regex_parser
            id: extract-metadata-from-filepath
            regex: '^.*\/(?P<namespace>[^_]+)_(?P<pod_name>[^_]+)_(?P<uid>[a-f0-9-]+)\/(?P<container_name>[^\/]+)\/.*$'
            parse_from: attributes["log.file.path"]
          - type: move
            from: attributes.namespace
            to: resource["k8s.namespace.name"]
          - type: move
            from: attributes.pod_name
            to: resource["k8s.pod.name"]
          - type: move
            from: attributes.container_name
            to: resource["k8s.container.name"]
          - type: move
            from: attributes.uid
            to: resource["k8s.pod.uid"]
          - type: move
            from: attributes.stream
            to: attributes["log.iostream"]
          - type: move
            from: attributes.log
            to: body
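        # Worked example: a containerd (CRI) log line such as
        #   2026-01-09T14:30:41.123456789Z stdout F {"level":"info","msg":"ready"}
        # is routed to parser-containerd, which splits it into time/stream/
        # logtag/log; extract-metadata-from-filepath then derives namespace,
        # pod, uid, and container from the log file path, and the move
        # operators promote them to k8s.* resource attributes.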
      # Prometheus receiver - uses Target Allocator for ServiceMonitor/PodMonitor discovery
      prometheus:
        config:
          global:
            scrape_interval: 60s
        target_allocator:
          endpoint: http://otel-collector-targetallocator:80
          interval: 30s
          collector_id: ${env:K8S_POD_NAME}
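      # Each collector polls only its assigned share of targets; to inspect one
      # pod's assignment (job name URL-encoded; placeholders are hypothetical):
      #   curl -s "http://otel-collector-targetallocator/jobs/<job>/targets?collector_id=<pod-name>"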
    processors:
      batch:
        timeout: 10s
        send_batch_size: 1024
        send_batch_max_size: 2048
      memory_limiter:
        check_interval: 5s
        limit_mib: 400
        spike_limit_mib: 100
      k8sattributes:
        extract:
          metadata:
            - k8s.namespace.name
            - k8s.deployment.name
            - k8s.pod.name
            - k8s.node.name
        passthrough: false
        pod_association:
          - sources:
              - from: resource_attribute
                name: k8s.pod.ip
          - sources:
              - from: resource_attribute
                name: k8s.pod.uid
          - sources:
              - from: connection
      resourcedetection:
        detectors: [env, system]
        timeout: 5s
        override: false
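      # After k8sattributes + resourcedetection, a record carries resource
      # attributes roughly like the following (illustrative values only):
      #   k8s.namespace.name: default
      #   k8s.pod.name: example-app-6d4cf56db6-abcde
      #   k8s.deployment.name: example-app
      #   k8s.node.name: worker-1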
    exporters:
      otlp/tempo:
        endpoint: tempo.tempo.svc.cluster.local:4317
        tls:
          insecure: true
      prometheusremotewrite:
        endpoint: http://prometheus-kube-prometheus-prometheus.prometheus.svc:9090/api/v1/write
        tls:
          insecure: true
        external_labels:
          otel_collector: ${env:K8S_POD_NAME}
      loki:
        endpoint: http://loki.loki.svc.cluster.local:3100/loki/api/v1/push
        default_labels_enabled:
          exporter: true
          level: true
      debug:
        verbosity: basic
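      # Note: prometheusremotewrite requires Prometheus's remote-write receiver
      # to be enabled (--web.enable-remote-write-receiver). With
      # kube-prometheus-stack this maps to (assumed chart values path):
      #   prometheus:
      #     prometheusSpec:
      #       enableRemoteWriteReceiver: true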
    extensions:
      health_check:
        endpoint: 0.0.0.0:13133
    service:
      extensions: [health_check]
      pipelines:
        traces:
          receivers: [otlp]
          processors: [memory_limiter, k8sattributes, resourcedetection, batch]
          exporters: [otlp/tempo]
        metrics:
          receivers: [otlp, prometheus]
          processors: [memory_limiter, k8sattributes, resourcedetection, batch]
          exporters: [prometheusremotewrite]
        logs:
          receivers: [otlp, filelog]
          processors: [memory_limiter, k8sattributes, resourcedetection, batch]
          exporters: [loki]
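# Smoke test (a sketch using the upstream telemetrygen tool; the endpoint
# assumes the operator-created otel-collector-collector service in this namespace):
#   kubectl -n opentelemetry run telemetrygen --rm -it --restart=Never \
#     --image=ghcr.io/open-telemetry/opentelemetry-collector-contrib/telemetrygen:latest \
#     -- traces --otlp-endpoint otel-collector-collector:4317 --otlp-insecure --traces 5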