Cluster

是否可以使用 terraform 在 AWS EKS 集群上部署 Datadog 代理

  • October 28, 2020

我希望了解是否有人可以指導我如何在我的 AWS EKS 集群上安裝 Datadog 代理作為 pod。我可以使用 kubectl 命令完成我的要求。

但是在這裡,我正在尋找一種可能的解決方案來從 Terraform 腳本中執行相同的工作,或者是否有人可以建議任何其他自動化方式來在我的 EKS 集群上部署 Datadog 代理。

用於 Terraform 的 Helm Provider 可用於將包部署到 Kubernetes。這個 GitHub 問題包含一個如何使用它來部署 Datadog 代理的範例:

# Deploys the Datadog agent onto the cluster via the Terraform Helm provider.
# The Kubernetes secrets referenced in the values below (datadog-api-key /
# datadog-app-key) and the service accounts / namespace referenced via
# Terraform resources are expected to be managed elsewhere in this
# configuration.
#
# NOTE(review): "stable/datadog" refers to the deprecated helm/charts
# "stable" repository (deprecated Nov 2020). Newer chart releases live at
# https://helm.datadoghq.com, but they use a different values schema than
# chart 1.38.2 — confirm the repo/chart pairing before upgrading.
resource "helm_release" "datadog" {
 name          = "datadog"
 # Chart version is pinned; bump deliberately, since the values heredoc
 # below matches this chart's 1.x values schema.
 version       = "1.38.2"
 chart         = "stable/datadog"
 # Namespace comes from a Terraform-managed kubernetes_namespace resource.
 namespace     = kubernetes_namespace.datadog.metadata.0.name
 recreate_pods = true
 force_update  = true

 # Helm values, passed as a single YAML document. Top-level sections:
 #   image            — node agent image (DaemonSet pods)
 #   clusterAgent     — Datadog Cluster Agent deployment + resources
 #   datadog          — core agent config: secrets, APM, logs, env vars,
 #                      tags, and confd integration configs (disk, vault,
 #                      istio)
 #   daemonset        — tolerations (control-plane/etcd/custom taints) and
 #                      custom agent config
 #   kubeStateMetrics / kube-state-metrics — enable KSM subchart, reusing a
 #                      Terraform-managed service account
 #   rbac             — reuse a Terraform-managed cluster-agent SA
 # Terraform interpolates ${var.environment} and the service-account names
 # inside the heredoc before Helm sees it. <my_cluster>, <some_vault_url>
 # and <node_taint> are placeholders the reader must substitute.
 values = [<<YAML
image:
 repository: datadog/agent
 tag: 6.14.1-jmx
 pullPolicy: IfNotPresent
clusterAgent:
 containerName: cluster-agent
 image:
   repository: datadog/cluster-agent
   tag: 1.3.1
   pullPolicy: IfNotPresent
 enabled: true
 metricsProvider:
   enabled: true
 replicas: 1
 resources:
   requests:
     cpu: 200m
     memory: 256Mi
   limits:
     cpu: 400m
     memory: 512Mi
datadog:
 apiKeyExistingSecret: datadog-api-key
 apmEnabled: true
 appKeyExistingSecret: datadog-app-key
 collectEvents: true
 env:
   - name: DD_APM_IGNORE_RESOURCES
     value: "GET /webjars/.*, GET /v2/api-docs, GET /swagger-resources, GET /actuator/health, GET /_health, GET /manifest"
   - name: DD_KUBELET_TLS_VERIFY
     value: "false"
   - name: DD_COLLECT_EC2_TAGS
     value: "true"
   - name: DD_CUSTOM_SENSITIVE_WORDS
     value: "authorization"
   - name: DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE
     value: "true"
 leaderElection: true
 logsConfigContainerCollectAll: true
 logsEnabled: true
 logLevel: INFO
 name: datadog
 nonLocalTraffic: true
 processAgentEnabled: true
 resources:
   requests:
     cpu: 500m
     memory: 512Mi
   limits:
     cpu: 2000m
     memory: 2Gi
 tags:
   - env:${var.environment}
   - cluster:<my_cluster>
 confd:
   disk.yaml: |-
       init_config:
       instances:
         - use_mount: true
           mount_point_whitelist:
             - /$
   vault.yaml: |-
       init_config:
       instances:
         - api_url: https://<some_vault_url>/v1
   istio.yaml: |-
       init_config:
       instances:
         - istio_mesh_endpoint: http://istio-telemetry.istio-system:42422/metrics
           mixer_endpoint: http://istio-telemetry.istio-system:15014/metrics
           galley_endpoint: http://istio-galley.istio-system:15014/metrics
           pilot_endpoint: http://istio-pilot.istio-system:15014/metrics
           citadel_endpoint: http://istio-citadel.istio-system:15014/metrics
           send_histograms_buckets: true
           send_monotonic_counter: true
 useCriSocketVolume: true
daemonset:
 enabled: true
 tolerations:
   - key: "node-role.kubernetes.io/controlplane"
     operator: "Exists"
     effect: "NoSchedule"
   - key: "node-role.kubernetes.io/controlplane"
     operator: "Exists"
     effect: "NoExecute"
   - key: "node-role.kubernetes.io/etcd"
     operator: "Exists"
     effect: "NoExecute"
   - key: "node-role.kubernetes.io/<node_taint>"
     operator: "Exists"
     effect: "NoSchedule"
 useConfigMap: true
 customAgentConfig:
   listeners:
     - name: kubelet
   config_providers:
     - name: kubelet
       polling: true
   apm_config:
     enabled: false
     apm_non_local_traffic: true
   jmx_use_cgroup_memory_limit: true
   logs_config:
     open_files_limit: 500
 updateStrategy:
    type: RollingUpdate
 useHostPort: true
kubeStateMetrics:
 enabled: true
kube-state-metrics:
 rbac:
   create: false
 serviceAccount:
   create: false
   name: "${kubernetes_service_account.kube-state-metrics.metadata.0.name}"
rbac:
 create: false
 serviceAccountName: "${kubernetes_service_account.datadog-cluster-agent.metadata.0.name}"
YAML
 ]

 # Ignore drift on `keyring` so Terraform does not propose a change on
 # every plan for this provider attribute.
 lifecycle {
   ignore_changes = [
     keyring,
   ]
 }
}

引用自:https://serverfault.com/questions/1040386