Helm install of nebula v3.4 fails; the deployment followed the official documentation

Version: v3.4
values.yaml:

```yaml
image:
  nebulaOperator:
    image: XXX/pub/vesoft/nebula-operator:v1.4.1
    imagePullPolicy: IfNotPresent
  kubeRBACProxy:
    image: XXX/pub/kube-rbac-proxy:v0.8.0
    imagePullPolicy: IfNotPresent
  kubeScheduler:
    image: XXX/pub/kube-scheduler:v1.22.12
    imagePullPolicy: IfNotPresent

imagePullSecrets:
  - name: XXX
kubernetesClusterDomain: "dev.minum"

controllerManager:
  create: true
  replicas: 1
  env: []
  resources:
    limits:
      cpu: 200m
      memory: 200Mi
    requests:
      cpu: 100m
      memory: 100Mi

# Enable openkruise scheme for controller manager. (default false)
enableKruise: false

# Period at which the controller forces the repopulation of its local object stores. (default 0h30m0s)
syncPeriod: 0h30m0s

# Namespace the controller watches for updates to Kubernetes objects, If empty, all namespaces are watched.
watchNamespace:

# The address the metric endpoint binds to. (default ":8080")
metricsBindAddr: ":8080"

# The address the probe endpoint binds to. (default: ":8081")
healthProbeBindAddr: ":8081"

# The TCP port the Webhook server binds to. (default 9443)
webhookBindPort: 9443

# Maximum number of concurrently running reconcile loops for nebula cluster (default 3)
maxConcurrentReconciles: 3

nodeSelector: {}

tolerations: []

affinity: {}

nebula:
  version: v3.4.0
  imagePullPolicy: IfNotPresent
  storageClassName: "kadalu.gfs"
  enablePVReclaim: false
  enableBR: false
  enableForceUpdate: false
  schedulerName: default-scheduler # nebula-scheduler
  unsatisfiableAction: ScheduleAnyway
  logRotate: { }
  reference:
    name: statefulsets.apps
    version: v1
  graphd:
    image: XXX/pub/nebula-graphd
    replicas: 2
    serviceType: NodePort
    env: []
    config: {}
    logStorage: "500Mi"
    podLabels: {}
    podAnnotations: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    readinessProbe: {}
    initContainers: []
    sidecarContainers: []
    sidecarVolumes: []

  metad:
    image: XXX/pub/nebula-metad
    replicas: 1
    env: []
    config: {}
    logStorage: "500Mi"
    dataStorage: "2Gi"
    license: {}
    podLabels: {}
    podAnnotations: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    readinessProbe: {}
    initContainers: []
    sidecarContainers: []
    sidecarVolumes: []

  storaged:
    image: XXX/pub/nebula-storaged
    replicas: 1
    env: []
    config: {}
    resources: {}
    logStorage: "500Mi"
    dataStorage: "10Gi"
    enableAutoBalance: false
    podLabels: {}
    podAnnotations: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
    readinessProbe: {}
    initContainers: []
    sidecarContainers: []
    sidecarVolumes: []

  exporter:
    image: XXX/pub/nebula-stats-exporter
    version: v3.3.0
    replicas: 1
    env: [ ]
    podLabels: { }
    podAnnotations: { }
    nodeSelector: { }
    tolerations: [ ]
    affinity: { }
    livenessProbe: { }


imagePullSecrets:
  - name: XXX
nameOverride: ""
fullnameOverride: ""

Failure:

```
+ exec /usr/local/nebula/bin/nebula-metad --flagfile=/usr/local/nebula/etc/nebula-metad.conf --meta_server_addrs=nebula-metad-0.nebula-metad-headless.minum.svc.dev.minum:9559 --local_ip=nebula-metad-0.nebula-metad-headless.minum.svc.dev.minum --ws_ip=nebula-metad-0.nebula-metad-headless.minum.svc.dev.minum --daemonize=false
E0721 08:07:21.537818     1 MetaDaemon.cpp:207] Bad local host addr, status:Bad ip format:nebula-metad-0.nebula-metad-headless.minum.svc.dev.minum
```
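
The error says metad could not turn the `--local_ip` FQDN (built as `<pod>.<headless-service>.<namespace>.svc.<kubernetesClusterDomain>`, with `dev.minum` coming from the values above) into a usable address. A quick way to check whether that FQDN resolves at all, and which cluster domain the DNS actually serves, is a throwaway pod (a diagnostic sketch; the pod name and image are arbitrary, the FQDN is copied from the log):

```bash
# One-off pod: print the DNS search domains the cluster hands out and try to resolve
# the exact FQDN metad was started with.
kubectl run -n minum dns-check --rm -it --restart=Never --image=busybox:1.36 -- sh -c '
  cat /etc/resolv.conf
  nslookup nebula-metad-0.nebula-metad-headless.minum.svc.dev.minum
'
```

If the search domains end in something other than `dev.minum` (for example the default `cluster.local`), the FQDN cannot resolve, which would be a plausible explanation for the failure.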

The problem was resolved after upgrading to v3.5.
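
A hedged sketch of the corresponding upgrade, reusing the release and chart names assumed in the install sketch above (the operator chart version required for NebulaGraph v3.5 should be checked against the compatibility table in the official docs):

```bash
# Bump only the NebulaGraph version in the already-installed cluster release;
# everything else keeps the values from values.yaml.
helm upgrade nebula nebula-operator/nebula-cluster \
  --namespace minum \
  -f values.yaml \
  --set nebula.version=v3.5.0
```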

