集群部署因为端口限制,yml文件中那些端口需要修改?

版本:

Nebula 1.2.0
Exchange 1.1.0

因端口限制8000-8100 不知道yml文件需要修改那些,现在只改了command的,ports需不需要修改?

version: '3.6'
services:
  metad0:
    image: vesoft/nebula-metad:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --local_ip=10.86.87.16
      - --ws_ip=10.86.87.16
      - --port=8045
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving01.py
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.16:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 8045
        published: 8045
        protocol: tcp
        mode: host
    volumes:
      - data-metad0:/data/meta
      - logs-metad0:/logs
    networks:
      - nebula-net

  metad1:
    image: vesoft/nebula-metad:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --local_ip=10.86.87.15
      - --ws_ip=10.86.87.15
      - --port=8045
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving00.py
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.15:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 8045
        published: 8045
        protocol: tcp
        mode: host
    volumes:
      - data-metad1:/data/meta
      - logs-metad1:/logs
    networks:
      - nebula-net

  metad2:
    image: vesoft/nebula-metad:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --local_ip=10.86.87.17
      - --ws_ip=10.86.87.17
      - --port=8045
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving02.py
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.17:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 8045
        published: 8045
        protocol: tcp
        mode: host
    volumes:
      - data-metad2:/data/meta
      - logs-metad2:/logs
    networks:
      - nebula-net

  storaged0:
    image: vesoft/nebula-storaged:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --local_ip=10.86.87.16
      - --ws_ip=10.86.87.16
      - --port=8044
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving01.py
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.16:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12002
        protocol: tcp
        mode: host
    volumes:
      - data-storaged0:/data/storage
      - logs-storaged0:/logs
    networks:
      - nebula-net
  storaged1:
    image: vesoft/nebula-storaged:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --local_ip=10.86.87.15
      - --ws_ip=10.86.87.15
      - --port=8044
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving00.py
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.15:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12004
        protocol: tcp
        mode: host
    volumes:
      - data-storaged1:/data/storage
      - logs-storaged1:/logs
    networks:
      - nebula-net

  storaged2:
    image: vesoft/nebula-storaged:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --local_ip=10.86.87.17
      - --ws_ip=10.86.87.17
      - --port=8044
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving02.py
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.17:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12006
        protocol: tcp
        mode: host
    volumes:
      - data-storaged2:/data/storage
      - logs-storaged2:/logs
    networks:
      - nebula-net
  graphd0:
    image: vesoft/nebula-graphd:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --port=8036
      - --ws_ip=10.86.87.16
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving01.py
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.16:13000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 8036
        published: 8036
        protocol: tcp
        mode: host
      - target: 13000
        published: 13000
        protocol: tcp
        mode: host
      - target: 13002
        published: 13002
        protocol: tcp
        mode: host
    volumes:
      - logs-graphd:/logs
    networks:
      - nebula-net

  graphd1:
    image: vesoft/nebula-graphd:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --port=8036
      - --ws_ip=10.86.87.15
      - --log_dir=/logs
      - --v=2
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving00.py
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.15:13002/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 8036
        published: 3640
        protocol: tcp
        mode: host
      - target: 13000
        published: 13001
        protocol: tcp
        mode: host
      - target: 13002
        published: 13003
        protocol: tcp
        mode: host
    volumes:
      - logs-graphd2:/logs
    networks:
      - nebula-net
  graphd2:
    image: vesoft/nebula-graphd:v1.2.0
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=10.86.87.15:8045,10.86.87.16:8045,10.86.87.17:8045
      - --port=8036
      - --ws_ip=10.86.87.17
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == sec-ocr-serving02.py
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://10.86.87.17:13002/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 8036
        published: 3641
        protocol: tcp
        mode: host
      - target: 13000
        published: 13004
        protocol: tcp
        mode: host
      - target: 13002
        published: 13005
        protocol: tcp
        mode: host
    volumes:
      - logs-graphd3:/logs
    networks:
      - nebula-net
networks:
  nebula-net:
    external: true
    attachable: true
    name: host
volumes:
  data-metad0:
  logs-metad0:
  data-metad1:
  logs-metad1:
  data-metad2:
  logs-metad2:
  data-storaged0:
  logs-storaged0:
  data-storaged1:
  logs-storaged1:
  data-storaged2:
  logs-storaged2:
  logs-graphd:
  logs-graphd2:
  logs-graphd3:

把 ports部分的端口 都修改为你们 能访问的把,你们的端口限制在了8000-8100了

不知道具体都代表什么意思,按模板改了部分端口启动不了,那些是对内那些对外的呢?应该只修改对外的端口就可以。

可以 参考一下 文档 Graphd 配置 - Nebula Graph Database 手册 对应的参数和yml中的是差不多的