Quickly deploying a Nebula Graph cluster with Docker Swarm

This is a lightly modified version of a Docker Swarm stack deployment YAML. graphd is turned into an ordinary stateless service and load-balanced through Swarm's ingress routing mesh, so after scaling graphd out, clients only need to hit the published port on any one Swarm node. The stack file is below, followed by a short sketch of the commands to deploy it; feel free to use it as a reference if it suits your setup.

version: '3.8'
services:
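  # metad0/1/2: three nebula-metad instances, each pinned to its own host via a
  # placement constraint and publishing its ports in host mode on that node's
  # 172.16.36.x address.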
  metad0:
    image: vesoft/nebula-metad:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=172.16.36.211:45500,172.16.36.212:45500,172.16.36.213:45500
      - --local_ip=172.16.36.211
      - --ws_ip=172.16.36.211
      - --port=45500
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == centos211
    healthcheck:
      test: ["CMD", "curl", "-f", "http://172.16.36.211:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 45500
        published: 45500
        protocol: tcp
        mode: host
    volumes:
      - /data/nebula/meta/data:/data/meta
      - /data/nebula/meta/logs:/logs
    networks:
      - nebula-net

  metad1:
    image: vesoft/nebula-metad:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=172.16.36.211:45500,172.16.36.212:45500,172.16.36.213:45500
      - --local_ip=172.16.36.212
      - --ws_ip=172.16.36.212
      - --port=45500
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == centos212
    healthcheck:
      test: ["CMD", "curl", "-f", "http://172.16.36.212:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 45500
        published: 45500
        protocol: tcp
        mode: host
    volumes:
      - /data/nebula/meta/data:/data/meta
      - /data/nebula/meta/logs:/logs
    networks:
      - nebula-net

  metad2:
    image: vesoft/nebula-metad:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=172.16.36.211:45500,172.16.36.212:45500,172.16.36.213:45500
      - --local_ip=172.16.36.213
      - --ws_ip=172.16.36.213
      - --port=45500
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == centos213
    healthcheck:
      test: ["CMD", "curl", "-f", "http://172.16.36.213:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 45500
        published: 45500
        protocol: tcp
        mode: host
    volumes:
      - /data/nebula/meta/data:/data/meta
      - /data/nebula/meta/logs:/logs
    networks:
      - nebula-net

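  # storaged0/1/2: three nebula-storaged instances, likewise pinned one per host
  # with host-mode ports; data and logs are bind-mounted from host directories
  # under /data/nebula.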
  storaged0:
    image: vesoft/nebula-storaged:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=172.16.36.211:45500,172.16.36.212:45500,172.16.36.213:45500
      - --local_ip=172.16.36.211
      - --ws_ip=172.16.36.211
      - --port=44500
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == centos211
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://172.16.36.211:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12002
        protocol: tcp
        mode: host
    volumes:
      - /data/nebula/storaged/data:/data/storage
      - /data/nebula/storaged/logs:/logs
    networks:
      - nebula-net
  storaged1:
    image: vesoft/nebula-storaged:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=172.16.36.211:45500,172.16.36.212:45500,172.16.36.213:45500
      - --local_ip=172.16.36.212
      - --ws_ip=172.16.36.212
      - --port=44500
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == centos212
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://172.16.36.212:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12002
        protocol: tcp
        mode: host
    volumes:
      - /data/nebula/storaged/data:/data/storage
      - /data/nebula/storaged/logs:/logs
    networks:
      - nebula-net

  storaged2:
    image: vesoft/nebula-storaged:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=172.16.36.211:45500,172.16.36.212:45500,172.16.36.213:45500
      - --local_ip=172.16.36.213
      - --ws_ip=172.16.36.213
      - --port=44500
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == centos213
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://172.16.36.213:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12002
        protocol: tcp
        mode: host
    volumes:
      - /data/nebula/storaged/data:/data/storage
      - /data/nebula/storaged/logs:/logs
    networks:
      - nebula-net
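
  # graphd: stateless, three replicas with at most one per node, published
  # through the ingress routing mesh so port 3699 is reachable on every swarm node.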
  graphd:
    image: vesoft/nebula-graphd:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=172.16.36.211:45500,172.16.36.212:45500,172.16.36.213:45500
      - --port=3699
      - --ws_ip=0.0.0.0
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 3
      restart_policy:
        condition: on-failure
      placement:
        max_replicas_per_node: 1
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://localhost:13000/status" ]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 3699
        published: 3699
        protocol: tcp
        mode: ingress
      - target: 13000
        published: 13000
        protocol: tcp
        mode: ingress
      - target: 13002
        published: 13002
        protocol: tcp
        mode: ingress
    volumes:
      - logs-graphd:/logs
    networks:
      - custom-overlay-network
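
# nebula-net resolves to the host network ("name: host") used by metad and storaged;
# custom-overlay-network is a pre-created attachable overlay that graphd attaches to.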
networks:
  nebula-net:
    external: true
    name: host
  custom-overlay-network:
    external: true
volumes:
  logs-graphd:
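
A minimal sketch of how the stack might be brought up, assuming the file above is saved as docker-stack.yaml, the three hosts have already joined the swarm, and the stack is named nebula (so the graphd service becomes nebula_graphd); adjust names and addresses to your environment:

# Initialize the swarm on the manager (centos211 here); the other two hosts
# join with the token this command prints.
docker swarm init --advertise-addr 172.16.36.211

# Pre-create the attachable overlay referenced as external by graphd.
# nebula-net needs no creation: via "name: host" it maps to the built-in host network.
docker network create --driver overlay --attachable custom-overlay-network

# Deploy the stack; swarm names services <stack>_<service>, e.g. nebula_graphd.
docker stack deploy -c docker-stack.yaml nebula
docker service ls

# graphd is stateless, so it scales like any ordinary service; max_replicas_per_node: 1
# caps it at one replica per host, and the ingress mesh keeps port 3699 reachable
# on every swarm node regardless of where the replicas land.
docker service scale nebula_graphd=3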