Send request to meta"127.0.0.1":9559 failed

  • nebula 版本:(为节省回复者核对版本信息的时间,首次发帖的版本信息记得以截图形式展示)
  • 部署方式: 单机
  • 安装方式: Docker
  • 是否为线上版本:N
  • 硬件信息
    • 磁盘( 推荐使用 SSD)
    • CPU、内存信息
  • 问题的具体描述
  • 相关的 meta / storage / graph info 日志信息(尽量使用文本形式方便检索)

使用3.0的cpp客户端连接3.0的单机nebula,session的example可以跑通,但是storage_client_example显示
Send request to meta"127.0.0.1":9559 failed
List space failed
load data failed
Get space id for nba failed

:thinking: 是不是网络不通的原因,你试试把 ip 换成真实的 ip 地址呢

是不是需要把地址改成Storage 的地址?9559像是Graph的端口号

1 个赞

对对 Orz 我忘了这个茬。

昨天改了端口之后这里跑通了,但是后面跑ScanEdgeIter::next,又报错了
ThriftClientManager-inl.h:66] Failed to resolve address for 'storaged1': Name or service not known (error=-2): Unknown error -2
StorageClient.cpp:130] Send request to "storaged1":9779 failed
ScanEdgeIter.cpp:36] Scan edge failed

看起来像是host的名字有问题,这个可以在docker-compose.yaml里改吗

docker 怎么改IP地址这个就不晓得了 @steam 召唤大神

我不会啊。大兄弟你等会,我喊个人过来。

1 个赞

多谢啊

看错误是compose里的问题,方便把compose配置贴出来吗

version: '3.4'
services:
  metad0:
    image: vesoft/nebula-metad:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --local_ip=metad0
      - --ws_ip=metad0
      - --port=9559
      - --ws_http_port=19559
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://metad0:19559/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - "9559:9559"
      - 19559
      - 19560
    volumes:
      - ./data/meta0:/data/meta
      - ./logs/meta0:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  metad1:
    image: vesoft/nebula-metad:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --local_ip=metad1
      - --ws_ip=metad1
      - --port=9559
      - --ws_http_port=19559
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://metad1:19559/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9559
      - 19559
      - 19560
    volumes:
      - ./data/meta1:/data/meta
      - ./logs/meta1:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  metad2:
    image: vesoft/nebula-metad:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --local_ip=metad2
      - --ws_ip=metad2
      - --port=9559
      - --ws_http_port=19559
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://metad2:19559/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9559
      - 19559
      - 19560
    volumes:
      - ./data/meta2:/data/meta
      - ./logs/meta2:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  storaged0:
    image: vesoft/nebula-storaged:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --local_ip=storaged0
      - --ws_ip=storaged0
      - --port=9779
      - --ws_http_port=19779
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://storaged0:19779/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9779
      - 19779
      - 19780
    volumes:
      - ./data/storage0:/data/storage
      - ./logs/storage0:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  storaged1:
    image: vesoft/nebula-storaged:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --local_ip=storaged1
      - --ws_ip=storaged1
      - --port=9779
      - --ws_http_port=19779
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://storaged1:19779/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9779
      - 19779
      - 19780
    volumes:
      - ./data/storage1:/data/storage
      - ./logs/storage1:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  storaged2:
    image: vesoft/nebula-storaged:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --local_ip=storaged2
      - --ws_ip=storaged2
      - --port=9779
      - --ws_http_port=19779
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://storaged2:19779/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9779
      - 19779
      - 19780
    volumes:
      - ./data/storage2:/data/storage
      - ./logs/storage2:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  graphd:
    image: vesoft/nebula-graphd:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --port=9669
      - --local_ip=graphd
      - --ws_ip=graphd
      - --ws_http_port=19669
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - storaged0
      - storaged1
      - storaged2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://graphd:19669/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - "9669:9669"
      - 19669
      - 19670
    volumes:
      - ./logs/graph:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  graphd1:
    image: vesoft/nebula-graphd:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --port=9669
      - --local_ip=graphd1
      - --ws_ip=graphd1
      - --ws_http_port=19669
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - storaged0
      - storaged1
      - storaged2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://graphd1:19669/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9669
      - 19669
      - 19670
    volumes:
      - ./logs/graph1:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  graphd2:
    image: vesoft/nebula-graphd:v3
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=metad0:9559,metad1:9559,metad2:9559
      - --port=9669
      - --local_ip=graphd2
      - --ws_ip=graphd2
      - --ws_http_port=19669
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - storaged0
      - storaged1
      - storaged2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://graphd2:19669/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9669
      - 19669
      - 19670
    volumes:
      - ./logs/graph2:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  console:
    image: vesoft/nebula-console:v3
    entrypoint: ""
    command:
      - sh
      - -c
      - |
        sleep 3 &&
        nebula-console -addr graphd -port 9669 -u root -p nebula -e 'ADD HOSTS "storaged0":9779,"storaged1":9779,"storaged2":9779' &&
        sleep 36000
    depends_on:
      - graphd
    networks:
      - nebula-net

networks:
  nebula-net:

基本上没改啥

docker-compose 用了 nebula-net 网络,然后用的域名,两个办法:

  1. 在一个容器里运行你的程序,容器也使用同一个 nebula-net 网络。
  2. 在 docker-compose 里指定子网的网段和 ip,然后在宿主机上直接访问 ip,参考:https://github.com/vesoft-inc/nebula-java/blob/master/client/src/test/resources/docker-compose.yaml
3 个赞

如果是真的需要连接 meta/storage 的话,参考 @HarrisChu 的回答。如果只是通过语句读写数据,访问 graphD、就足够了,默认 compose 配置里一个 graphD 是暴露给外边的。

另外这里也有一个 Harris 第一个方法的例子 快速搭建调试 Nebula Graph Python Storage 客户端的环境 - siwei.io 只不过是python不是 cpp,是一样的。

1 个赞

多谢啊,我去试一下

docker使用nebula-net网络之后,没有再报错,但是还是scan不到数据

console这边可以查得到边。

(root@nebula) [my_space_1]> GO FROM "11" OVER e2 YIELD edge as e
+-------------------------------------------+
| e                                         |
+-------------------------------------------+
| [:e2 "11"->"12" @0 {age: 12, name: "n1"}] |
| [:e2 "11"->"13" @0 {age: 12, name: "n1"}] |
+-------------------------------------------+
Got 2 rows (time spent 1143/1574 us)

Sun, 20 Feb 2022 11:29:28 UTC

session这边session.execute("LOOKUP ON e2 YIELD edge AS e")也是找得到的。

e|
("11")-[e2(1)]->("12")@0 name:"n1",age:12|
("11")-[e2(1)]->("13")@0 name:"n1",age:12|

但是storage client这边读不到

/data/nebula/examples# ./storage_client_example 
scan edge...
-------------------------
e2.name|e2.age|


+++++++++++++++++++++++++

入参

nebula::ScanEdgeIter scanEdgeIter = c.scanEdgeWithPart("my_space_1",
                                             1,
                                             "e2",
                                             std::vector<std::string>{"name", "age"},
                                             100,                                                                                                                                                                                                                           
                                             0,
                                             std::numeric_limits<int64_t>::max(),
                                             "",
                                             true,
                                             true);

host

(root@nebula) [(none)]> SHOW HOSTS;
+-------------+-------+-----------+--------------+----------------------+------------------------+----------------------+
| Host        | Port  | Status    | Leader count | Leader distribution  | Partition distribution | Version              |
+-------------+-------+-----------+--------------+----------------------+------------------------+----------------------+
| "storaged0" | 9779  | "ONLINE"  | 33           | "my_space_1:33"      | "my_space_1:33"        | "2022.02.15-nightly" |
| "storaged1" | 9779  | "ONLINE"  | 33           | "my_space_1:33"      | "my_space_1:33"        | "2022.02.15-nightly" |
| "storaged2" | 9779  | "ONLINE"  | 34           | "my_space_1:34"      | "my_space_1:34"        | "2022.02.15-nightly" |
| "0.0.0.0"   | 16000 | "OFFLINE" | 0            | "No valid partition" | "No valid partition"   |                      |
+-------------+-------+-----------+--------------+----------------------+------------------------+----------------------+

请问是我写的入参有问题吗

你用的是 withPart ,是不是边不在 1 这个 part 上。
建 space 的时候,是多少个 part

应该是这个设置

(root@nebula) [(none)]> SHOW CREATE SPACE my_space_1;
+--------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Space        | Create Space                                                                                                                                                                                                                                                                  |
+--------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| "my_space_1" | "CREATE SPACE `my_space_1` (partition_num = 100, replica_factor = 1, charset = utf8, collate = utf8_bin, vid_type = FIXED_STRING(30), atomic_edge = false) ON default_zone_0.0.0.0_16000,default_zone_storaged0_9779,default_zone_storaged1_9779,default_zone_storaged2_9779" |
+--------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+

但是入参设置成100也没数据

用 MurmurHash2 hash 你的 vid,然后 %100 再 + 1,然后用那个 part id,而不是 1 或者 100

代码示例:

PartitionID MetaClient::partId(int32_t numParts, const VertexID id) const {
  // If the length of the id is 8, we will treat it as int64_t to be compatible
  // with the version 1.0
  uint64_t vid = 0;
  if (id.size() == 8) {
    memcpy(static_cast<void*>(&vid), id.data(), 8);
  } else {
    MurmurHash2 hash;
    vid = hash(id.data());
  }
  PartitionID pId = vid % numParts + 1;
  CHECK_GT(pId, 0U);
  return pId;
}
3 个赞

是这个原因,现在可以读到数据了,多谢啦

2 个赞