Exchange reports an error when importing basketballplayer

If Spark is also on this machine, the meta port is 9559. Also note that docker-compose uses hostnames by default; it's better to pin fixed IPs instead. See https://github.com/vesoft-inc/nebula-exchange/blob/master/exchange-common/src/test/resources/docker-compose.yaml for reference.

If Spark is on a different machine, map the ports out of the containers so the other machine can reach them.

Everything here is deployed on a single machine.
So instead of the hostnames I can use the IPs found with docker inspect <network name>, right?
And in Exchange's conf file, do I write the local machine's IP or the three containers' IPs?

If you run Spark in local mode, use the 3 container IPs you got from docker inspect, with port 9559.
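
For example, something like this should print each container's name and address on that network (standard Docker inspect template syntax; swap in your own network name):

docker inspect -f '{{range .Containers}}{{.Name}}: {{.IPv4Address}}{{println}}{{end}}' nebula-docker-compose-260_nebula-net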

Do graphd's IP and port also need to be changed to the three containers'?

Could you just spell out exactly what to change this to, please? :rofl:
Local:
172.20.10.14:9559
172.20.10.14:9669
The three containers' network:

[wjc@localhost bin]$ docker inspect nebula-docker-compose-260_nebula-net
[
    {
        "Name": "nebula-docker-compose-260_nebula-net",
        "Id": "e0e4d7c095f6bccc340e10b9888cc4fae86738252625176de7aacd5e8190747b",
        "Created": "2022-03-11T08:53:55.697559144+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.18.0.0/16",
                    "Gateway": "172.18.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "17d8df8448ccafa765a2be1951772ce8ae9d63cbe56a97078e898e165d19da65": {
                "Name": "nebula-docker-compose-260-metad1-1",
                "EndpointID": "7a0ae0f41c06a3d74d4618fbf09650de206808e5648fe4764e3714a47cc4517f",
                "MacAddress": "02:42:ac:12:00:04",
                "IPv4Address": "172.18.0.4/16",
                "IPv6Address": ""
            },
            "214fc7bdb7a7d12713ec0dbbabc007e97af6ac5270d142c3d25933ff7bcfb0fc": {
                "Name": "nebula-docker-compose-260-graphd2-1",
                "EndpointID": "1644af54b2e74ae84cff6c766ca19055ad670442adbb752a670b0985a8a865dc",
                "MacAddress": "02:42:ac:12:00:09",
                "IPv4Address": "172.18.0.9/16",
                "IPv6Address": ""
            },
            "78559522ec2c6f763474e7f40e51f1af3b6c7a8f5dafddb2dd70f6efd4626672": {
                "Name": "nebula-docker-compose-260-storaged0-1",
                "EndpointID": "2a872cad8ccc730854c648da8feec961713e454d846cbad622207a6870460184",
                "MacAddress": "02:42:ac:12:00:05",
                "IPv4Address": "172.18.0.5/16",
                "IPv6Address": ""
            },
            "ba27e1017e584dbab5f12f0e692b424a626c4a414d649be85d6fdbe9ad18758e": {
                "Name": "nebula-docker-compose-260-graphd1-1",
                "EndpointID": "0c3af52ca8b534173d7b98bc2f4a06e555c6794dcb73b796114ef959cd9753a8",
                "MacAddress": "02:42:ac:12:00:0a",
                "IPv4Address": "172.18.0.10/16",
                "IPv6Address": ""
            },
            "ba619b4b04ea28ccfd6ce7ac02542181145d7b34b6478932637db909e4cf4e1c": {
                "Name": "nebula-docker-compose-260-storaged2-1",
                "EndpointID": "2e5e5c644074e3922214642c7640df6fd4235ed945c81c12b24c56f65e7c8fd3",
                "MacAddress": "02:42:ac:12:00:06",
                "IPv4Address": "172.18.0.6/16",
                "IPv6Address": ""
            },
            "cdfc15134a9697862a5996c79b935fbfb11922b1a0897bff93291f54b2b30bf2": {
                "Name": "nebula-docker-compose-260-metad2-1",
                "EndpointID": "4bc715b00c969ad09ea0c12777767f95f08725b6cb03847baf9deb6c83e9d424",
                "MacAddress": "02:42:ac:12:00:03",
                "IPv4Address": "172.18.0.3/16",
                "IPv6Address": ""
            },
            "fb607114d94457672f0f33bbbd848ab8462ae921af402ac8a607dd37f0d8d853": {
                "Name": "nebula-docker-compose-260-metad0-1",
                "EndpointID": "a401635bfe9ad433b6e96ee484998d29b39621d4b52a9f16b27e8bffb2a76b00",
                "MacAddress": "02:42:ac:12:00:02",
                "IPv4Address": "172.18.0.2/16",
                "IPv6Address": ""
            },
            "fe2d0f818369b01ef82c3d554498dbe01aa6922aaa9967a84c6c44b270da3693": {
                "Name": "nebula-docker-compose-260-graphd-1",
                "EndpointID": "7cf99e3ac12551594e33dfe48ad0b581cc4808b2a07a390448538c7c12868cd8",
                "MacAddress": "02:42:ac:12:00:08",
                "IPv4Address": "172.18.0.8/16",
                "IPv6Address": ""
            },
            "ff2fbf59777e80cba0bed3fb5e694885f1a2671db094bee40fc02bb4dd69fc05": {
                "Name": "nebula-docker-compose-260-storaged1-1",
                "EndpointID": "4e65ba069cdca43d09290adef467e65b427b24018e142fdc957124f0d5916b8e",
                "MacAddress": "02:42:ac:12:00:07",
                "IPv4Address": "172.18.0.7/16",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {
            "com.docker.compose.network": "nebula-net",
            "com.docker.compose.project": "nebula-docker-compose-260",
            "com.docker.compose.version": "2.2.3"
        }
    }
]

Nebula's ports:

[wjc@localhost nebula-docker-compose-2.6.0]$ docker-compose ps
NAME                                    COMMAND                  SERVICE             STATUS              PORTS
nebula-docker-compose-260-graphd-1      "/usr/local/nebula/b…"   graphd              running (healthy)   0.0.0.0:9669->9669/tcp, 0.0.0.0:49176->19669-19670/tcp, :::9669->9669/tcp, :::49176->19669-19670/tcp
nebula-docker-compose-260-graphd1-1     "/usr/local/nebula/b…"   graphd1             running (healthy)   0.0.0.0:49178->9669/tcp, 0.0.0.0:49175->19669-19670/tcp, :::49178->9669/tcp, :::49175->19669-19670/tcp
nebula-docker-compose-260-graphd2-1     "/usr/local/nebula/b…"   graphd2             running (healthy)   0.0.0.0:49177->9669/tcp, 0.0.0.0:49174->19669-19670/tcp, :::49177->9669/tcp, :::49174->19669-19670/tcp
nebula-docker-compose-260-metad0-1      "/usr/local/nebula/b…"   metad0              running (healthy)   0.0.0.0:49159->9559/tcp, 0.0.0.0:49156->19559-19560/tcp, :::49159->9559/tcp, :::49156->19559-19560/tcp
nebula-docker-compose-260-metad1-1      "/usr/local/nebula/b…"   metad1              running (healthy)   0.0.0.0:49161->9559/tcp, 0.0.0.0:49158->19559-19560/tcp, :::49161->9559/tcp, :::49158->19559-19560/tcp
nebula-docker-compose-260-metad2-1      "/usr/local/nebula/b…"   metad2              running (healthy)   0.0.0.0:49160->9559/tcp, 0.0.0.0:49157->19559-19560/tcp, :::49160->9559/tcp, :::49157->19559-19560/tcp
nebula-docker-compose-260-storaged0-1   "/usr/local/nebula/b…"   storaged0           running (healthy)   0.0.0.0:49169->9779/tcp, 0.0.0.0:49166->19779-19780/tcp, :::49169->9779/tcp, :::49166->19779-19780/tcp
nebula-docker-compose-260-storaged1-1   "/usr/local/nebula/b…"   storaged1           running (healthy)   0.0.0.0:49170->9779/tcp, 0.0.0.0:49167->19779-19780/tcp, :::49170->9779/tcp, :::49167->19779-19780/tcp
nebula-docker-compose-260-storaged2-1   "/usr/local/nebula/b…"   storaged2           running (healthy)   0.0.0.0:49168->9779/tcp, 0.0.0.0:49165->19779-19780/tcp, :::49168->9779/tcp, :::49165->19779-19780/tcp

For graph, listing 1 address or all 3 both work; 3 is a bit faster.

172.18.0.8:9669

For meta, you must list all 3:
172.18.0.4:9559,172.18.0.2:9559,172.18.0.3:9559

I've changed it to this:

nebula: {
    address:{
      graph:["172.18.0.9:9669","172.18.0.8:9669","172.18.0.10:9669"]
      meta:["172.18.0.3:9559","172.18.0.2:9559","172.18.0.4:9559"]
    }

But now it reports a different error:

[wjc@localhost bin]$ ./spark-submit --master local --class com.vesoft.nebula.exchange.Exchange ~/nebula-exchange-2.6/nebula-exchange/target/nebula-exchange-2.6.3.jar -c ~/nebula-exchange-2.6/nebula-exchange/target/classes/csv_application.conf
22/03/16 17:39:01 WARN Utils: Your hostname, localhost resolves to a loopback address: 127.0.0.1; using 172.20.10.14 instead (on interface ens33)
22/03/16 17:39:01 WARN Utils: Set SPARK_LOCAL_IP if you need to bind to another address
22/03/16 17:39:02 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
log4j:WARN No appenders could be found for logger (com.vesoft.nebula.exchange.config.Configs$).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
22/03/16 17:39:02 INFO SparkContext: Running Spark version 2.4.8
22/03/16 17:39:02 INFO SparkContext: Submitted application: com.vesoft.nebula.exchange.Exchange
22/03/16 17:39:02 INFO SecurityManager: Changing view acls to: wjc
22/03/16 17:39:02 INFO SecurityManager: Changing modify acls to: wjc
22/03/16 17:39:02 INFO SecurityManager: Changing view acls groups to: 
22/03/16 17:39:02 INFO SecurityManager: Changing modify acls groups to: 
22/03/16 17:39:02 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users  with view permissions: Set(wjc); groups with view permissions: Set(); users  with modify permissions: Set(wjc); groups with modify permissions: Set()
22/03/16 17:39:03 INFO Utils: Successfully started service 'sparkDriver' on port 38303.
22/03/16 17:39:03 INFO SparkEnv: Registering MapOutputTracker
22/03/16 17:39:03 INFO SparkEnv: Registering BlockManagerMaster
22/03/16 17:39:03 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
22/03/16 17:39:03 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
22/03/16 17:39:03 INFO DiskBlockManager: Created local directory at /tmp/blockmgr-60dcf486-119c-4c7b-ad92-76967dc12ce0
22/03/16 17:39:03 INFO MemoryStore: MemoryStore started with capacity 366.1 MB
22/03/16 17:39:03 INFO SparkEnv: Registering OutputCommitCoordinator
22/03/16 17:39:03 INFO Utils: Successfully started service 'SparkUI' on port 4040.
22/03/16 17:39:03 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://172.20.10.14:4040
22/03/16 17:39:03 INFO SparkContext: Added JAR file:/home/wjc/nebula-exchange-2.6/nebula-exchange/target/nebula-exchange-2.6.3.jar at spark://172.20.10.14:38303/jars/nebula-exchange-2.6.3.jar with timestamp 1647423543931
22/03/16 17:39:04 INFO Executor: Starting executor ID driver on host localhost
22/03/16 17:39:04 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 33241.
22/03/16 17:39:04 INFO NettyBlockTransferService: Server created on 172.20.10.14:33241
22/03/16 17:39:04 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
22/03/16 17:39:04 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, 172.20.10.14, 33241, None)
22/03/16 17:39:04 INFO BlockManagerMasterEndpoint: Registering block manager 172.20.10.14:33241 with 366.1 MB RAM, BlockManagerId(driver, 172.20.10.14, 33241, None)
22/03/16 17:39:04 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, 172.20.10.14, 33241, None)
22/03/16 17:39:04 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, 172.20.10.14, 33241, None)
22/03/16 17:39:04 INFO Exchange$: Processing Tag player
22/03/16 17:39:04 INFO Exchange$: field keys: _c1, _c2
22/03/16 17:39:04 INFO Exchange$: nebula keys: age, name
22/03/16 17:39:04 INFO Exchange$: Loading CSV files from file:///home/wjc/daoshu/dataset/vertex_player.csv
22/03/16 17:39:04 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/home/wjc/spark-2.4.8-bin-hadoop2.6/bin/spark-warehouse').
22/03/16 17:39:04 INFO SharedState: Warehouse path is 'file:/home/wjc/spark-2.4.8-bin-hadoop2.6/bin/spark-warehouse'.
22/03/16 17:39:05 INFO StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
22/03/16 17:39:05 WARN SizeEstimator: Failed to check whether UseCompressedOops is set; assuming yes
22/03/16 17:39:05 INFO InMemoryFileIndex: It took 34 ms to list leaf files for 1 paths.
22/03/16 17:39:05 INFO InMemoryFileIndex: It took 1 ms to list leaf files for 1 paths.
22/03/16 17:39:08 INFO FileSourceStrategy: Pruning directories with: 
22/03/16 17:39:08 INFO FileSourceStrategy: Post-Scan Filters: (length(trim(value#0, None)) > 0)
22/03/16 17:39:08 INFO FileSourceStrategy: Output Data Schema: struct<value: string>
22/03/16 17:39:08 INFO FileSourceScanExec: Pushed Filters: 
22/03/16 17:39:08 INFO CodeGenerator: Code generated in 300.466899 ms
22/03/16 17:39:09 INFO CodeGenerator: Code generated in 26.023752 ms
22/03/16 17:39:09 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 200.3 KB, free 366.0 MB)
22/03/16 17:39:09 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 21.1 KB, free 365.9 MB)
22/03/16 17:39:09 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on 172.20.10.14:33241 (size: 21.1 KB, free: 366.1 MB)
22/03/16 17:39:09 INFO SparkContext: Created broadcast 0 from csv at FileBaseReader.scala:86
22/03/16 17:39:09 INFO FileSourceScanExec: Planning scan with bin packing, max size: 4195699 bytes, open cost is considered as scanning 4194304 bytes.
22/03/16 17:39:09 INFO SparkContext: Starting job: csv at FileBaseReader.scala:86
22/03/16 17:39:10 INFO DAGScheduler: Got job 0 (csv at FileBaseReader.scala:86) with 1 output partitions
22/03/16 17:39:10 INFO DAGScheduler: Final stage: ResultStage 0 (csv at FileBaseReader.scala:86)
22/03/16 17:39:10 INFO DAGScheduler: Parents of final stage: List()
22/03/16 17:39:10 INFO DAGScheduler: Missing parents: List()
22/03/16 17:39:10 INFO DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[3] at csv at FileBaseReader.scala:86), which has no missing parents
22/03/16 17:39:10 INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 8.9 KB, free 365.9 MB)
22/03/16 17:39:10 INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 4.6 KB, free 365.9 MB)
22/03/16 17:39:10 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on 172.20.10.14:33241 (size: 4.6 KB, free: 366.1 MB)
22/03/16 17:39:10 INFO SparkContext: Created broadcast 1 from broadcast at DAGScheduler.scala:1184
22/03/16 17:39:10 INFO DAGScheduler: Submitting 1 missing tasks from ResultStage 0 (MapPartitionsRDD[3] at csv at FileBaseReader.scala:86) (first 15 tasks are for partitions Vector(0))
22/03/16 17:39:10 INFO TaskSchedulerImpl: Adding task set 0.0 with 1 tasks
22/03/16 17:39:10 INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID 0, localhost, executor driver, partition 0, PROCESS_LOCAL, 8265 bytes)
22/03/16 17:39:10 INFO Executor: Running task 0.0 in stage 0.0 (TID 0)
22/03/16 17:39:10 INFO Executor: Fetching spark://172.20.10.14:38303/jars/nebula-exchange-2.6.3.jar with timestamp 1647423543931
22/03/16 17:39:10 INFO TransportClientFactory: Successfully created connection to /172.20.10.14:38303 after 57 ms (0 ms spent in bootstraps)
22/03/16 17:39:10 INFO Utils: Fetching spark://172.20.10.14:38303/jars/nebula-exchange-2.6.3.jar to /tmp/spark-7e590699-a278-4f9f-9cbd-e594675086b3/userFiles-49c1d77f-4b8a-49b3-8fff-18ed6ca12f22/fetchFileTemp7975268770823151595.tmp
22/03/16 17:39:11 INFO Executor: Adding file:/tmp/spark-7e590699-a278-4f9f-9cbd-e594675086b3/userFiles-49c1d77f-4b8a-49b3-8fff-18ed6ca12f22/nebula-exchange-2.6.3.jar to class loader
22/03/16 17:39:11 INFO FileScanRDD: Reading File path: file:///home/wjc/daoshu/dataset/vertex_player.csv, range: 0-1395, partition values: [empty row]
22/03/16 17:39:11 INFO CodeGenerator: Code generated in 16.069424 ms
22/03/16 17:39:11 INFO Executor: Finished task 0.0 in stage 0.0 (TID 0). 1259 bytes result sent to driver
22/03/16 17:39:11 INFO TaskSetManager: Finished task 0.0 in stage 0.0 (TID 0) in 1442 ms on localhost (executor driver) (1/1)
22/03/16 17:39:11 INFO TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool 
22/03/16 17:39:11 INFO DAGScheduler: ResultStage 0 (csv at FileBaseReader.scala:86) finished in 1.643 s
22/03/16 17:39:11 INFO DAGScheduler: Job 0 finished: csv at FileBaseReader.scala:86, took 1.759188 s
22/03/16 17:39:11 INFO FileSourceStrategy: Pruning directories with: 
22/03/16 17:39:11 INFO FileSourceStrategy: Post-Scan Filters: 
22/03/16 17:39:11 INFO FileSourceStrategy: Output Data Schema: struct<value: string>
22/03/16 17:39:11 INFO FileSourceScanExec: Pushed Filters: 
22/03/16 17:39:11 INFO CodeGenerator: Code generated in 18.207281 ms
22/03/16 17:39:11 INFO MemoryStore: Block broadcast_2 stored as values in memory (estimated size 200.3 KB, free 365.7 MB)
22/03/16 17:39:11 INFO MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 21.1 KB, free 365.7 MB)
22/03/16 17:39:11 INFO BlockManagerInfo: Added broadcast_2_piece0 in memory on 172.20.10.14:33241 (size: 21.1 KB, free: 366.1 MB)
22/03/16 17:39:11 INFO SparkContext: Created broadcast 2 from csv at FileBaseReader.scala:86
22/03/16 17:39:11 INFO FileSourceScanExec: Planning scan with bin packing, max size: 4195699 bytes, open cost is considered as scanning 4194304 bytes.
22/03/16 17:39:12 ERROR MetaClient: Get Space Error: java.net.UnknownHostException: metad0
Exception in thread "main" com.facebook.thrift.transport.TTransportException: java.net.UnknownHostException: metad0
	at com.facebook.thrift.transport.TSocket.open(TSocket.java:206)
	at com.vesoft.nebula.client.meta.MetaClient.getClient(MetaClient.java:145)
	at com.vesoft.nebula.client.meta.MetaClient.freshClient(MetaClient.java:171)
	at com.vesoft.nebula.client.meta.MetaClient.getSpace(MetaClient.java:233)
	at com.vesoft.nebula.client.meta.MetaClient.getTags(MetaClient.java:261)
	at com.vesoft.nebula.exchange.MetaProvider.getLabelType(MetaProvider.scala:93)
	at com.vesoft.nebula.exchange.utils.NebulaUtils$.getDataSourceFieldType(NebulaUtils.scala:33)
	at com.vesoft.nebula.exchange.processor.VerticesProcessor.process(VerticesProcessor.scala:111)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:150)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:126)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at com.vesoft.nebula.exchange.Exchange$.main(Exchange.scala:126)
	at com.vesoft.nebula.exchange.Exchange.main(Exchange.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
	at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:855)
	at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:161)
	at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:184)
	at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
	at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:930)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:939)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.net.UnknownHostException: metad0
	at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:184)
	at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
	at java.net.Socket.connect(Socket.java:589)
	at com.facebook.thrift.transport.TSocket.open(TSocket.java:201)
	... 24 more
22/03/16 17:39:12 INFO SparkContext: Invoking stop() from shutdown hook
22/03/16 17:39:12 INFO SparkUI: Stopped Spark web UI at http://172.20.10.14:4040
22/03/16 17:39:12 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
22/03/16 17:39:12 INFO MemoryStore: MemoryStore cleared
22/03/16 17:39:12 INFO BlockManager: BlockManager stopped
22/03/16 17:39:12 INFO BlockManagerMaster: BlockManagerMaster stopped
22/03/16 17:39:12 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
22/03/16 17:39:12 INFO SparkContext: Successfully stopped SparkContext
22/03/16 17:39:12 INFO ShutdownHookManager: Shutdown hook called
22/03/16 17:39:12 INFO ShutdownHookManager: Deleting directory /tmp/spark-7e590699-a278-4f9f-9cbd-e594675086b3
22/03/16 17:39:12 INFO ShutdownHookManager: Deleting directory /tmp/spark-5fa45465-41ff-4b73-b10b-1f2b2d25cbc9

Switch to the docker-compose file mentioned above, with the IPs hard-coded.

Reason:
If the metad you connect to (say, metad1) is not the leader, it replies with the leader's address as the hostname configured in its local_ip, and then your local machine can't resolve that hostname to connect.

Alternatively, configuring /etc/hosts also works.
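
For example, on the machine running Spark, entries like these (hostnames are the compose service names, IPs taken from the docker inspect output above) would make the returned hostnames resolvable:

172.18.0.2   metad0
172.18.0.4   metad1
172.18.0.3   metad2
172.18.0.8   graphd
172.18.0.10  graphd1
172.18.0.9   graphd2
172.18.0.5   storaged0
172.18.0.7   storaged1
172.18.0.6   storaged2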

Now it reports a new error:

[wjc@localhost bin]$ ./spark-submit --master local --class com.vesoft.nebula.exchange.Exchange ~/nebula-exchange-2.6/nebula-exchange/target/nebula-exchange-2.6.3.jar -c ~/nebula-exchange-2.6/nebula-exchange/target/classes/csv_application.conf
22/03/16 18:24:22 WARN Utils: Your hostname, localhost resolves to a loopback address: 127.0.0.1; using 172.20.10.14 instead (on interface ens33)
22/03/16 18:24:22 WARN Utils: Set SPARK_LOCAL_IP if you need to bind to another address
22/03/16 18:24:22 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
log4j:WARN No appenders could be found for logger (com.vesoft.nebula.exchange.config.Configs$).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
22/03/16 18:24:23 INFO SparkContext: Running Spark version 2.4.8
22/03/16 18:24:23 INFO SparkContext: Submitted application: com.vesoft.nebula.exchange.Exchange
22/03/16 18:24:23 INFO SecurityManager: Changing view acls to: wjc
22/03/16 18:24:23 INFO SecurityManager: Changing modify acls to: wjc
22/03/16 18:24:23 INFO SecurityManager: Changing view acls groups to: 
22/03/16 18:24:23 INFO SecurityManager: Changing modify acls groups to: 
22/03/16 18:24:23 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users  with view permissions: Set(wjc); groups with view permissions: Set(); users  with modify permissions: Set(wjc); groups with modify permissions: Set()
22/03/16 18:24:23 INFO Utils: Successfully started service 'sparkDriver' on port 41155.
22/03/16 18:24:23 INFO SparkEnv: Registering MapOutputTracker
22/03/16 18:24:23 INFO SparkEnv: Registering BlockManagerMaster
22/03/16 18:24:23 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
22/03/16 18:24:23 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
22/03/16 18:24:23 INFO DiskBlockManager: Created local directory at /tmp/blockmgr-d899b6bc-7cc2-45c0-9cfb-b918abbc89a4
22/03/16 18:24:23 INFO MemoryStore: MemoryStore started with capacity 366.1 MB
22/03/16 18:24:23 INFO SparkEnv: Registering OutputCommitCoordinator
22/03/16 18:24:24 INFO Utils: Successfully started service 'SparkUI' on port 4040.
22/03/16 18:24:24 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://172.20.10.14:4040
22/03/16 18:24:24 INFO SparkContext: Added JAR file:/home/wjc/nebula-exchange-2.6/nebula-exchange/target/nebula-exchange-2.6.3.jar at spark://172.20.10.14:41155/jars/nebula-exchange-2.6.3.jar with timestamp 1647426264166
22/03/16 18:24:24 INFO Executor: Starting executor ID driver on host localhost
22/03/16 18:24:24 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 33979.
22/03/16 18:24:24 INFO NettyBlockTransferService: Server created on 172.20.10.14:33979
22/03/16 18:24:24 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
22/03/16 18:24:24 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, 172.20.10.14, 33979, None)
22/03/16 18:24:24 INFO BlockManagerMasterEndpoint: Registering block manager 172.20.10.14:33979 with 366.1 MB RAM, BlockManagerId(driver, 172.20.10.14, 33979, None)
22/03/16 18:24:24 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, 172.20.10.14, 33979, None)
22/03/16 18:24:24 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, 172.20.10.14, 33979, None)
22/03/16 18:24:24 INFO Exchange$: Processing Tag player
22/03/16 18:24:24 INFO Exchange$: field keys: _c1, _c2
22/03/16 18:24:24 INFO Exchange$: nebula keys: age, name
22/03/16 18:24:24 INFO Exchange$: Loading CSV files from file:///home/wjc/daoshu/dataset/vertex_player.csv
22/03/16 18:24:24 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/home/wjc/spark-2.4.8-bin-hadoop2.6/bin/spark-warehouse').
22/03/16 18:24:24 INFO SharedState: Warehouse path is 'file:/home/wjc/spark-2.4.8-bin-hadoop2.6/bin/spark-warehouse'.
22/03/16 18:24:25 INFO StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
22/03/16 18:24:25 WARN SizeEstimator: Failed to check whether UseCompressedOops is set; assuming yes
22/03/16 18:24:25 INFO InMemoryFileIndex: It took 42 ms to list leaf files for 1 paths.
22/03/16 18:24:26 INFO InMemoryFileIndex: It took 1 ms to list leaf files for 1 paths.
22/03/16 18:24:29 INFO FileSourceStrategy: Pruning directories with: 
22/03/16 18:24:29 INFO FileSourceStrategy: Post-Scan Filters: (length(trim(value#0, None)) > 0)
22/03/16 18:24:29 INFO FileSourceStrategy: Output Data Schema: struct<value: string>
22/03/16 18:24:29 INFO FileSourceScanExec: Pushed Filters: 
22/03/16 18:24:29 INFO CodeGenerator: Code generated in 294.541224 ms
22/03/16 18:24:30 INFO CodeGenerator: Code generated in 26.182688 ms
22/03/16 18:24:30 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 200.3 KB, free 366.0 MB)
22/03/16 18:24:30 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 21.0 KB, free 365.9 MB)
22/03/16 18:24:31 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on 172.20.10.14:33979 (size: 21.0 KB, free: 366.1 MB)
22/03/16 18:24:31 INFO SparkContext: Created broadcast 0 from csv at FileBaseReader.scala:86
22/03/16 18:24:31 INFO FileSourceScanExec: Planning scan with bin packing, max size: 4195699 bytes, open cost is considered as scanning 4194304 bytes.
22/03/16 18:24:31 INFO SparkContext: Starting job: csv at FileBaseReader.scala:86
22/03/16 18:24:31 INFO DAGScheduler: Got job 0 (csv at FileBaseReader.scala:86) with 1 output partitions
22/03/16 18:24:31 INFO DAGScheduler: Final stage: ResultStage 0 (csv at FileBaseReader.scala:86)
22/03/16 18:24:31 INFO DAGScheduler: Parents of final stage: List()
22/03/16 18:24:31 INFO DAGScheduler: Missing parents: List()
22/03/16 18:24:31 INFO DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[3] at csv at FileBaseReader.scala:86), which has no missing parents
22/03/16 18:24:31 INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 8.9 KB, free 365.9 MB)
22/03/16 18:24:31 INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 4.6 KB, free 365.9 MB)
22/03/16 18:24:31 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on 172.20.10.14:33979 (size: 4.6 KB, free: 366.1 MB)
22/03/16 18:24:31 INFO SparkContext: Created broadcast 1 from broadcast at DAGScheduler.scala:1184
22/03/16 18:24:31 INFO DAGScheduler: Submitting 1 missing tasks from ResultStage 0 (MapPartitionsRDD[3] at csv at FileBaseReader.scala:86) (first 15 tasks are for partitions Vector(0))
22/03/16 18:24:31 INFO TaskSchedulerImpl: Adding task set 0.0 with 1 tasks
22/03/16 18:24:31 INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID 0, localhost, executor driver, partition 0, PROCESS_LOCAL, 8265 bytes)
22/03/16 18:24:31 INFO Executor: Running task 0.0 in stage 0.0 (TID 0)
22/03/16 18:24:31 INFO Executor: Fetching spark://172.20.10.14:41155/jars/nebula-exchange-2.6.3.jar with timestamp 1647426264166
22/03/16 18:24:31 INFO TransportClientFactory: Successfully created connection to /172.20.10.14:41155 after 79 ms (0 ms spent in bootstraps)
22/03/16 18:24:31 INFO Utils: Fetching spark://172.20.10.14:41155/jars/nebula-exchange-2.6.3.jar to /tmp/spark-9ff2a26d-9f24-4c44-ad82-7f076bbc160b/userFiles-9fcdc908-c5f7-486f-8de4-a52bbc2efc03/fetchFileTemp5064301530033379076.tmp
22/03/16 18:24:34 INFO Executor: Adding file:/tmp/spark-9ff2a26d-9f24-4c44-ad82-7f076bbc160b/userFiles-9fcdc908-c5f7-486f-8de4-a52bbc2efc03/nebula-exchange-2.6.3.jar to class loader
22/03/16 18:24:34 INFO FileScanRDD: Reading File path: file:///home/wjc/daoshu/dataset/vertex_player.csv, range: 0-1395, partition values: [empty row]
22/03/16 18:24:34 INFO CodeGenerator: Code generated in 21.146547 ms
22/03/16 18:24:34 INFO Executor: Finished task 0.0 in stage 0.0 (TID 0). 1259 bytes result sent to driver
22/03/16 18:24:34 INFO TaskSetManager: Finished task 0.0 in stage 0.0 (TID 0) in 3049 ms on localhost (executor driver) (1/1)
22/03/16 18:24:34 INFO DAGScheduler: ResultStage 0 (csv at FileBaseReader.scala:86) finished in 3.271 s
22/03/16 18:24:34 INFO TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool 
22/03/16 18:24:34 INFO DAGScheduler: Job 0 finished: csv at FileBaseReader.scala:86, took 3.430124 s
22/03/16 18:24:34 INFO FileSourceStrategy: Pruning directories with: 
22/03/16 18:24:34 INFO FileSourceStrategy: Post-Scan Filters: 
22/03/16 18:24:34 INFO FileSourceStrategy: Output Data Schema: struct<value: string>
22/03/16 18:24:34 INFO FileSourceScanExec: Pushed Filters: 
22/03/16 18:24:34 INFO CodeGenerator: Code generated in 12.987734 ms
22/03/16 18:24:34 INFO MemoryStore: Block broadcast_2 stored as values in memory (estimated size 200.3 KB, free 365.7 MB)
22/03/16 18:24:34 INFO MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 21.0 KB, free 365.7 MB)
22/03/16 18:24:34 INFO BlockManagerInfo: Added broadcast_2_piece0 in memory on 172.20.10.14:33979 (size: 21.0 KB, free: 366.1 MB)
22/03/16 18:24:34 INFO SparkContext: Created broadcast 2 from csv at FileBaseReader.scala:86
22/03/16 18:24:34 INFO FileSourceScanExec: Planning scan with bin packing, max size: 4195699 bytes, open cost is considered as scanning 4194304 bytes.
Exception in thread "main" com.facebook.thrift.transport.TTransportException: java.net.SocketTimeoutException: connect timed out
	at com.facebook.thrift.transport.TSocket.open(TSocket.java:206)
	at com.vesoft.nebula.client.meta.MetaClient.getClient(MetaClient.java:145)
	at com.vesoft.nebula.client.meta.MetaClient.doConnect(MetaClient.java:124)
	at com.vesoft.nebula.client.meta.MetaClient.connect(MetaClient.java:113)
	at com.vesoft.nebula.exchange.MetaProvider.<init>(MetaProvider.scala:56)
	at com.vesoft.nebula.exchange.processor.VerticesProcessor.process(VerticesProcessor.scala:110)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:150)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:126)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at com.vesoft.nebula.exchange.Exchange$.main(Exchange.scala:126)
	at com.vesoft.nebula.exchange.Exchange.main(Exchange.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
	at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:855)
	at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:161)
	at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:184)
	at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
	at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:930)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:939)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.net.SocketTimeoutException: connect timed out
	at java.net.PlainSocketImpl.socketConnect(Native Method)
	at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)
	at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)
	at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)
	at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
	at java.net.Socket.connect(Socket.java:589)
	at com.facebook.thrift.transport.TSocket.open(TSocket.java:201)
	... 22 more
22/03/16 18:25:04 INFO SparkContext: Invoking stop() from shutdown hook
22/03/16 18:25:04 INFO SparkUI: Stopped Spark web UI at http://172.20.10.14:4040
22/03/16 18:25:04 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
22/03/16 18:25:04 INFO MemoryStore: MemoryStore cleared
22/03/16 18:25:04 INFO BlockManager: BlockManager stopped
22/03/16 18:25:04 INFO BlockManagerMaster: BlockManagerMaster stopped
22/03/16 18:25:04 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
22/03/16 18:25:04 INFO SparkContext: Successfully stopped SparkContext
22/03/16 18:25:04 INFO ShutdownHookManager: Shutdown hook called
22/03/16 18:25:04 INFO ShutdownHookManager: Deleting directory /tmp/spark-9ff2a26d-9f24-4c44-ad82-7f076bbc160b
22/03/16 18:25:04 INFO ShutdownHookManager: Deleting directory /tmp/spark-aadccc31-645d-4bf0-b452-cf29ff2ccce4

docker-compose.yaml has already been changed:

version: '3.4'
services:
  metad0:
    image: vesoft/nebula-metad:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --local_ip=172.18.0.2
      - --ws_ip=172.18.0.2
      - --port=9559
      - --ws_http_port=19559
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://metad0:19559/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9559
      - 19559
      - 19560
    volumes:
      - ./data/meta0:/data/meta
      - ./logs/meta0:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  metad1:
    image: vesoft/nebula-metad:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --local_ip=172.18.0.4
      - --ws_ip=172.18.0.4
      - --port=9559
      - --ws_http_port=19559
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://metad1:19559/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9559
      - 19559
      - 19560
    volumes:
      - ./data/meta1:/data/meta
      - ./logs/meta1:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  metad2:
    image: vesoft/nebula-metad:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --local_ip=172.18.0.3
      - --ws_ip=172.18.0.3
      - --port=9559
      - --ws_http_port=19559
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://metad2:19559/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9559
      - 19559
      - 19560
    volumes:
      - ./data/meta2:/data/meta
      - ./logs/meta2:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  storaged0:
    image: vesoft/nebula-storaged:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --local_ip=172.18.0.5
      - --ws_ip=172.18.0.5
      - --port=9779
      - --ws_http_port=19779
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://storaged0:19779/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9779
      - 19779
      - 19780
    volumes:
      - ./data/storage0:/data/storage
      - ./logs/storage0:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  storaged1:
    image: vesoft/nebula-storaged:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --local_ip=172.18.0.7
      - --ws_ip=172.18.0.7
      - --port=9779
      - --ws_http_port=19779
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://storaged1:19779/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9779
      - 19779
      - 19780
    volumes:
      - ./data/storage1:/data/storage
      - ./logs/storage1:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  storaged2:
    image: vesoft/nebula-storaged:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --local_ip=172.18.0.6
      - --ws_ip=172.18.0.6
      - --port=9779
      - --ws_http_port=19779
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://storaged2:19779/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9779
      - 19779
      - 19780
    volumes:
      - ./data/storage2:/data/storage
      - ./logs/storage2:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  graphd:
    image: vesoft/nebula-graphd:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --port=9669
      - --local_ip=172.18.0.8
      - --ws_ip=172.18.0.8
      - --ws_http_port=19669
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - storaged0
      - storaged1
      - storaged2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://graphd:19669/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - "9669:9669"
      - 19669
      - 19670
    volumes:
      - ./logs/graph:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  graphd1:
    image: vesoft/nebula-graphd:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --port=9669
      - --local_ip=172.18.0.10
      - --ws_ip=172.18.0.10
      - --ws_http_port=19669
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - storaged0
      - storaged1
      - storaged2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://graphd1:19669/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9669
      - 19669
      - 19670
    volumes:
      - ./logs/graph1:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

  graphd2:
    image: vesoft/nebula-graphd:v2.6.0
    environment:
      USER: root
      TZ:   "${TZ}"
    command:
      - --meta_server_addrs=172.18.0.2:9559,172.18.0.4:9559,172.18.0.3:9559
      - --port=9669
      - --local_ip=172.18.0.9
      - --ws_ip=172.18.0.9
      - --ws_http_port=19669
      - --log_dir=/logs
      - --v=0
      - --minloglevel=0
    depends_on:
      - storaged0
      - storaged1
      - storaged2
    healthcheck:
      test: ["CMD", "curl", "-sf", "http://graphd2:19669/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - 9669
      - 19669
      - 19670
    volumes:
      - ./logs/graph2:/logs
    networks:
      - nebula-net
    restart: on-failure
    cap_add:
      - SYS_PTRACE

networks:
  nebula-net:

Exchange's conf is still like this:

  nebula: {
    address:{
      graph:["172.18.0.9:9669","172.18.0.8:9669","172.18.0.10:9669"]
      meta:["172.18.0.3:9559","172.18.0.2:9559","172.18.0.4:9559"]
    }

Now Nebula won't start at all :joy:

Compare it carefully against the link I posted; if that doesn't help, search for docker-compose networking docs.
Your net config is wrong.
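
Concretely: the compose file sets --local_ip=172.18.0.x for every daemon, but the nebula-net network never reserves that subnet and no service is pinned to a fixed address, so Docker is free to hand out IPs that don't match what the daemons advertise. A sketch of the missing pieces, modeled on the reference file linked earlier (subnet value assumed from the docker inspect output above; apply the same ipv4_address pattern to every service, matching its --local_ip):

  metad0:
    # ...everything else as before...
    networks:
      nebula-net:
        ipv4_address: 172.18.0.2   # must equal this service's --local_ip

networks:
  nebula-net:
    ipam:
      driver: default
      config:
        - subnet: 172.18.0.0/16

After editing, recreate the stack (docker-compose down, then docker-compose up -d) and re-check with docker inspect that each container landed on its expected address.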


Got it working, Exchange is importing the data now. Thanks a lot! :relaxed:


This topic was automatically closed 7 days after the last reply. New replies are no longer allowed.