你好,我改完 ip 重启完服务还是出现相同的问题。这是改完的配置:
########## basics ##########
# Whether to run as a daemon process
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-graphd.pid
# Whether to enable optimizer
--enable_optimizer=true
# The default charset when a space is created
--default_charset=utf8
# The default collate when a space is created
--default_collate=utf8_bin
# Whether to use the configuration obtained from the configuration file
--local_config=true
########## logging ##########
# The directory to host logging files
--log_dir=logs
# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
--minloglevel=0
# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
--v=0
# Maximum seconds to buffer the log messages
--logbufsecs=0
# Whether to redirect stdout and stderr to separate output files
--redirect_stdout=true
# Destination filename of stdout and stderr, which will also reside in log_dir.
--stdout_log_file=graphd-stdout.log
--stderr_log_file=graphd-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=2
# Whether logging files' names contain a timestamp.
--timestamp_in_logfile_name=true
########## query ##########
# Whether to treat partial success as an error.
# This flag is only used for Read-only access, and Modify access always treats partial success as an error.
--accept_partial_success=false
# Maximum sentence length, unit byte
--max_allowed_query_size=4194304
########## networking ##########
# Comma separated Meta Server Addresses
--meta_server_addrs=192.168.8.80:9559
# Local IP used to identify the nebula-graphd process.
# Change it to an address other than loopback if the service is distributed or
# will be accessed remotely.
--local_ip=192.168.8.80
# Network device to listen on
--listen_netdev=any
# Port to listen on
--port=9669
# To turn on SO_REUSEPORT or not
--reuse_port=false
# Backlog of the listen socket, adjust this together with net.core.somaxconn
--listen_backlog=1024
# The number of seconds Nebula service waits before closing the idle connections
--client_idle_timeout_secs=28800
# The number of seconds before idle sessions expire
# The range should be in [1, 604800]
--session_idle_timeout_secs=28800
# The number of threads to accept incoming connections
--num_accept_threads=1
# The number of networking IO threads, 0 for # of CPU cores
--num_netio_threads=0
# The number of threads to execute user queries, 0 for # of CPU cores
--num_worker_threads=0
# HTTP service ip
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19669
# storage client timeout
--storage_client_timeout_ms=60000
# Port to listen on Meta with HTTP protocol, it corresponds to ws_http_port in metad's configuration file
--ws_meta_http_port=19559
########## authentication ##########
# Enable authorization
--enable_authorize=true
# User login authentication type, password for nebula authentication, ldap for ldap authentication, cloud for cloud authentication
--auth_type=password
########## memory ##########
# System memory high watermark ratio, cancel the memory checking when the ratio greater than 1.0
--system_memory_high_watermark_ratio=0.8
########## metrics ##########
--enable_space_level_metrics=false
########## experimental feature ##########
# if use experimental features
--enable_experimental_feature=false
########## basics ##########
# Whether to run as a daemon process
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-metad.pid
########## logging ##########
# The directory to host logging files
--log_dir=logs
# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
--minloglevel=0
# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
--v=0
# Maximum seconds to buffer the log messages
--logbufsecs=0
# Whether to redirect stdout and stderr to separate output files
--redirect_stdout=true
# Destination filename of stdout and stderr, which will also reside in log_dir.
--stdout_log_file=metad-stdout.log
--stderr_log_file=metad-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=2
# Whether logging files' names contain a timestamp. If using logrotate to rotate logging files, this should be set to true.
--timestamp_in_logfile_name=true
########## networking ##########
# Comma separated Meta Server addresses
--meta_server_addrs=192.168.8.80:9559
# Local IP used to identify the nebula-metad process.
# Change it to an address other than loopback if the service is distributed or
# will be accessed remotely.
--local_ip=192.168.8.80
# Meta daemon listening port
--port=9559
# HTTP service ip
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19559
# Port to listen on Storage with HTTP protocol, it corresponds to ws_http_port in storage's configuration file
--ws_storage_http_port=19779
########## storage ##########
# Root data path, here should be only single path for metad
--data_path=data/meta
########## Misc #########
# The default number of parts when a space is created
--default_parts_num=100
# The default replica factor when a space is created
--default_replica_factor=1
--heartbeat_interval_secs=10
--agent_heartbeat_interval_secs=60
########## basics ##########
# Whether to run as a daemon process
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-storaged.pid
# Whether to use the configuration obtained from the configuration file
--local_config=true
########## logging ##########
# The directory to host logging files
--log_dir=logs
# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
--minloglevel=0
# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
--v=0
# Maximum seconds to buffer the log messages
--logbufsecs=0
# Whether to redirect stdout and stderr to separate output files
--redirect_stdout=true
# Destination filename of stdout and stderr, which will also reside in log_dir.
--stdout_log_file=storaged-stdout.log
--stderr_log_file=storaged-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=2
# Whether logging files' names contain a timestamp.
--timestamp_in_logfile_name=true
########## networking ##########
# Comma separated Meta server addresses
--meta_server_addrs=192.168.8.80:9559
# Local IP used to identify the nebula-storaged process.
# Change it to an address other than loopback if the service is distributed or
# will be accessed remotely.
--local_ip=192.168.8.80
# Storage daemon listening port
--port=9779
# HTTP service ip
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19779
# heartbeat with meta service
--heartbeat_interval_secs=10
######### Raft #########
# Raft election timeout
--raft_heartbeat_interval_secs=30
# RPC timeout for raft client (ms)
--raft_rpc_timeout_ms=500
## recycle Raft WAL
--wal_ttl=14400
########## Disk ##########
# Root data path. Split by comma. e.g. --data_path=/disk1/path1/,/disk2/path2/
# One path per Rocksdb instance.
--data_path=data/storage
# Minimum reserved bytes of each data path
--minimum_reserved_bytes=268435456
# The default reserved bytes for one batch operation
--rocksdb_batch_size=4096
# The default block cache size used in BlockBasedTable.
# The unit is MB.
--rocksdb_block_cache=4
# The type of storage engine, `rocksdb`, `memory`, etc.
--engine_type=rocksdb
# Compression algorithm, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
# For the sake of binary compatibility, the default value is snappy.
# Recommend to use:
# * lz4 to gain more CPU performance, with the same compression ratio with snappy
# * zstd to occupy less disk space
# * lz4hc for the read-heavy write-light scenario
--rocksdb_compression=lz4
# Set different compressions for different levels
# For example, if --rocksdb_compression is snappy,
# "no:no:lz4:lz4::zstd" is identical to "no:no:lz4:lz4:snappy:zstd:snappy"
# In order to disable compression for level 0/1, set it to "no:no"
--rocksdb_compression_per_level=
# Whether or not to enable rocksdb's statistics, disabled by default
--enable_rocksdb_statistics=false
# Statslevel used by rocksdb to collection statistics, optional values are
# * kExceptHistogramOrTimers, disable timer stats, and skip histogram stats
# * kExceptTimers, Skip timer stats
# * kExceptDetailedTimers, Collect all stats except time inside mutex lock AND time spent on compression.
# * kExceptTimeForMutex, Collect all stats except the counters requiring to get time inside the mutex lock.
# * kAll, Collect all stats
--rocksdb_stats_level=kExceptHistogramOrTimers
# Whether or not to enable rocksdb's prefix bloom filter, enabled by default.
--enable_rocksdb_prefix_filtering=true
# Whether or not to enable rocksdb's whole key bloom filter, disabled by default.
--enable_rocksdb_whole_key_filtering=false
############## Key-Value separation ##############
# Whether or not to enable BlobDB (RocksDB key-value separation support)
--rocksdb_enable_kv_separation=false
# RocksDB key value separation threshold in bytes. Values at or above this threshold will be written to blob files during flush or compaction.
--rocksdb_kv_separation_threshold=100
# Compression algorithm for blobs, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
--rocksdb_blob_compression=lz4
# Whether to garbage collect blobs during compaction
--rocksdb_enable_blob_garbage_collection=true
############## rocksdb Options ##############
# rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma
--rocksdb_db_options={}
# rocksdb ColumnFamilyOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
--rocksdb_column_family_options={"write_buffer_size":"67108864","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456"}
# rocksdb BlockBasedTableOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
--rocksdb_block_based_table_options={"block_size":"8192"}
+----------------+------+-----------+-----------+--------------+----------------------+------------------------+---------+
| Host | Port | HTTP port | Status | Leader count | Leader distribution | Partition distribution | Version |
+----------------+------+-----------+-----------+--------------+----------------------+------------------------+---------+
| "192.168.8.80" | 9779 | 19669 | "ONLINE" | 10 | "test:10" | "test:10" | "3.1.0" |
| "192.168.8.80" | 9000 | 19669 | "OFFLINE" | 0 | "No valid partition" | "No valid partition" | |
| "192.168.8.80" | 9669 | 19669 | "OFFLINE" | 0 | "No valid partition" | "test:10" | |
+----------------+------+-----------+-----------+--------------+----------------------+------------------------+---------+
报错:
2022/05/27 13:21:34 --- START OF NEBULA IMPORTER ---
2022/05/27 13:21:34 [INFO] clientmgr.go:31: Create 10 Nebula Graph clients
2022/05/27 13:21:34 [INFO] reader.go:49: The delimiter of /home/luyilun/Documents/yuanmou/gdbm_experiments/nebula/vertices.csv is U+002C ','
2022/05/27 13:21:34 [INFO] reader.go:68: Start to read file(0): /home/luyilun/Documents/yuanmou/gdbm_experiments/nebula/vertices.csv, schema: < :VID(int) >
2022/05/27 13:21:34 [INFO] reader.go:184: Total lines of file(/home/luyilun/Documents/yuanmou/gdbm_experiments/nebula/vertices.csv) is: 10, error lines: 0
2022/05/27 13:21:34 [INFO] reader.go:49: The delimiter of /home/luyilun/Documents/yuanmou/gdbm_experiments/nebula/edges.csv is U+002C ','
2022/05/27 13:21:34 [INFO] reader.go:68: Start to read file(1): /home/luyilun/Documents/yuanmou/gdbm_experiments/nebula/edges.csv, schema: < :SRC_VID(int),:DST_VID(int) >
2022/05/27 13:21:34 [INFO] reader.go:184: Total lines of file(/home/luyilun/Documents/yuanmou/gdbm_experiments/nebula/edges.csv) is: 3, error lines: 0
2022/05/27 13:21:39 [ERROR] handler.go:63: Client 3 fail to execute: INSERT VERTEX `person`() VALUES 3: ();, Error: write tcp 192.168.8.80:46152->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:39 [ERROR] handler.go:63: Client 5 fail to execute: INSERT VERTEX `person`() VALUES 5: ();, Error: write tcp 192.168.8.80:46156->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:39 [ERROR] handler.go:63: Client 9 fail to execute: INSERT VERTEX `person`() VALUES 9: ();, Error: write tcp 192.168.8.80:46164->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:39 [ERROR] handler.go:63: Client 1 fail to execute: INSERT VERTEX `person`() VALUES 1: ();, Error: write tcp 192.168.8.80:46148->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:39 [ERROR] handler.go:63: Client 2 fail to execute: INSERT VERTEX `person`() VALUES 2: ();, Error: write tcp 192.168.8.80:46150->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:39 [ERROR] handler.go:63: Client 7 fail to execute: INSERT VERTEX `person`() VALUES 7: ();, Error: write tcp 192.168.8.80:46160->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:39 [INFO] statsmgr.go:89: Done(/home/luyilun/Documents/yuanmou/gdbm_experiments/nebula/vertices.csv): Time(4.55s), Finished(10), Failed(6), Read Failed(0), Latency AVG(161158us), Batches Req AVG(162688us), Rows AVG(2.20/s)
2022/05/27 13:21:39 [INFO] statsmgr.go:89: Tick: Time(5.00s), Finished(10), Failed(6), Read Failed(0), Latency AVG(161158us), Batches Req AVG(162688us), Rows AVG(2.00/s)
2022/05/27 13:21:42 [ERROR] handler.go:63: Client 3 fail to execute: INSERT EDGE `follow`() VALUES 7->10:() ;, Error: write tcp 192.168.8.80:46152->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 [ERROR] handler.go:63: Client 2 fail to execute: INSERT EDGE `follow`() VALUES 3->6:() ;, Error: write tcp 192.168.8.80:46150->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 [ERROR] handler.go:63: Client 1 fail to execute: INSERT EDGE `follow`() VALUES 1->2:() ;, Error: write tcp 192.168.8.80:46148->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 [INFO] statsmgr.go:89: Done(/home/luyilun/Documents/yuanmou/gdbm_experiments/nebula/edges.csv): Time(7.55s), Finished(13), Failed(9), Read Failed(0), Latency AVG(123968us), Batches Req AVG(125145us), Rows AVG(1.72/s)
2022/05/27 13:21:42 [WARN] session.go:280: [nebula-go] Sign out failed, write tcp 192.168.8.80:46148->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 [WARN] session.go:280: [nebula-go] Sign out failed, write tcp 192.168.8.80:46150->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 [WARN] session.go:280: [nebula-go] Sign out failed, write tcp 192.168.8.80:46152->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 [WARN] session.go:280: [nebula-go] Sign out failed, write tcp 192.168.8.80:46156->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 [WARN] session.go:280: [nebula-go] Sign out failed, write tcp 192.168.8.80:46160->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 [WARN] session.go:280: [nebula-go] Sign out failed, write tcp 192.168.8.80:46164->192.168.8.80:9669: write: broken pipe
2022/05/27 13:21:42 Total 9 lines fail to insert into nebula graph database
2022/05/27 13:21:43 --- END OF NEBULA IMPORTER ---
看这个报错,10个节点里有6个导入失败,其余4个还是导入成功的,这是什么原因呀