Frequent timeouts on simple queries

  • NebulaGraph version: 3.6.0

  • Deployment: distributed (cloud VMs)

  • Installation method: jar

  • In production: Y

  • Hardware

    • Disks: system 80 GB ESSD PL0; data 3 × 2000 GB ESSD AutoPL
    • CPU / memory: 64 cores (vCPU) / 256 GiB
  • Queries time out frequently. Machine memory and CPU usage are both very low, yet as QPS increases the number of timeouts increases proportionally.

  • Even simple queries like the one below also time out frequently:
    match (n:black:tag1)
    where id(n)=='parameter'
    return count(case when n.black.black_end_date>=toString(date()) then n end)

  • Relevant meta / storage / graph configuration:

  • metad config

########## basics ##########
# Whether to run as a daemon process
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-metad.pid

########## logging ##########
# The directory to host logging files
--log_dir=logs
# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
--minloglevel=0
# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
--v=4
# Maximum seconds to buffer the log messages
--logbufsecs=0
# Whether to redirect stdout and stderr to separate output files
--redirect_stdout=true
# Destination filename of stdout and stderr, which will also reside in log_dir.
--stdout_log_file=metad-stdout.log
--stderr_log_file=metad-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=3
# Whether logging files' names contain a timestamp.
--timestamp_in_logfile_name=true

########## networking ##########
# Comma separated Meta Server addresses
--meta_server_addrs=ip1:9559,ip1:9559,ip1:9559
# Local IP used to identify the nebula-metad process.
# Change it to an address other than loopback if the service is distributed or
# will be accessed remotely.
--local_ip=ip1
# Meta daemon listening port
--port=9559
# HTTP service ip
--ws_ip=ip1
# HTTP service port
--ws_http_port=19559
# Port to listen on Storage with HTTP protocol, it corresponds to ws_http_port in storage's configuration file
--ws_storage_http_port=19779

########## storage ##########
# Root data path, here should be only single path for metad
--data_path=data/meta

########## Misc #########
# The default number of parts when a space is created
--default_parts_num=100
# The default replica factor when a space is created
--default_replica_factor=1

--heartbeat_interval_secs=10
--agent_heartbeat_interval_secs=60

############## rocksdb Options ##############
--rocksdb_wal_sync=true

--timezone_name=UTC+08:00
  • graphd config
########## basics ##########
# Whether to run as a daemon process
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-graphd.pid
# Whether to enable optimizer
--enable_optimizer=true
# Heartbeat interval of communication between meta client and graphd service
--heartbeat_interval_secs=10
# The default charset when a space is created
--default_charset=utf8
# The default collate when a space is created
--default_collate=utf8_bin

# Whether to use the configuration obtained from the configuration file
--local_config=true

########## logging ##########
# The directory to host logging files
--log_dir=logs
# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
--minloglevel=1
# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
--v=3
# Maximum seconds to buffer the log messages
--logbufsecs=0
# Whether to redirect stdout and stderr to separate output files
--redirect_stdout=true
# Destination filename of stdout and stderr, which will also reside in log_dir.
--stdout_log_file=graphd-stdout.log
--stderr_log_file=graphd-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=3
# Whether logging files' names contain a timestamp
--timestamp_in_logfile_name=true

########## query ##########
# Whether to treat partial success as an error.
# This flag is only used for Read-only access, and Modify access always treats partial success as an error.
--accept_partial_success=false
# Maximum sentence length, unit byte
--max_allowed_query_size=4194304

########## networking ##########
# Comma separated Meta Server Addresses
--meta_server_addrs=ip1:9559,ip2:9559,ip3:9559
# Local IP used to identify the nebula-graphd process.
# Change it to an address other than loopback if the service is distributed or
# will be accessed remotely.
--local_ip=ip1
# Network device to listen on
--listen_netdev=any
# Port to listen on
--port=9669
# To turn on SO_REUSEPORT or not
--reuse_port=false
# Backlog of the listen socket, adjust this together with net.core.somaxconn
--listen_backlog=1024
# The number of seconds Nebula service waits before closing the idle connections
--client_idle_timeout_secs=28800
# The number of seconds before idle sessions expire
# The range should be in [1, 604800]
--session_idle_timeout_secs=28800
# The number of threads to accept incoming connections
--num_accept_threads=1
# The number of networking IO threads, 0 for # of CPU cores
--num_netio_threads=0
# Max active connections for all networking threads. 0 means no limit.
# Max connections for each networking thread = num_max_connections / num_netio_threads
--num_max_connections=0
# The number of threads to execute user queries, 0 for # of CPU cores
--num_worker_threads=64
# HTTP service ip
--ws_ip=ip1
# HTTP service port
--ws_http_port=19669
# storage client timeout
--storage_client_timeout_ms=360000
# slow query threshold in us
--slow_query_threshold_us=500000
# Port to listen on Meta with HTTP protocol, it corresponds to ws_http_port in metad's configuration file
--ws_meta_http_port=19559

########## authentication ##########
# Enable authorization
--enable_authorize=true
# User login authentication type, password for nebula authentication, ldap for ldap authentication, cloud for cloud authentication
--auth_type=password

########## memory ##########
# System memory high watermark ratio, cancel the memory checking when the ratio greater than 1.0
--system_memory_high_watermark_ratio=0.95

########## metrics ##########
--enable_space_level_metrics=true

########## experimental feature ##########
# if use experimental features
--enable_experimental_feature=true

# if use balance data feature, only work if enable_experimental_feature is true
--enable_data_balance=true

# enable udf, written in c++ only for now
#--enable_udf=false

# set the directory where the .so files of udf are stored, when enable_udf is true
#--udf_path=/data/software/nebula-graph-3.6.0/udf/

########## session ##########
# Maximum number of sessions that can be created per IP and per user
--max_sessions_per_ip_per_user=500000

########## memory tracker ##########
# trackable memory ratio (trackable_memory / (total_memory - untracked_reserved_memory) )
--memory_tracker_limit_ratio=0.45
# untracked reserved memory in Mib
--memory_tracker_untracked_reserved_memory_mb=50

# enable log memory tracker stats periodically
--memory_tracker_detail_log=true
# log memory tracker stats interval in milliseconds
--memory_tracker_detail_log_interval_ms=10000

# enable memory background purge (if jemalloc is used)
--memory_purge_enabled=true
# memory background purge interval in seconds
--memory_purge_interval_seconds=10

########## performance optimization ##########
# The max job size in multi job mode
--max_job_size=16
# The min batch size for handling dataset in multi job mode, only enabled when max_job_size is greater than 1
--min_batch_size=8192
# if true, return directly without go through RPC
--optimize_appendvertices=false
# number of paths constructed by each thread
--path_batch_size=10000


--max_plan_depth=4096
--max_allowed_statements=4096
--timezone_name=UTC+08:00
--num_operator_threads=4
  • storaged config
########## basics ##########
# Whether to run as a daemon process
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-storaged-listener.pid
# Whether to use the configuration obtained from the configuration file
--local_config=true

########## logging ##########
# The directory to host logging files
--log_dir=logs
# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
--minloglevel=1
# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
--v=0
# Maximum seconds to buffer the log messages
--logbufsecs=0
# Whether to redirect stdout and stderr to separate output files
--redirect_stdout=true
# Destination filename of stdout and stderr, which will also reside in log_dir.
--stdout_log_file=storaged-listener-stdout.log
--stderr_log_file=storaged-listener-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=3
# Whether logging files' names contain a timestamp.
--timestamp_in_logfile_name=true

########## networking ##########
# Comma separated Meta server addresses
--meta_server_addrs=ip1:9559,ip2:9559,ip3:9559
# Local IP used to identify the nebula-storaged process.
# Change it to an address other than loopback if the service is distributed or
# will be accessed remotely.
--local_ip=ip1
# Storage daemon listening port
--port=9779
# HTTP service ip
--ws_ip=ip1
# HTTP service port
--ws_http_port=19779
# heartbeat with meta service
--heartbeat_interval_secs=10

######### Raft #########
# Raft election timeout
--raft_heartbeat_interval_secs=60
# RPC timeout for raft client (ms)
--raft_rpc_timeout_ms=500
## recycle Raft WAL
--wal_ttl=1440

########## Disk ##########
# Root data path. split by comma. e.g. --data_path=/disk1/path1/,/disk2/path2/
# One path per Rocksdb instance.
--data_path=/data/nebula2/storaged,/data1/nebula2/storaged,/data2/nebula2/storaged

# Minimum reserved bytes of each data path
--minimum_reserved_bytes=268435456

# The default reserved bytes for one batch operation
--rocksdb_batch_size=4096
# The default block cache size used in BlockBasedTable. (MB)
# recommend: 1/3 of all memory
#--rocksdb_block_cache=4096
--rocksdb_block_cache=102400
# Disable page cache to better control memory used by rocksdb.
# Caution: Make sure to allocate enough block cache if disabling page cache!
#--disable_page_cache=false
--disable_page_cache=true

# Compression algorithm, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
# For the sake of binary compatibility, the default value is snappy.
# Recommend to use:
#   * lz4 to gain more CPU performance, with the same compression ratio with snappy
#   * zstd to occupy less disk space
#   * lz4hc for the read-heavy write-light scenario
--rocksdb_compression=lz4

# Set different compressions for different levels
# For example, if --rocksdb_compression is snappy,
# "no:no:lz4:lz4::zstd" is identical to "no:no:lz4:lz4:snappy:zstd:snappy"
# In order to disable compression for level 0/1, set it to "no:no"
--rocksdb_compression_per_level=

############## rocksdb Options ##############
# rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma
#--rocksdb_db_options={"max_subcompactions":"4","max_background_jobs":"4","skip_checking_sst_file_sizes_on_db_open":"true"}
--rocksdb_db_options={"max_subcompactions":"32","max_background_jobs":"48","skip_checking_sst_file_sizes_on_db_open":"true","max_background_compactions":"32","stats_dump_period_sec":"60"}

# rocksdb ColumnFamilyOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
#--rocksdb_column_family_options={"disable_auto_compactions":"false","write_buffer_size":"67108864","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456"}
#--rocksdb_column_family_options={"disable_auto_compactions":"false","write_buffer_size":"268435456","max_write_buffer_number":"8","max_bytes_for_level_base":"268435456","level0_file_num_compaction_trigger":"200","level0_slowdown_writes_trigger":"500","level0_stop_writes_trigger":"800","min_write_buffer_number_to_merge":"3","memtable_whole_key_filtering":"true","memtable_prefix_bloom_size_ratio":"0.1"}
--rocksdb_column_family_options={"disable_auto_compactions":"false","write_buffer_size":"268435456","max_write_buffer_number":"8","max_bytes_for_level_base":"268435456","level0_file_num_compaction_trigger":"200","level0_slowdown_writes_trigger":"500","level0_stop_writes_trigger":"800","min_write_buffer_number_to_merge":"3"}

# rocksdb BlockBasedTableOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
#--rocksdb_block_based_table_options={"block_size":"8192"}
--rocksdb_block_based_table_options={"block_size":"8192","cache_index_and_filter_blocks":"1"}


# Whether or not to enable rocksdb's statistics, disabled by default
--enable_rocksdb_statistics=true

# Stats level used by rocksdb to collect statistics, optional values are
#   * kExceptHistogramOrTimers, disable timer stats, and skip histogram stats
#   * kExceptTimers, Skip timer stats
#   * kExceptDetailedTimers, Collect all stats except time inside mutex lock AND time spent on compression.
#   * kExceptTimeForMutex, Collect all stats except the counters requiring to get time inside the mutex lock.
#   * kAll, Collect all stats
--rocksdb_stats_level=kExceptHistogramOrTimers

# Whether or not to enable rocksdb's prefix bloom filter, enabled by default.
--enable_rocksdb_prefix_filtering=true
# Whether or not to enable rocksdb's whole key bloom filter, disabled by default.
--enable_rocksdb_whole_key_filtering=false

############### misc ####################
# Whether turn on query in multiple thread
--query_concurrently=true
# Whether remove outdated space data
--auto_remove_invalid_space=true
# Network IO threads number
#--num_io_threads=16
--num_io_threads=32

# Max active connections for all networking threads. 0 means no limit.
# Max connections for each networking thread = num_max_connections / num_netio_threads
--num_max_connections=0
# Worker threads number to handle request
#--num_worker_threads=32
--num_worker_threads=64

# Maximum subtasks to run admin jobs concurrently
--max_concurrent_subtasks=10
# The rate limit in bytes when leader synchronizes snapshot data
--snapshot_part_rate_limit=10485760
#--snapshot_part_rate_limit=50000000
# The amount of data sent in each batch when leader synchronizes snapshot data
--snapshot_batch_size=1048576
# The rate limit in bytes when leader synchronizes rebuilding index
--rebuild_index_part_rate_limit=4194304
# The amount of data sent in each batch when leader synchronizes rebuilding index
--rebuild_index_batch_size=1048576

########## memory tracker ##########
# trackable memory ratio (trackable_memory / (total_memory - untracked_reserved_memory) )
--memory_tracker_limit_ratio=0.5
# untracked reserved memory in Mib
--memory_tracker_untracked_reserved_memory_mb=50

# enable log memory tracker stats periodically
#--memory_tracker_detail_log=false
--memory_tracker_detail_log=true
# log memory tracker stats interval in milliseconds
#--memory_tracker_detail_log_interval_ms=60000
--memory_tracker_detail_log_interval_ms=10000
# enable memory background purge (if jemalloc is used)
--memory_purge_enabled=true
# memory background purge interval in seconds
--memory_purge_interval_seconds=10


--timezone_name=UTC+08:00
--storage_client_timeout_ms=60000
--num_compaction_threads=32
#--max_edge_returned_per_vertex=1000000
#--enable_partitioned_index_filter=true
--reader_handlers=64
#--move_files=true
#--max_batch_size=1024
  • Linux sysctl settings
vm.swappiness = 0
net.ipv4.neigh.default.gc_stale_time = 120

net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2

net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2

net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1

kernel.sysrq = 1
net.ipv4.tcp_slow_start_after_idle = 0
net.core.netdev_max_backlog = 10000
net.core.somaxconn = 32768
net.ipv4.tcp_max_syn_backlog = 8096
fs.inotify.max_user_instances = 8192
fs.file-max = 2097152
fs.inotify.max_user_watches = 524288
net.core.bpf_jit_enable = 1
net.core.bpf_jit_harden = 1
net.core.dev_weight_tx_bias = 1
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 12582912 16777216
net.ipv4.tcp_wmem = 4096 12582912 16777216
net.core.rps_sock_flow_entries = 8192
net.ipv4.neigh.default.gc_thresh1 = 2048
net.ipv4.neigh.default.gc_thresh2 = 4096
net.ipv4.neigh.default.gc_thresh3 = 8192
net.ipv4.tcp_max_orphans = 32768
net.ipv4.tcp_max_tw_buckets = 32768
vm.max_map_count = 262144
kernel.threads-max = 300580
kernel.pid_max=4194303
kernel.core_pattern = core
kernel.core_uses_pid = 1

How high is the QPS?

Very low. Without ramping traffic up, QPS is only around 30, and there is roughly one timeout every 2 seconds.

Run a PROFILE on it and take a look.
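For example (a sketch using the statement from the opening post, where 'parameter' is a placeholder vertex ID), prefix the query with PROFILE so that graphd returns per-operator execution statistics along with the result:

    profile match (n:black:tag1)
    where id(n)=='parameter'
    return count(case when n.black.black_end_date>=toString(date()) then n end)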

The tag properties are redacted here, not missing.

"id","name","dependencies","profiling data","operator info"
"6","Aggregate","8","{
  ""execTime"": ""13(us)"",
  ""rows"": 1,
  ""totalTime"": ""15(us)"",
  ""version"": 0
}","outputVar: {
  ""colNames"": [
    ""count(CASE WHEN (n.black.black_end_date>=toString(date())) THEN n END)""
  ],
  ""type"": ""DATASET"",
  ""name"": ""__Aggregate_6""
}
inputVar: __Project_5
groupKeys: []
groupItems: [
  {
    ""expr"": ""count(CASE WHEN (n.black.black_end_date>=toString(date())) THEN $n END)""
  }
]"
"8","AppendVertices","2","{
  ""execTime"": ""723(us)"",
  ""resp[0]"": {
    ""exec"": ""1134(us)"",
    ""host"": ""ip:9779"",
    ""total"": ""1577(us)""
  },
  ""rows"": 0,
  ""totalTime"": ""2228(us)"",
  ""total_rpc"": ""1643(us)"",
  ""version"": 0
}","outputVar: {
  ""colNames"": [
    ""n""
  ],
  ""type"": ""DATASET"",
  ""name"": ""__Project_5""
}
inputVar: __Dedup_2
space: 2
dedup: 1
limit: -1
filter: (black._tag IS NOT EMPTY AND tag1._tag IS NOT EMPTY)
orderBy: []
src: $-._vid
props: [
  {
    ""props"": [
      ""is_valid"",
      ""_tag""
    ],
    ""tagId"": 49
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 48
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 47
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 46
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 45
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 44
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 43
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 42
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 41
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 40
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 39
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 38
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 37
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 36
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 35
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 34
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 33
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 32
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 31
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 30
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 29
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 28
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 27
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 13
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 12
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 11
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 10
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 9
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 8
  },
  {
    ""props"": [
      ""black_start_date"",
      ""black_end_date"",
      ""_tag""
    ],
    ""tagId"": 4
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 3
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 5
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 6
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 7
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 14
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 15
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 16
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 17
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 18
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 19
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 20
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 21
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 22
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 23
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 24
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 50
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 25
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 244
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 51
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 26
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 245
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 52
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 53
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 54
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 55
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 56
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 57
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 58
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 59
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 60
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 61
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 62
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 63
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 64
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 65
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 66
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 67
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 68
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 69
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 70
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 71
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 72
  },
  {
    ""props"": [
      ""_tag""
    ],
    ""tagId"": 73
  }
]
exprs: 
vertex_filter: 
if_track_previous_path: false"
"2","Dedup","1","{
  ""execTime"": ""5(us)"",
  ""rows"": 1,
  ""totalTime"": ""6(us)"",
  ""version"": 0
}","outputVar: {
  ""colNames"": [
    ""_vid""
  ],
  ""type"": ""DATASET"",
  ""name"": ""__Dedup_2""
}
inputVar: __VAR_0"
"1","PassThrough","3","{
  ""execTime"": ""2(us)"",
  ""rows"": 0,
  ""totalTime"": ""3(us)"",
  ""version"": 0
}","outputVar: {
  ""colNames"": [
    ""_vid""
  ],
  ""type"": ""DATASET"",
  ""name"": ""__VAR_0""
}
inputVar: "
"3","Start","","{
  ""execTime"": ""0(us)"",
  ""rows"": 0,
  ""totalTime"": ""22(us)"",
  ""version"": 0
}","outputVar: {
  ""colNames"": [],
  ""type"": ""DATASET"",
  ""name"": ""__Start_3""
}"


Would this have an impact? The session count currently stays at around 200.

But the server-side connection count is around 1k.
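As a cross-check on the graph side (a minimal sketch; listing other users' sessions needs sufficient privileges), the registered sessions can be inspected from a console with:

    # all sessions registered in the meta service
    show sessions;
    # only the sessions served by the graphd you are connected to
    show local sessions;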

Please add QPS, disk, and CPU monitoring information.

QPS is currently around 7.

On Saturday we adjusted the configuration: auto compaction was disabled and one manual compaction was run (no compaction has run from Saturday until now).
On Sunday the timeout rate was unchanged, at roughly 0.7%.
From today up to now the timeout rate is roughly 6%.

graphd changes:

max_job_size=16 -> 1
num_operator_threads=4 -> 2
memory_tracker_limit_ratio=0.45 -> 0.3
num_accept_threads=1 -> 4
listen_backlog=1024 -> 4098 
num_netio_threads=0 -> 32
num_worker_threads= 64 -> 32

storaged changes:

max_concurrent_subtasks=10 -> 5
reader_handlers 64 -> commented out
memory_tracker_limit_ratio=0.5 -> 0.65
"disable_auto_compactions": false -> true
rocksdb_rate_limit=50

I'd suggest turning auto_compactions back on. Also, judging from the profile, the latency of a single query doesn't look high at all.
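If it helps, here is a minimal sketch of flipping that switch back at runtime from a console; note that with --local_config=true, as in the configs above, the conf file should also be updated so the change survives a restart:

    update configs storage:rocksdb_column_family_options = { disable_auto_compactions = false };
    # verify
    show configs storage;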

Right. When a timed-out statement is taken out and run on its own, the latency is basically at the millisecond level, nowhere near the timeout; but during the parallel (shadow) run, timeouts occur frequently.

Could it be a connection-pool problem? We are currently using the Java session pool.

lazy val sessionPool = {
  val poolConfig = new SessionPoolConfig(getUrlArray(urls), spaceName, user, password)
    .setMaxSessionSize(maxConnSize.toInt)
    .setMinSessionSize(2)
    .setReconnect(true)
    .setRetryTimes(1)
    .setIntervalTime(100)   // wait interval between execution retries
    .setWaitTime(100)       // wait interval when acquiring a session
    .setHealthCheckTime(10)
    .setTimeout(900)

  val pool = new SessionPool(poolConfig)
  val initOK = pool.init()
  if (!initOK) {
    // raises a P1 alert during the unified initialization after startup
    log.error("Graph store initialization failed (1)")
    try {
      Thread.sleep(200)
    } catch {
      case e: Exception => {}
    }
    if (!pool.init()) {
      log.error("Graph store initialization failed (2)")
      try {
        Thread.sleep(200)
      } catch {
        case e: Exception => {}
      }
      if (!pool.init()) {
        log.error("Graph store initialization failed (3)")
      }
    }
  }
  pool
}

def getSessionPool: SessionPool = {
  sessionPool
}

Invocation:
val rs = getSessionPool.execute(cql)

Attaching the graphd log of a timed-out query:
match (n:cuid)
where n.cuid.id=="aL+ZXBhEvFrH48uVF2CtE1"
return case when count(n) == 0 then -9998 when count(n) > 0 then min(case when "sn" in labels(n) then -9998 else null end ) end as v2

I20250121 11:26:48.947898 1655566 GraphSessionManager.cpp:40] Find session from cache: 1737430489176096
I20250121 11:26:48.947953 1655566 ClientSession.cpp:43] Add query: match (n:cuid)
where n.cuid.id==“aL+ZXBhEvFrH48uVF2CtE1”
return case when count(n) == 0 then -9998 when count(n) > 0 then min(case when “sn” in labels(n) then -9998 else null end ) end as v2, epId: 670351
I20250121 11:26:48.947958 1655566 QueryInstance.cpp:80] Parsing query: match (n:cuid)
where n.cuid.id==“aL+ZXBhEvFrH48uVF2CtE1”
return case when count(n) == 0 then -9998 when count(n) > 0 then min(case when “sn” in labels(n) then -9998 else null end ) end as v2
I20250121 11:26:48.948045 1655566 Symbols.cpp:48] New variable for: __Start_0
I20250121 11:26:48.948050 1655566 PlanNode.cpp:27] New variable: __Start_0
I20250121 11:26:48.948055 1655566 Validator.cpp:350] Space chosen, name: test id: 2
I20250121 11:26:48.948087 1655566 ServerBasedSchemaManager.cpp:41] Get Tag Schema Space 2, TagID 7, Version -1
I20250121 11:26:48.948212 1655566 Symbols.cpp:48] New variable for: __IndexScan_1
I20250121 11:26:48.948217 1655566 PlanNode.cpp:27] New variable: __IndexScan_1
I20250121 11:26:48.948222 1655566 MatchPathPlanner.cpp:126] Find starts: 0, Pattern has 0 edges, root: __IndexScan_1, colNames: _vid
I20250121 11:26:48.948226 1655566 Symbols.cpp:48] New variable for: __Start_2
I20250121 11:26:48.948230 1655566 PlanNode.cpp:27] New variable: __Start_2
I20250121 11:26:48.948235 1655566 Symbols.cpp:48] New variable for: __AppendVertices_3
I20250121 11:26:48.948237 1655566 PlanNode.cpp:27] New variable: __AppendVertices_3
I20250121 11:26:48.948316 1655566 Symbols.cpp:48] New variable for: __Project_4
I20250121 11:26:48.948319 1655566 PlanNode.cpp:27] New variable: __Project_4
I20250121 11:26:48.948325 1655566 Symbols.cpp:48] New variable for: __Filter_5
I20250121 11:26:48.948333 1655566 PlanNode.cpp:27] New variable: __Filter_5
I20250121 11:26:48.948340 1655566 Symbols.cpp:48] New variable for: __Aggregate_6
I20250121 11:26:48.948343 1655566 PlanNode.cpp:27] New variable: __Aggregate_6
I20250121 11:26:48.948346 1655566 Symbols.cpp:48] New variable for: __Project_7
I20250121 11:26:48.948349 1655566 PlanNode.cpp:27] New variable: __Project_7
I20250121 11:26:48.948354 1655566 ReturnClausePlanner.cpp:52] return root: __Project_7 colNames: v2
I20250121 11:26:48.948356 1655566 MatchPlanner.cpp:172] root(Project_7): __Project_7, tail(Start_2): __Start_2
I20250121 11:26:48.948360 1655566 Validator.cpp:409] root: Project tail: Start
I20250121 11:26:48.948364 1655566 Validator.cpp:409] root: Project tail: Start
I20250121 11:26:48.948411 1655566 Symbols.cpp:48] New variable for: __IndexScan_8
I20250121 11:26:48.948415 1655566 PlanNode.cpp:27] New variable: __IndexScan_8
I20250121 11:26:48.948451 1655566 Symbols.cpp:48] New variable for: __AppendVertices_9
I20250121 11:26:48.948455 1655566 PlanNode.cpp:27] New variable: __AppendVertices_9
I20250121 11:26:48.948503 1655566 Symbols.cpp:48] New variable for: __AppendVertices_10
I20250121 11:26:48.948506 1655566 PlanNode.cpp:27] New variable: __AppendVertices_10
I20250121 11:26:48.948849 1655599 ThriftClientManager-inl.h:47] Getting a client to “ip1”:9779
I20250121 11:26:48.948858 1655600 ThriftClientManager-inl.h:47] Getting a client to “ip2”:9779
I20250121 11:26:48.948868 1655601 ThriftClientManager-inl.h:47] Getting a client to “ip3”:9779
I20250121 11:26:48.948876 1655602 ThriftClientManager-inl.h:47] Getting a client to “ip4”:9779
I20250121 11:26:48.948884 1655603 ThriftClientManager-inl.h:47] Getting a client to “ip5”:9779
I20250121 11:26:48.948894 1655604 ThriftClientManager-inl.h:47] Getting a client to “ip7”:9779
I20250121 11:26:48.948901 1655605 ThriftClientManager-inl.h:47] Getting a client to “ip6”:9779
I20250121 11:26:48.949930 1655599 AsyncSocket.cpp:2711] this=0x7f1180e12400, AsyncSocket::handleRead() got 56 bytes
I20250121 11:26:48.949985 1655601 AsyncSocket.cpp:2711] this=0x7f117f212000, AsyncSocket::handleRead() got 56 bytes
I20250121 11:26:48.950269 1655603 AsyncSocket.cpp:2711] this=0x7f117ce18800, AsyncSocket::handleRead() got 71 bytes
I20250121 11:26:48.950486 1655602 AsyncSocket.cpp:2711] this=0x7f117dc11800, AsyncSocket::handleRead() got 71 bytes
I20250121 11:26:48.950624 1655604 AsyncSocket.cpp:2711] this=0x7f117c019000, AsyncSocket::handleRead() got 56 bytes
I20250121 11:26:48.950639 1655600 AsyncSocket.cpp:2711] this=0x7f118000f400, AsyncSocket::handleRead() got 56 bytes
I20250121 11:26:49.229768 1655595 AsyncSocket.cpp:2711] this=0x7f1184612000, AsyncSocket::handleRead() got 63 bytes
I20250121 11:26:49.229820 1655566 GraphSessionManager.cpp:40] Find session from cache: 1737338986633546
I20250121 11:26:49.229858 1655566 ClientSession.cpp:43] Add query: YIELD 1, epId: 670352
I20250121 11:26:49.229863 1655566 QueryInstance.cpp:80] Parsing query: YIELD 1
I20250121 11:26:49.229887 1655566 Symbols.cpp:48] New variable for: __Start_0
I20250121 11:26:49.229892 1655566 PlanNode.cpp:27] New variable: __Start_0
I20250121 11:26:49.229902 1655566 Symbols.cpp:48] New variable for: __VAR_0
I20250121 11:26:49.229905 1655566 AnonVarGenerator.h:28] Build anon var: __VAR_0
I20250121 11:26:49.229908 1655566 Symbols.cpp:48] New variable for: __Project_1
I20250121 11:26:49.229911 1655566 PlanNode.cpp:27] New variable: __Project_1
I20250121 11:26:49.229916 1655566 Validator.cpp:409] root: Project tail: Start
I20250121 11:26:49.229995 1655565 QueryInstance.cpp:128] Finish query: YIELD 1
I20250121 11:26:49.230020 1655565 ClientSession.cpp:52] Delete query, epId: 670352
I20250121 11:26:49.433280 1655605 AsyncSocket.cpp:2711] this=0x7f117b219000, AsyncSocket::handleRead() got 72 bytes
I20250121 11:26:49.433627 1655608 ThriftClientManager-inl.h:47] Getting a client to “ip4”:9779
I20250121 11:26:49.433734 1655610 ThriftClientManager-inl.h:47] Getting a client to “ip3”:9779
I20250121 11:26:49.433854 1655612 ThriftClientManager-inl.h:47] Getting a client to “ip2”:9779
I20250121 11:26:49.435132 1655608 AsyncSocket.cpp:2711] this=0x7f1178a19800, AsyncSocket::handleRead() got 5088 bytes
I20250121 11:26:49.435154 1655608 AsyncSocket.cpp:2711] this=0x7f1178a19800, AsyncSocket::handleRead() got 19813 bytes
I20250121 11:26:49.435348 1655612 AsyncSocket.cpp:2711] this=0x7f1175219000, AsyncSocket::handleRead() got 5088 bytes
I20250121 11:26:49.435369 1655612 AsyncSocket.cpp:2711] this=0x7f1175219000, AsyncSocket::handleRead() got 19807 bytes
I20250121 11:26:49.848938 1655592 AsyncSocket.cpp:2711] this=0x7f1187210c00, AsyncSocket::handleRead() got 0 bytes
I20250121 11:26:49.848958 1655592 Cpp2Connection.cpp:638] Channel client_ip:58742 closed: apache::thrift::transport::TTransportException: Channel Closed
I20250121 11:26:49.848973 1655592 Acceptor.cpp:476] Acceptor=0x7f1176038328 onEmpty()
I20250121 11:26:49.848975 1655592 Cpp2Connection.cpp:179] Task killed due to channel close: client_ip:58742
I20250121 11:26:49.850772 1655590 AsyncSocket.cpp:2711] this=0x7f1188e15400, AsyncSocket::handleRead() got 13 bytes
I20250121 11:26:49.850797 1655590 AsyncSocket.cpp:2711] this=0x7f1188e15400, AsyncSocket::handleRead() got 13 bytes
I20250121 11:26:49.850804 1655590 Cpp2Worker.cpp:143] Cpp2Worker: Creating connection for socket 395
I20250121 11:26:49.850828 1655590 Cpp2Worker.cpp:152] Cpp2Worker: created connection for socket 395
I20250121 11:26:49.850833 1655590 AsyncSocket.cpp:2711] this=0x7f1188e15400, AsyncSocket::handleRead() got 38 bytes
I20250121 11:26:49.850857 1655590 AsyncSocket.cpp:2711] this=0x7f1188e15400, AsyncSocket::handleRead() got -2 bytes
I20250121 11:26:49.852344 1655590 AsyncSocket.cpp:2711] this=0x7f1188e15400, AsyncSocket::handleRead() got 39 bytes
I20250121 11:26:49.852362 1655590 AsyncSocket.cpp:2711] this=0x7f1188e15400, AsyncSocket::handleRead() got 0 bytes
I20250121 11:26:49.852366 1655576 GraphService.cpp:142] Sign out session 1737430489176096
I20250121 11:26:49.852366 1655590 Cpp2Connection.cpp:638] Channel client_ip:40711 closed: apache::thrift::transport::TTransportException: Channel Closed
I20250121 11:26:49.852391 1655590 Cpp2Connection.cpp:179] Task killed due to channel close: client_ip:40711
I20250121 11:26:49.852411 1655613 ThriftClientManager-inl.h:47] Getting a client to “ip3”:9559
I20250121 11:26:49.852438 1655613 MetaClient.cpp:730] Send request to meta “ip3”:9559
I20250121 11:26:49.854229 1655613 AsyncSocket.cpp:2711] this=0x7f1174418400, AsyncSocket::handleRead() got 45 bytes
I20250121 11:26:49.943779 1655591 AsyncSocket.cpp:2711] this=0x7f1188012400, AsyncSocket::handleRead() got 329 bytes
I20250121 11:26:49.943809 1655576 GraphSessionManager.cpp:40] Find session from cache: 1737430379870304
I20250121 11:26:49.943845 1655566 ClientSession.cpp:43] Add query: unwind split(‘ka5esTvD’,‘,’) as tag1
match (n:tag1) where id(n) == tag1
return case when count(n) == 0 then 0 when count(n) > 0 then min(case when “sn” in labels(n) then -9998 else null end ) end as v3, epId: 670353
I20250121 11:26:49.943859 1655566 QueryInstance.cpp:80] Parsing query: unwind split(‘ka5esTvD’,‘,’) as tag1
match (n:tag1) where id(n) == tag1
return case when count(n) == 0 then 0 when count(n) > 0 then min(case when “sn” in labels(n) then -9998 else null end ) end as v3
I20250121 11:26:49.943922 1655566 Symbols.cpp:48] New variable for: __Start_0
I20250121 11:26:49.943926 1655566 PlanNode.cpp:27] New variable: __Start_0
I20250121 11:26:49.943931 1655566 Validator.cpp:350] Space chosen, name: test id: 2
I20250121 11:26:49.944056 1655566 Symbols.cpp:48] New variable for: __Unwind_1
I20250121 11:26:49.944059 1655566 PlanNode.cpp:27] New variable: __Unwind_1
I20250121 11:26:49.944067 1655566 Symbols.cpp:48] New variable for: __VAR_0
I20250121 11:26:49.944069 1655566 AnonVarGenerator.h:28] Build anon var: __VAR_0
I20250121 11:26:49.944072 1655566 Symbols.cpp:48] New variable for: __Start_2
I20250121 11:26:49.944075 1655566 PlanNode.cpp:27] New variable: __Start_2
I20250121 11:26:49.944077 1655566 MatchPlanner.cpp:172] root(Unwind_1): __Unwind_1, tail(Start_2): __Start_2
I20250121 11:26:49.944087 1655566 Symbols.cpp:48] New variable for: __Argument_3
I20250121 11:26:49.944089 1655566 PlanNode.cpp:27] New variable: __Argument_3
I20250121 11:26:49.944092 1655566 MatchPathPlanner.cpp:126] Find starts: 0, Pattern has 0 edges, root: __Argument_3, colNames: tag1
I20250121 11:26:49.944097 1655566 Symbols.cpp:48] New variable for: __AppendVertices_4
I20250121 11:26:49.944100 1655566 PlanNode.cpp:27] New variable: __AppendVertices_4
I20250121 11:26:49.944177 1655566 Symbols.cpp:48] New variable for: __Project_5
I20250121 11:26:49.944180 1655566 PlanNode.cpp:27] New variable: __Project_5
I20250121 11:26:49.944185 1655566 Symbols.cpp:48] New variable for: __CrossJoin_6
I20250121 11:26:49.944188 1655566 PlanNode.cpp:27] New variable: __CrossJoin_6
I20250121 11:26:49.944193 1655566 Symbols.cpp:48] New variable for: __Filter_7
I20250121 11:26:49.944196 1655566 PlanNode.cpp:27] New variable: __Filter_7
I20250121 11:26:49.944202 1655566 Symbols.cpp:48] New variable for: __Aggregate_8
I20250121 11:26:49.944204 1655566 PlanNode.cpp:27] New variable: __Aggregate_8
I20250121 11:26:49.944207 1655566 Symbols.cpp:48] New variable for: __Project_9
I20250121 11:26:49.944211 1655566 PlanNode.cpp:27] New variable: __Project_9
I20250121 11:26:49.944213 1655566 ReturnClausePlanner.cpp:52] return root: __Project_9 colNames: v3
I20250121 11:26:49.944216 1655566 MatchPlanner.cpp:172] root(Project_9): __Project_9, tail(Start_2): __Start_2
I20250121 11:26:49.944219 1655566 Validator.cpp:409] root: Project tail: Start
I20250121 11:26:49.944222 1655566 Validator.cpp:409] root: Project tail: Start
I20250121 11:26:49.944258 1655566 Symbols.cpp:48] New variable for: __AppendVertices_10
I20250121 11:26:49.944262 1655566 PlanNode.cpp:27] New variable: __AppendVertices_10
I20250121 11:26:49.944308 1655566 Symbols.cpp:48] New variable for: __AppendVertices_11
I20250121 11:26:49.944311 1655566 PlanNode.cpp:27] New variable: __AppendVertices_11
I20250121 11:26:49.944866 1655614 ThriftClientManager-inl.h:47] Getting a client to “.246”:9779
I20250121 11:26:50.539199 1655602 AsyncSocket.cpp:2711] this=0x7f117dc11400, AsyncSocket::handleRead() got 63 bytes
I20250121 11:26:50.539279 1655545 GraphSessionManager.cpp:40] Find session from cache: 1737430354109783
I20250121 11:26:50.539331 1655557 ClientSession.cpp:43] Add query: YIELD 1, epId: 670354
I20250121 11:26:50.539347 1655557 QueryInstance.cpp:80] Parsing query: YIELD 1
I20250121 11:26:50.539398 1655557 Symbols.cpp:48] New variable for: __Start_0
I20250121 11:26:50.539403 1655557 PlanNode.cpp:27] New variable: __Start_0
I20250121 11:26:50.539418 1655557 Symbols.cpp:48] New variable for: __VAR_0
I20250121 11:26:50.539427 1655557 AnonVarGenerator.h:28] Build anon var: __VAR_0
I20250121 11:26:50.539431 1655557 Symbols.cpp:48] New variable for: __Project_1
I20250121 11:26:50.539435 1655557 PlanNode.cpp:27] New variable: __Project_1
I20250121 11:26:50.539441 1655557 Validator.cpp:409] root: Project tail: Start
I20250121 11:26:50.539551 1655545 QueryInstance.cpp:128] Finish query: YIELD 1
I20250121 11:26:50.539566 1655545 ClientSession.cpp:52] Delete query, epId: 670354
I20250121 11:26:50.623739 1655610 AsyncSocket.cpp:2711] this=0x7f1176e19000, AsyncSocket::handleRead() got 5088 bytes
I20250121 11:26:50.623762 1655610 AsyncSocket.cpp:2711] this=0x7f1176e19000, AsyncSocket::handleRead() got 19797 bytes
I20250121 11:26:50.624653 1655614 AsyncSocket.cpp:2711] this=0x7f1173619400, AsyncSocket::handleRead() got 5088 bytes
I20250121 11:26:50.624677 1655614 AsyncSocket.cpp:2711] this=0x7f1173619400, AsyncSocket::handleRead() got 19697 bytes
I20250121 11:26:50.625572 1655565 QueryInstance.cpp:128] Finish query: unwind split(‘ka5esTvD’,‘,’) as tag1
match (n:tag1) where id(n) == tag1
return case when count(n) == 0 then 0 when count(n) > 0 then min(case when “sn” in labels(n) then -9998 else null end ) end as v3
I20250121 11:26:50.625591 1655565 ClientSession.cpp:52] Delete query, epId: 670353
I20250121 11:26:50.625700 1655557 Executor.cpp:597] Execution is being killed. session: 1737430489176096ep: 670351query: match (n:cuid)
where n.cuid.id==“aL+ZXBhEvFrH48uVF2CtE1”
return case when count(n) == 0 then -9998 when count(n) > 0 then min(case when “sn” in labels(n) then -9998 else null end ) end as v2
E20250121 11:26:50.625734 1655557 QueryInstance.cpp:151] Execution had been killed, query: match (n:cuid)
where n.cuid.id==“aL+ZXBhEvFrH48uVF2CtE1”
return case when count(n) == 0 then -9998 when count(n) > 0 then min(case when “sn” in labels(n) then -9998 else null end ) end as v2
I20250121 11:26:50.625756 1655557 ClientSession.cpp:52] Delete query, epId: 670351
I20250121 11:26:50.626989 1655591 AsyncSocket.cpp:2711] this=0x7f1188012400, AsyncSocket::handleRead() got 352 bytes
I20250121 11:26:50.627033 1655557 GraphSessionManager.cpp:40] Find session from cache: 1737430379870304

graphd log of another timed-out call:

I20250122 11:38:26.190208 984348 ClientSession.cpp:43] Add query: match (c:tag1) where id(c)=='E0mhlhkIu0AMuKqmG' match (c)<-[:edge1]->(m:tag2)-[:goods]->(o:info) return count(distinct case when o.info.ob_status in [0,2] then m.tag2.cd else null end) as v1, epId: 1782211
I20250122 11:38:26.190217 984348 QueryInstance.cpp:80] Parsing query: match (c:tag1) where id(c)=='E0mhlhkIu0AMuKqmG' match (c)<-[:edge1]->(m:tag2)-[:goods]->(o:info) return count(distinct case when o.info.ob_status in [0,2] then m.tag2.cd else null end) as v1

I20250122 11:38:26.190258 984348 Symbols.cpp:48] New variable for: __Start_0
I20250122 11:38:26.190261 984348 PlanNode.cpp:27] New variable: __Start_0
I20250122 11:38:26.190266 984348 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.190296 984348 Symbols.cpp:48] New variable for: __VAR_0
I20250122 11:38:26.190300 984348 AnonVarGenerator.h:28] Build anon var: __VAR_0
I20250122 11:38:26.190303 984348 Symbols.cpp:48] New variable for: __VAR_1
I20250122 11:38:26.190306 984348 AnonVarGenerator.h:28] Build anon var: __VAR_1
I20250122 11:38:26.190331 984348 Symbols.cpp:48] New variable for: __VAR_2
I20250122 11:38:26.190335 984348 AnonVarGenerator.h:28] Build anon var: __VAR_2
I20250122 11:38:26.190340 984348 Symbols.cpp:48] New variable for: __PassThrough_1
I20250122 11:38:26.190342 984348 PlanNode.cpp:27] New variable: __PassThrough_1
I20250122 11:38:26.190346 984348 Symbols.cpp:48] New variable for: __Dedup_2
I20250122 11:38:26.190349 984348 PlanNode.cpp:27] New variable: __Dedup_2
I20250122 11:38:26.190353 984348 MatchPathPlanner.cpp:126] Find starts: 0, Pattern has 0 edges, root: __Dedup_2, colNames: _vid
I20250122 11:38:26.190359 984348 Symbols.cpp:48] New variable for: __Start_3
I20250122 11:38:26.190362 984348 PlanNode.cpp:27] New variable: __Start_3
I20250122 11:38:26.190366 984348 Symbols.cpp:48] New variable for: __AppendVertices_4
I20250122 11:38:26.190369 984348 PlanNode.cpp:27] New variable: __AppendVertices_4
I20250122 11:38:26.190423 984348 Symbols.cpp:48] New variable for: __Project_5
I20250122 11:38:26.190425 984348 PlanNode.cpp:27] New variable: __Project_5
I20250122 11:38:26.190430 984348 Symbols.cpp:48] New variable for: __Argument_6
I20250122 11:38:26.190433 984348 PlanNode.cpp:27] New variable: __Argument_6
I20250122 11:38:26.190438 984348 MatchPathPlanner.cpp:126] Find starts: 0, Pattern has 2 edges, root: __Argument_6, colNames: c
I20250122 11:38:26.190441 984348 Symbols.cpp:48] New variable for: __Traverse_7
I20250122 11:38:26.190443 984348 PlanNode.cpp:27] New variable: __Traverse_7
I20250122 11:38:26.190476 984348 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 108, Version -1
I20250122 11:38:26.190483 984348 Symbols.cpp:48] New variable for: __Traverse_8
I20250122 11:38:26.190486 984348 PlanNode.cpp:27] New variable: __Traverse_8
I20250122 11:38:26.190518 984348 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 92, Version -1
I20250122 11:38:26.190521 984348 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 104, Version -1
I20250122 11:38:26.190526 984348 Symbols.cpp:48] New variable for: __AppendVertices_9
I20250122 11:38:26.190528 984348 PlanNode.cpp:27] New variable: __AppendVertices_9
I20250122 11:38:26.190573 984348 Symbols.cpp:48] New variable for: __Project_10
I20250122 11:38:26.190577 984348 PlanNode.cpp:27] New variable: __Project_10
I20250122 11:38:26.190582 984348 Symbols.cpp:48] New variable for: __HashInnerJoin_11
I20250122 11:38:26.190584 984348 PlanNode.cpp:27] New variable: __HashInnerJoin_11
I20250122 11:38:26.190593 984348 Symbols.cpp:48] New variable for: __Aggregate_12
I20250122 11:38:26.190595 984348 PlanNode.cpp:27] New variable: __Aggregate_12
I20250122 11:38:26.190598 984348 ReturnClausePlanner.cpp:52] return root: __Aggregate_12 colNames: v1
I20250122 11:38:26.190601 984348 MatchPlanner.cpp:172] root(Aggregate_12): __Aggregate_12, tail(Start_3): __Start_3
I20250122 11:38:26.190604 984348 Validator.cpp:409] root: Aggregate tail: Start
I20250122 11:38:26.190608 984348 Validator.cpp:409] root: Aggregate tail: Start
I20250122 11:38:26.190644 984348 Symbols.cpp:48] New variable for: __AppendVertices_13
I20250122 11:38:26.190647 984348 PlanNode.cpp:27] New variable: __AppendVertices_13
I20250122 11:38:26.190721 984348 Symbols.cpp:48] New variable for: __AppendVertices_14
I20250122 11:38:26.190724 984348 PlanNode.cpp:27] New variable: __AppendVertices_14
I20250122 11:38:26.190744 984348 Symbols.cpp:48] New variable for: __Traverse_15
I20250122 11:38:26.190747 984348 PlanNode.cpp:27] New variable: __Traverse_15
I20250122 11:38:26.190781 984348 Symbols.cpp:48] New variable for: __AppendVertices_16
I20250122 11:38:26.190784 984348 PlanNode.cpp:27] New variable: __AppendVertices_16
I20250122 11:38:26.191218 984386 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:38:26.191777 984386 AsyncSocket.cpp:2711] this=0x7f1cb3411000, AsyncSocket::handleRead() got 108 bytes
I20250122 11:38:26.191910 984387 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:38:26.192570 984387 AsyncSocket.cpp:2711] this=0x7f1cb2611800, AsyncSocket::handleRead() got 323 bytes
I20250122 11:38:26.192749 984388 ThriftClientManager-inl.h:47] Getting a client to "ip3":9779
I20250122 11:38:26.192763 984390 ThriftClientManager-inl.h:47] Getting a client to "ip5":9779
I20250122 11:38:26.192771 984391 ThriftClientManager-inl.h:47] Getting a client to "ip7":9779
I20250122 11:38:26.192756 984389 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:38:26.193486 984389 AsyncSocket.cpp:2711] this=0x7f1cb0a0f000, AsyncSocket::handleRead() got 424 bytes
I20250122 11:38:26.193681 984391 AsyncSocket.cpp:2711] this=0x7f1cae611000, AsyncSocket::handleRead() got 634 bytes
I20250122 11:38:26.194018 984388 AsyncSocket.cpp:2711] this=0x7f1cb1811000, AsyncSocket::handleRead() got 218 bytes
I20250122 11:38:26.201900 984404 AsyncSocket.cpp:2711] this=0x7f1ca3219000, AsyncSocket::handleRead() got 2048 bytes
##### the differences start from here
I20250122 11:38:26.201920 984404 AsyncSocket.cpp:2711] this=0x7f1ca3219000, AsyncSocket::handleRead() got 4269 bytes
I20250122 11:38:26.201949 984367 GraphSessionManager.cpp:40] Find session from cache: 1737515700489522
I20250122 11:38:26.201980 984367 ClientSession.cpp:43] Add query: insert edge ...
I20250122 11:38:26.201990 984367 QueryInstance.cpp:80] Parsing query: insert edge ...
I20250122 11:38:26.202180 984367 Symbols.cpp:48] New variable for: __Start_0
I20250122 11:38:26.202185 984367 PlanNode.cpp:27] New variable: __Start_0
I20250122 11:38:26.202189 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202193 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 82, Version -1
I20250122 11:38:26.202200 984367 Symbols.cpp:48] New variable for: __InsertEdges_1
I20250122 11:38:26.202203 984367 PlanNode.cpp:27] New variable: __InsertEdges_1
I20250122 11:38:26.202207 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202244 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 141, Version -1
I20250122 11:38:26.202250 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 141, Version -1
I20250122 11:38:26.202255 984367 Symbols.cpp:48] New variable for: __UpdateEdge_2
I20250122 11:38:26.202258 984367 PlanNode.cpp:27] New variable: __UpdateEdge_2
I20250122 11:38:26.202262 984367 Symbols.cpp:48] New variable for: __UpdateEdge_3
I20250122 11:38:26.202265 984367 PlanNode.cpp:27] New variable: __UpdateEdge_3
I20250122 11:38:26.202268 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202271 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 92, Version -1
I20250122 11:38:26.202275 984367 Symbols.cpp:48] New variable for: __InsertEdges_4
I20250122 11:38:26.202278 984367 PlanNode.cpp:27] New variable: __InsertEdges_4
I20250122 11:38:26.202281 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202311 984367 Symbols.cpp:48] New variable for: __UpdateEdge_5
I20250122 11:38:26.202314 984367 PlanNode.cpp:27] New variable: __UpdateEdge_5
I20250122 11:38:26.202317 984367 Symbols.cpp:48] New variable for: __UpdateEdge_6
I20250122 11:38:26.202320 984367 PlanNode.cpp:27] New variable: __UpdateEdge_6
I20250122 11:38:26.202323 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202327 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 82, Version -1
I20250122 11:38:26.202330 984367 Symbols.cpp:48] New variable for: __InsertEdges_7
I20250122 11:38:26.202333 984367 PlanNode.cpp:27] New variable: __InsertEdges_7
I20250122 11:38:26.202337 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202365 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 79, Version -1
I20250122 11:38:26.202370 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 79, Version -1
I20250122 11:38:26.202375 984367 Symbols.cpp:48] New variable for: __UpdateEdge_8
I20250122 11:38:26.202378 984367 PlanNode.cpp:27] New variable: __UpdateEdge_8
I20250122 11:38:26.202381 984367 Symbols.cpp:48] New variable for: __UpdateEdge_9
I20250122 11:38:26.202384 984367 PlanNode.cpp:27] New variable: __UpdateEdge_9
I20250122 11:38:26.202389 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202419 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 141, Version -1
I20250122 11:38:26.202422 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 141, Version -1
I20250122 11:38:26.202427 984367 Symbols.cpp:48] New variable for: __UpdateEdge_10
I20250122 11:38:26.202430 984367 PlanNode.cpp:27] New variable: __UpdateEdge_10
I20250122 11:38:26.202433 984367 Symbols.cpp:48] New variable for: __UpdateEdge_11
I20250122 11:38:26.202436 984367 PlanNode.cpp:27] New variable: __UpdateEdge_11
I20250122 11:38:26.202440 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202442 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 82, Version -1
I20250122 11:38:26.202446 984367 Symbols.cpp:48] New variable for: __InsertEdges_12
I20250122 11:38:26.202450 984367 PlanNode.cpp:27] New variable: __InsertEdges_12
I20250122 11:38:26.202453 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202481 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 79, Version -1
I20250122 11:38:26.202483 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 79, Version -1
I20250122 11:38:26.202488 984367 Symbols.cpp:48] New variable for: __UpdateEdge_13
I20250122 11:38:26.202491 984367 PlanNode.cpp:27] New variable: __UpdateEdge_13
I20250122 11:38:26.202494 984367 Symbols.cpp:48] New variable for: __UpdateEdge_14
I20250122 11:38:26.202497 984367 PlanNode.cpp:27] New variable: __UpdateEdge_14
I20250122 11:38:26.202500 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202531 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 81, Version -1
I20250122 11:38:26.202535 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 81, Version -1
I20250122 11:38:26.202540 984367 Symbols.cpp:48] New variable for: __UpdateEdge_15
I20250122 11:38:26.202543 984367 PlanNode.cpp:27] New variable: __UpdateEdge_15
I20250122 11:38:26.202546 984367 Symbols.cpp:48] New variable for: __UpdateEdge_16
I20250122 11:38:26.202549 984367 PlanNode.cpp:27] New variable: __UpdateEdge_16
I20250122 11:38:26.202560 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202591 984367 Symbols.cpp:48] New variable for: __UpdateEdge_17
I20250122 11:38:26.202594 984367 PlanNode.cpp:27] New variable: __UpdateEdge_17
I20250122 11:38:26.202597 984367 Symbols.cpp:48] New variable for: __UpdateEdge_18
I20250122 11:38:26.202600 984367 PlanNode.cpp:27] New variable: __UpdateEdge_18
I20250122 11:38:26.202603 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202605 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 82, Version -1
I20250122 11:38:26.202610 984367 Symbols.cpp:48] New variable for: __InsertEdges_19
I20250122 11:38:26.202613 984367 PlanNode.cpp:27] New variable: __InsertEdges_19
I20250122 11:38:26.202616 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202646 984367 Symbols.cpp:48] New variable for: __UpdateEdge_20
I20250122 11:38:26.202648 984367 PlanNode.cpp:27] New variable: __UpdateEdge_20
I20250122 11:38:26.202651 984367 Symbols.cpp:48] New variable for: __UpdateEdge_21
I20250122 11:38:26.202654 984367 PlanNode.cpp:27] New variable: __UpdateEdge_21
I20250122 11:38:26.202658 984367 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:38:26.202661 984367 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 92, Version -1
I20250122 11:38:26.202665 984367 Symbols.cpp:48] New variable for: __InsertEdges_22
I20250122 11:38:26.202668 984367 PlanNode.cpp:27] New variable: __InsertEdges_22
I20250122 11:38:26.202672 984367 Validator.cpp:409] root: InsertEdges tail: Start
I20250122 11:38:26.202975 984392 ThriftClientManager-inl.h:47] Getting a client to "ip2":9779
I20250122 11:38:26.202988 984393 ThriftClientManager-inl.h:47] Getting a client to "ip7":9779
I20250122 11:38:26.203357 984392 AsyncSocket.cpp:2711] this=0x7f1cad812000, AsyncSocket::handleRead() got 34 bytes
I20250122 11:38:26.203384 984393 AsyncSocket.cpp:2711] this=0x7f1caca11c00, AsyncSocket::handleRead() got 34 bytes
I20250122 11:38:26.203418 984360 InsertExecutor.cpp:68] Add edge time: 482us
I20250122 11:38:26.203474 984394 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:38:26.203711 984394 AsyncSocket.cpp:2711] this=0x7f1cabc11c00, AsyncSocket::handleRead() got 61 bytes
I20250122 11:38:26.203732 984394 StorageClientBase-inl.h:186] Failure! Failed part 60, failed part -3043
I20250122 11:38:26.203755 984342 UpdateExecutor.cpp:113] Update edge time: 313us
I20250122 11:38:26.203814 984395 ThriftClientManager-inl.h:47] Getting a client to "ip2":9779
I20250122 11:38:26.204020 984395 AsyncSocket.cpp:2711] this=0x7f1caae18c00, AsyncSocket::handleRead() got 62 bytes
I20250122 11:38:26.204033 984395 StorageClientBase-inl.h:186] Failure! Failed part 85, failed part -3043
I20250122 11:38:26.204049 984367 UpdateExecutor.cpp:113] Update edge time: 269us
I20250122 11:38:26.204121 984396 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:38:26.204125 984397 ThriftClientManager-inl.h:47] Getting a client to "ip7":9779
I20250122 11:38:26.204452 984397 AsyncSocket.cpp:2711] this=0x7f1ca9218800, AsyncSocket::handleRead() got 34 bytes
I20250122 11:38:26.204501 984396 AsyncSocket.cpp:2711] this=0x7f1caa01a400, AsyncSocket::handleRead() got 34 bytes
I20250122 11:38:26.204535 984342 InsertExecutor.cpp:68] Add edge time: 454us
I20250122 11:38:26.204589 984400 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:38:26.205076 984400 AsyncSocket.cpp:2711] this=0x7f1ca6a19800, AsyncSocket::handleRead() got 55 bytes
I20250122 11:38:26.205111 984354 UpdateExecutor.cpp:113] Update edge time: 560us
I20250122 11:38:26.205169 984402 ThriftClientManager-inl.h:47] Getting a client to "ip6":9779
I20250122 11:38:26.205672 984402 AsyncSocket.cpp:2711] this=0x7f1ca4e19c00, AsyncSocket::handleRead() got 55 bytes
I20250122 11:38:26.205713 984348 UpdateExecutor.cpp:113] Update edge time: 577us
I20250122 11:38:26.205775 984404 ThriftClientManager-inl.h:47] Getting a client to "ip6":9779
I20250122 11:38:26.205787 984405 ThriftClientManager-inl.h:47] Getting a client to "ip4":9779
I20250122 11:38:26.206146 984404 AsyncSocket.cpp:2711] this=0x7f1ca321a400, AsyncSocket::handleRead() got 34 bytes
I20250122 11:38:26.206187 984405 AsyncSocket.cpp:2711] this=0x7f1ca2418800, AsyncSocket::handleRead() got 34 bytes
I20250122 11:38:26.206228 984348 InsertExecutor.cpp:68] Add edge time: 493us
I20250122 11:38:26.206269 984406 ThriftClientManager-inl.h:47] Getting a client to "ip2":9779
I20250122 11:38:26.206454 984406 AsyncSocket.cpp:2711] this=0x7f1ca1618c00, AsyncSocket::handleRead() got 61 bytes
I20250122 11:38:26.206466 984406 StorageClientBase-inl.h:186] Failure! Failed part 125, failed part -3043
I20250122 11:38:26.206481 984348 UpdateExecutor.cpp:113] Update edge time: 237us
I20250122 11:38:26.206521 984407 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:38:26.206743 984407 AsyncSocket.cpp:2711] this=0x7f1ca081a400, AsyncSocket::handleRead() got 60 bytes
I20250122 11:38:26.206755 984407 StorageClientBase-inl.h:186] Failure! Failed part 18, failed part -3043
I20250122 11:38:26.206772 984354 UpdateExecutor.cpp:113] Update edge time: 275us
I20250122 11:38:26.206811 984408 ThriftClientManager-inl.h:47] Getting a client to "ip2":9779
I20250122 11:38:26.206979 984408 AsyncSocket.cpp:2711] this=0x7f1c9fa1a400, AsyncSocket::handleRead() got 61 bytes
I20250122 11:38:26.207000 984408 StorageClientBase-inl.h:186] Failure! Failed part 125, failed part -3043
I20250122 11:38:26.207021 984348 UpdateExecutor.cpp:113] Update edge time: 230us
I20250122 11:38:26.207063 984409 ThriftClientManager-inl.h:47] Getting a client to "ip3":9779
I20250122 11:38:26.207288 984409 AsyncSocket.cpp:2711] this=0x7f1c9ec3b000, AsyncSocket::handleRead() got 62 bytes
I20250122 11:38:26.207309 984409 StorageClientBase-inl.h:186] Failure! Failed part 133, failed part -3043
I20250122 11:38:26.207329 984360 UpdateExecutor.cpp:113] Update edge time: 290us
I20250122 11:38:26.207401 984410 ThriftClientManager-inl.h:47] Getting a client to "ip2":9779
I20250122 11:38:26.207408 984411 ThriftClientManager-inl.h:47] Getting a client to "ip4":9779
I20250122 11:38:26.207732 984410 AsyncSocket.cpp:2711] this=0x7f1c9de19000, AsyncSocket::handleRead() got 34 bytes
I20250122 11:38:26.207758 984411 AsyncSocket.cpp:2711] this=0x7f1c9d07b000, AsyncSocket::handleRead() got 34 bytes
I20250122 11:38:26.207798 984360 InsertExecutor.cpp:68] Add edge time: 445us
I20250122 11:38:26.207841 984412 ThriftClientManager-inl.h:47] Getting a client to "ip2":9779
I20250122 11:38:26.208014 984412 AsyncSocket.cpp:2711] this=0x7f1c9c219c00, AsyncSocket::handleRead() got 61 bytes
I20250122 11:38:26.208029 984412 StorageClientBase-inl.h:186] Failure! Failed part 125, failed part -3043
I20250122 11:38:26.208045 984360 UpdateExecutor.cpp:113] Update edge time: 227us
I20250122 11:38:26.208087 984377 ThriftClientManager-inl.h:47] Getting a client to "ip5":9779
I20250122 11:38:26.806746 984396 AsyncSocket.cpp:2711] this=0x7f1caa018800, AsyncSocket::handleRead() got 125 bytes
I20250122 11:38:26.806788 984367 UpdateExecutor.cpp:61] Update vertice time: 764412us
I20250122 11:38:26.806850 984378 ThriftClientManager-inl.h:47] Getting a client to "ip4":9779
I20250122 11:38:26.807354 984378 AsyncSocket.cpp:2711] this=0x7f1cba614c00, AsyncSocket::handleRead() got 121 bytes
I20250122 11:38:26.807391 984360 UpdateExecutor.cpp:61] Update vertice time: 572us
I20250122 11:38:26.807435 984379 ThriftClientManager-inl.h:47] Getting a client to "ip4":9779
I20250122 11:38:26.807861 984379 AsyncSocket.cpp:2711] this=0x7f1cb9813800, AsyncSocket::handleRead() got 121 bytes
I20250122 11:38:26.807900 984367 UpdateExecutor.cpp:61] Update vertice time: 486us
I20250122 11:38:26.807946 984380 ThriftClientManager-inl.h:47] Getting a client to "ip7":9779
I20250122 11:38:26.808384 984380 AsyncSocket.cpp:2711] this=0x7f1cb8a13800, AsyncSocket::handleRead() got 121 bytes
I20250122 11:38:26.808410 984367 UpdateExecutor.cpp:61] Update vertice time: 488us
I20250122 11:38:26.808459 984381 ThriftClientManager-inl.h:47] Getting a client to "ip7":9779
I20250122 11:38:26.808629 984381 AsyncSocket.cpp:2711] this=0x7f1cb7c13800, AsyncSocket::handleRead() got 60 bytes
I20250122 11:38:26.808640 984381 StorageClientBase-inl.h:186] Failure! Failed part 57, failed part -3043
I20250122 11:38:26.808656 984342 UpdateExecutor.cpp:61] Update vertice time: 227us
I20250122 11:38:26.808722 984382 ThriftClientManager-inl.h:47] Getting a client to "ip5":9779
I20250122 11:38:27.062273 984327 AsyncSocket.cpp:2711] this=0x7f1cd322d000, AsyncSocket::handleRead() got 107 bytes
I20250122 11:38:27.062294 984327 HTTPSession.cpp:690] processing new msg streamID=62294 proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local
I20250122 11:38:27.062305 984327 HTTP2PriorityQueue.cpp:512] Adding id=62294 with parent=0 and weight=16
I20250122 11:38:27.062319 984327 HTTPSession.cpp:839] processing ingress headers complete for proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local, streamID=62294
I20250122 11:38:27.062332 984327 HTTPSession.cpp:980] processing ingress message complete for proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local, streamID=62294
I20250122 11:38:27.062336 984327 HTTPTransaction.cpp:503] ingress EOM on proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local, streamID=62294
I20250122 11:38:27.062338 984326 AsyncSocket.cpp:2711] this=0x7f1cd4035000, AsyncSocket::handleRead() got 106 bytes
I20250122 11:38:27.062353 984327 HTTPSession.cpp:1591] proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local sending headers, size=0, uncompressedSize=100
I20250122 11:38:27.062358 984326 HTTPSession.cpp:690] processing new msg streamID=62134 proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local
I20250122 11:38:27.062376 984327 HTTPSessionBase.cpp:147] notifyEgressBodyBuffered pwsd=45
I20250122 11:38:27.062381 984326 HTTP2PriorityQueue.cpp:512] Adding id=62134 with parent=0 and weight=16
I20250122 11:38:27.062381 984327 HTTPTransaction.cpp:1331] Queued egress EOM on proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local, streamID=62294
I20250122 11:38:27.062397 984327 HTTPTransaction.cpp:1519] transaction already egress complete, not updating pause state proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local, streamID=62294
I20250122 11:38:27.062402 984326 HTTPSession.cpp:839] processing ingress headers complete for proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local, streamID=62134
I20250122 11:38:27.062405 984327 HTTPSession.cpp:2104] proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local egressing txnID=62294 allowed=65536
I20250122 11:38:27.062417 984326 HTTPSession.cpp:980] processing ingress message complete for proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local, streamID=62134
I20250122 11:38:27.062443 984326 HTTPTransaction.cpp:503] ingress EOM on proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local, streamID=62134
I20250122 11:38:27.062435 984327 HTTPTransaction.cpp:1247] Sending 45 bytes of body. eom=yes send_window is None trailers=no proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local, streamID=62294
I20250122 11:38:27.062455 984327 HTTPSessionBase.cpp:147] notifyEgressBodyBuffered pwsd=-45
I20250122 11:38:27.062460 984327 HTTPSessionBase.cpp:147] notifyEgressBodyBuffered pwsd=-45
I20250122 11:38:27.062463 984327 HTTPTransaction.cpp:1519] transaction already egress complete, not updating pause state proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local, streamID=62294
I20250122 11:38:27.062467 984327 HTTPSession.cpp:2211] proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local writing 145, activeWrites=1 cork:0 timestampTx:0 timestampAck:0
I20250122 11:38:27.062479 984327 HTTPTransaction.cpp:151] destroying transaction proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local, streamID=62294
I20250122 11:38:27.062482 984327 HTTPSession.cpp:1886] proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42874, ip7:19669=local removing streamID=62294, liveTransactions was 1
I20250122 11:38:27.062726 984326 HTTPSession.cpp:1591] proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local sending headers, size=0, uncompressedSize=103
I20250122 11:38:27.062733 984326 HTTPSessionBase.cpp:147] notifyEgressBodyBuffered pwsd=13390
I20250122 11:38:27.062737 984326 HTTPTransaction.cpp:1331] Queued egress EOM on proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local, streamID=62134
I20250122 11:38:27.062740 984326 HTTPTransaction.cpp:1519] transaction already egress complete, not updating pause state proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local, streamID=62134
I20250122 11:38:27.062765 984326 HTTPSession.cpp:2104] proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local egressing txnID=62134 allowed=65536
I20250122 11:38:27.062769 984326 HTTPTransaction.cpp:1247] Sending 13390 bytes of body. eom=yes send_window is None trailers=no proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local, streamID=62134
I20250122 11:38:27.062775 984326 HTTPSessionBase.cpp:147] notifyEgressBodyBuffered pwsd=-13390
I20250122 11:38:27.062779 984326 HTTPSessionBase.cpp:147] notifyEgressBodyBuffered pwsd=-13390
I20250122 11:38:27.062783 984326 HTTPTransaction.cpp:1519] transaction already egress complete, not updating pause state proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local, streamID=62134
I20250122 11:38:27.062786 984326 HTTPSession.cpp:2211] proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local writing 13493, activeWrites=1 cork:0 timestampTx:0 timestampAck:0
I20250122 11:38:27.062801 984326 HTTPTransaction.cpp:151] destroying transaction proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local, streamID=62134
I20250122 11:38:27.062806 984326 HTTPSession.cpp:1886] proto=http/1.1, UA=Go-http-client/1.1, downstream=ip1:42870, ip7:19669=local removing streamID=62134, liveTransactions was 1
I20250122 11:38:27.090683 984381 AsyncSocket.cpp:2711] this=0x7f1cb7caf800, AsyncSocket::handleRead() got 0 bytes
I20250122 11:38:27.090698 984381 Cpp2Connection.cpp:638] Channel client_ip:36296 closed: apache::thrift::transport::TTransportException: Channel Closed
I20250122 11:38:27.090709 984381 Acceptor.cpp:476] Acceptor=0x7f1ca4037428 onEmpty()
I20250122 11:38:27.090713 984381 Cpp2Connection.cpp:179] Task killed due to channel close: client_ip:36296
I20250122 11:38:27.091784 984380 AsyncSocket.cpp:2711] this=0x7f1cb8a15800, AsyncSocket::handleRead() got 13 bytes
I20250122 11:38:27.091799 984380 Acceptor.cpp:476] Acceptor=0x7f1ca4036f28 onEmpty()
I20250122 11:38:27.091809 984380 AsyncSocket.cpp:2711] this=0x7f1cb8a15800, AsyncSocket::handleRead() got 13 bytes
I20250122 11:38:27.091813 984380 Acceptor.cpp:476] Acceptor=0x7f1ca4036f28 onEmpty()
I20250122 11:38:27.091817 984380 Cpp2Worker.cpp:143] Cpp2Worker: Creating connection for socket 436
I20250122 11:38:27.091837 984380 Cpp2Worker.cpp:152] Cpp2Worker: created connection for socket 436
I20250122 11:38:27.091842 984380 AsyncSocket.cpp:2711] this=0x7f1cb8a15800, AsyncSocket::handleRead() got 38 bytes
I20250122 11:38:27.091861 984380 AsyncSocket.cpp:2711] this=0x7f1cb8a15800, AsyncSocket::handleRead() got -2 bytes
I20250122 11:38:27.092993 984380 AsyncSocket.cpp:2711] this=0x7f1cb8a15800, AsyncSocket::handleRead() got 39 bytes
I20250122 11:38:27.093016 984380 AsyncSocket.cpp:2711] this=0x7f1cb8a15800, AsyncSocket::handleRead() got 0 bytes
I20250122 11:38:27.093020 984354 GraphService.cpp:142] Sign out session 1737514882128504
I20250122 11:38:27.093021 984380 Cpp2Connection.cpp:638] Channel client_ip:17444 closed: apache::thrift::transport::TTransportException: Channel Closed
I20250122 11:38:27.093039 984380 Acceptor.cpp:476] Acceptor=0x7f1ca4036f28 onEmpty()
I20250122 11:38:27.093042 984380 Cpp2Connection.cpp:179] Task killed due to channel close: client_ip:17444
I20250122 11:38:27.093056 984383 ThriftClientManager-inl.h:47] Getting a client to "ip3":9559
I20250122 11:38:27.093066 984383 MetaClient.cpp:730] Send request to meta "ip3":9559
I20250122 11:38:27.094909 984383 AsyncSocket.cpp:2711] this=0x7f1cb6010800, AsyncSocket::handleRead() got 45 bytes
I20250122 11:38:27.222981 984377 AsyncSocket.cpp:2711] this=0x7f1cbb416c00, AsyncSocket::handleRead() got 62 bytes
I20250122 11:38:27.223007 984377 StorageClientBase-inl.h:186] Failure! Failed part 89, failed part -3043
I20250122 11:38:27.223030 984354 UpdateExecutor.cpp:113] Update edge time: 1015324us
I20250122 11:38:27.223088 984384 ThriftClientManager-inl.h:47] Getting a client to "ip2":9779
I20250122 11:38:27.223147 984390 AsyncSocket.cpp:2711] this=0x7f1cafc11000, AsyncSocket::handleRead() got 180 bytes
I20250122 11:38:27.223189 984382 AsyncSocket.cpp:2711] this=0x7f1cb6e13800, AsyncSocket::handleRead() got 125 bytes
I20250122 11:38:27.223232 984337 UpdateExecutor.cpp:61] Update vertice time: 414687us
I20250122 11:38:27.223295 984385 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:38:27.223307 984384 AsyncSocket.cpp:2711] this=0x7f1cb520f400, AsyncSocket::handleRead() got 62 bytes
I20250122 11:38:27.223331 984384 StorageClientBase-inl.h:186] Failure! Failed part 125, failed part -3043
I20250122 11:38:27.223335 984350 Executor.cpp:597] Execution is being killed. session: 1737514882128504ep: 1782211query: match (c:tag1)
where id(c)=='E0mhlhkIu0AMuKqmG'
match (c)<-[:edge1]->(m:tag2)-[:goods]->(o:info)
return count(distinct case when o.info.ob_status in [0,2] then m.tag2.cd else null end) as v1
I20250122 11:38:27.223352 984351 UpdateExecutor.cpp:113] Update edge time: 292us
E20250122 11:38:27.223387 984363 QueryInstance.cpp:151] Execution had been killed, query: match (c:tag1)
where id(c)=='E0mhlhkIu0AMuKqmG'
match (c)<-[:edge1]->(m:tag2)-[:goods]->(o:info)
return count(distinct case when o.info.ob_status in [0,2] then m.tag2.cd else null end) as v1
I20250122 11:38:27.223416 984363 ClientSession.cpp:52] Delete query, epId: 1782211

The following is the log from re-running the above timed-out query via Studio, where it returned normally.

I20250122 11:43:19.273694 1655575 ClientSession.cpp:43] Add query: match (c:tag1) where id(c)=='E0mhlhkIu0AMuKqmG' match (c)<-[:edge1]->(m:tag2)-[:goods]->(o:info) return count(distinct case when o.info.ob_status in [0,2] then m.tag2.cd else null end) as v1, epId: 2447077
I20250122 11:43:19.273706 1655575 QueryInstance.cpp:80] Parsing query: match (c:tag1) where id(c)=='E0mhlhkIu0AMuKqmG' match (c)<-[:edge1]->(m:tag2)-[:goods]->(o:info) return count(distinct case when o.info.ob_status in [0,2] then m.tag2.cd else null end) as v1

I20250122 11:43:19.273756 1655575 Symbols.cpp:48] New variable for: __Start_0
I20250122 11:43:19.273759 1655575 PlanNode.cpp:27] New variable: __Start_0
I20250122 11:43:19.273763 1655575 Validator.cpp:350] Space chosen, name: graph_v1 id: 2
I20250122 11:43:19.273787 1655575 Symbols.cpp:48] New variable for: __VAR_0
I20250122 11:43:19.273792 1655575 AnonVarGenerator.h:28] Build anon var: __VAR_0
I20250122 11:43:19.273794 1655575 Symbols.cpp:48] New variable for: __VAR_1
I20250122 11:43:19.273797 1655575 AnonVarGenerator.h:28] Build anon var: __VAR_1
I20250122 11:43:19.273821 1655575 Symbols.cpp:48] New variable for: __VAR_2
I20250122 11:43:19.273824 1655575 AnonVarGenerator.h:28] Build anon var: __VAR_2
I20250122 11:43:19.273829 1655575 Symbols.cpp:48] New variable for: __PassThrough_1
I20250122 11:43:19.273831 1655575 PlanNode.cpp:27] New variable: __PassThrough_1
I20250122 11:43:19.273835 1655575 Symbols.cpp:48] New variable for: __Dedup_2
I20250122 11:43:19.273838 1655575 PlanNode.cpp:27] New variable: __Dedup_2
I20250122 11:43:19.273842 1655575 MatchPathPlanner.cpp:126] Find starts: 0, Pattern has 0 edges, root: __Dedup_2, colNames: _vid
I20250122 11:43:19.273845 1655575 Symbols.cpp:48] New variable for: __Start_3
I20250122 11:43:19.273847 1655575 PlanNode.cpp:27] New variable: __Start_3
I20250122 11:43:19.273851 1655575 Symbols.cpp:48] New variable for: __AppendVertices_4
I20250122 11:43:19.273854 1655575 PlanNode.cpp:27] New variable: __AppendVertices_4
I20250122 11:43:19.273896 1655575 Symbols.cpp:48] New variable for: __Project_5
I20250122 11:43:19.273900 1655575 PlanNode.cpp:27] New variable: __Project_5
I20250122 11:43:19.273908 1655575 Symbols.cpp:48] New variable for: __Argument_6
I20250122 11:43:19.273911 1655575 PlanNode.cpp:27] New variable: __Argument_6
I20250122 11:43:19.273916 1655575 MatchPathPlanner.cpp:126] Find starts: 0, Pattern has 2 edges, root: __Argument_6, colNames: c
I20250122 11:43:19.273918 1655575 Symbols.cpp:48] New variable for: __Traverse_7
I20250122 11:43:19.273921 1655575 PlanNode.cpp:27] New variable: __Traverse_7
I20250122 11:43:19.273957 1655575 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 108, Version -1
I20250122 11:43:19.273962 1655575 Symbols.cpp:48] New variable for: __Traverse_8
I20250122 11:43:19.273965 1655575 PlanNode.cpp:27] New variable: __Traverse_8
I20250122 11:43:19.273998 1655575 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 92, Version -1
I20250122 11:43:19.274001 1655575 ServerBasedSchemaManager.cpp:68] Get Edge Schema Space 2, EdgeType 104, Version -1
I20250122 11:43:19.274005 1655575 Symbols.cpp:48] New variable for: __AppendVertices_9
I20250122 11:43:19.274008 1655575 PlanNode.cpp:27] New variable: __AppendVertices_9
I20250122 11:43:19.274046 1655575 Symbols.cpp:48] New variable for: __Project_10
I20250122 11:43:19.274049 1655575 PlanNode.cpp:27] New variable: __Project_10
I20250122 11:43:19.274053 1655575 Symbols.cpp:48] New variable for: __HashInnerJoin_11
I20250122 11:43:19.274056 1655575 PlanNode.cpp:27] New variable: __HashInnerJoin_11
I20250122 11:43:19.274062 1655575 Symbols.cpp:48] New variable for: __Aggregate_12
I20250122 11:43:19.274065 1655575 PlanNode.cpp:27] New variable: __Aggregate_12
I20250122 11:43:19.274068 1655575 ReturnClausePlanner.cpp:52] return root: __Aggregate_12 colNames: v1
I20250122 11:43:19.274071 1655575 MatchPlanner.cpp:172] root(Aggregate_12): __Aggregate_12, tail(Start_3): __Start_3
I20250122 11:43:19.274075 1655575 Validator.cpp:409] root: Aggregate tail: Start
I20250122 11:43:19.274077 1655575 Validator.cpp:409] root: Aggregate tail: Start
I20250122 11:43:19.274116 1655575 Symbols.cpp:48] New variable for: __AppendVertices_13
I20250122 11:43:19.274119 1655575 PlanNode.cpp:27] New variable: __AppendVertices_13
I20250122 11:43:19.274169 1655575 Symbols.cpp:48] New variable for: __AppendVertices_14
I20250122 11:43:19.274171 1655575 PlanNode.cpp:27] New variable: __AppendVertices_14
I20250122 11:43:19.274189 1655575 Symbols.cpp:48] New variable for: __Traverse_15
I20250122 11:43:19.274194 1655575 PlanNode.cpp:27] New variable: __Traverse_15
I20250122 11:43:19.274220 1655575 Symbols.cpp:48] New variable for: __AppendVertices_16
I20250122 11:43:19.274224 1655575 PlanNode.cpp:27] New variable: __AppendVertices_16
I20250122 11:43:19.274664 1655608 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:43:19.274828 1655608 AsyncSocket.cpp:2711] this=0x7f1178a0f400, AsyncSocket::handleRead() got 108 bytes
I20250122 11:43:19.274947 1655610 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:43:19.275187 1655610 AsyncSocket.cpp:2711] this=0x7f1176e1a000, AsyncSocket::handleRead() got 323 bytes
I20250122 11:43:19.275333 1655612 ThriftClientManager-inl.h:47] Getting a client to "ip3":9779
I20250122 11:43:19.275346 1655613 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:43:19.275363 1655615 ThriftClientManager-inl.h:47] Getting a client to "ip7":9779
I20250122 11:43:19.275352 1655614 ThriftClientManager-inl.h:47] Getting a client to "ip5":9779
I20250122 11:43:19.275650 1655615 AsyncSocket.cpp:2711] this=0x7f1172818c00, AsyncSocket::handleRead() got 634 bytes
I20250122 11:43:19.275678 1655613 AsyncSocket.cpp:2711] this=0x7f1174419c00, AsyncSocket::handleRead() got 424 bytes
I20250122 11:43:19.275682 1655612 AsyncSocket.cpp:2711] this=0x7f1175219800, AsyncSocket::handleRead() got 218 bytes
I20250122 11:43:19.275696 1655614 AsyncSocket.cpp:2711] this=0x7f1173613000, AsyncSocket::handleRead() got 179 bytes
I20250122 11:43:19.275956 1655616 ThriftClientManager-inl.h:47] Getting a client to "ip2":9779
I20250122 11:43:19.275970 1655618 ThriftClientManager-inl.h:47] Getting a client to "ip1":9779
I20250122 11:43:19.275960 1655617 ThriftClientManager-inl.h:47] Getting a client to "ip5":9779
I20250122 11:43:19.275985 1655619 ThriftClientManager-inl.h:47] Getting a client to "ip7":9779
I20250122 11:43:19.276001 1655585 ThriftClientManager-inl.h:47] Getting a client to "ip6":9779
I20250122 11:43:19.275993 1655620 ThriftClientManager-inl.h:47] Getting a client to "ip3":9779
I20250122 11:43:19.276015 1655586 ThriftClientManager-inl.h:47] Getting a client to "ip4":9779
I20250122 11:43:19.276479 1655618 AsyncSocket.cpp:2711] this=0x7f116fe1a400, AsyncSocket::handleRead() got 116 bytes
I20250122 11:43:19.276525 1655616 AsyncSocket.cpp:2711] this=0x7f1171a19c00, AsyncSocket::handleRead() got 114 bytes
I20250122 11:43:19.276542 1655617 AsyncSocket.cpp:2711] this=0x7f1170c19400, AsyncSocket::handleRead() got 218 bytes
I20250122 11:43:19.276610 1655619 AsyncSocket.cpp:2711] this=0x7f116f018400, AsyncSocket::handleRead() got 252 bytes
I20250122 11:43:19.276623 1655586 AsyncSocket.cpp:2711] this=0x7f118c614000, AsyncSocket::handleRead() got 149 bytes
I20250122 11:43:19.276674 1655620 AsyncSocket.cpp:2711] this=0x7f116e219000, AsyncSocket::handleRead() got 252 bytes
I20250122 11:43:19.276948 1655585 AsyncSocket.cpp:2711] this=0x7f118d415c00, AsyncSocket::handleRead() got 150 bytes
I20250122 11:43:19.277141 1655549 QueryInstance.cpp:128] Finish query: match (c:tag1) where id(c)=='E0mhlhkIu0AMuKqmG' match (c)<-[:edge1]->(m:tag2)-[:goods]->(o:info) return count(distinct case when o.info.ob_status in [0,2] then m.tag2.cd else null end) as v1
I20250122 11:43:19.277161 1655549 ClientSession.cpp:52] Delete query, epId: 2447077

Judging from the logs, compared with the normal run, the log of the timed-out query has an extra insert statement interleaved in the middle.

Looking further at the logs, the kill was initiated by the client itself; it was not triggered by hitting a resource limit or by a storaged response timeout.
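Since the deployment uses the Java client (installed as a jar), one plausible mechanism for such a client-initiated kill is the RPC timeout configured on the connection pool: when it is shorter than the real query latency, the client closes the channel, and the server then logs "Task killed due to channel close" and signs out the session, as seen above. A minimal sketch of where this timeout lives, assuming the nebula-java client; the address, credentials, and the 60 s value are placeholders, not a recommendation:

import com.vesoft.nebula.client.graph.NebulaPoolConfig;
import com.vesoft.nebula.client.graph.data.HostAddress;
import com.vesoft.nebula.client.graph.net.NebulaPool;
import com.vesoft.nebula.client.graph.net.Session;

import java.util.Arrays;

public class TimeoutCheck {
    public static void main(String[] args) throws Exception {
        NebulaPoolConfig poolConfig = new NebulaPoolConfig();
        // Client-side RPC timeout in milliseconds (0 means no timeout).
        // If this is shorter than the actual query latency, the client gives up,
        // the channel is closed, and graphd kills the running execution plan.
        poolConfig.setTimeout(60000);
        poolConfig.setMaxConnSize(100);

        NebulaPool pool = new NebulaPool();
        pool.init(Arrays.asList(new HostAddress("ip1", 9669)), poolConfig);
        Session session = pool.getSession("user", "password", false);
        try {
            session.execute("MATCH (c:tag1) WHERE id(c)=='E0mhlhkIu0AMuKqmG' RETURN c");
        } finally {
            session.release();
            pool.close();
        }
    }
}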
Our suspicion is that the storaged nodes' thread pools are saturated, so requests queue up waiting for a thread, which is what makes the query latency grow.
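One way to sanity-check this while the timeouts are happening is to pull the built-in stats page that each storaged exposes on its ws_http_port (19779 in the config below). A small sketch, assuming Java 11+ and that ip1 stands in for one of the storaged hosts; the exact metric names to look at should be taken from the NebulaGraph monitoring docs:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class StoragedStats {
    public static void main(String[] args) throws Exception {
        // Fetch the storaged stats page while timeouts are occurring,
        // to see whether request latency / queueing metrics spike.
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://ip1:19779/stats"))
                .GET()
                .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}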

Regarding the thread reuse seen across the requests in these logs: this is normal. Queries share one executor thread pool; when an operator finishes, the thread it holds is released and may be picked up by another request. Moreover, some operators are internally multi-threaded, so some of their threads may finish and be released earlier than others.
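A purely illustrative sketch of that behavior (this is not NebulaGraph's actual executor code): operators of different queries submitted to one small fixed pool end up running on the same worker threads once earlier operators release them.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ThreadReuseDemo {
    public static void main(String[] args) throws InterruptedException {
        // A small shared pool standing in for graphd's executor thread pool.
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // "Operators" from two different queries share the same pool; as soon as
        // one operator completes, its thread can run an operator of another query.
        for (String query : new String[]{"query-A", "query-B"}) {
            for (int op = 1; op <= 3; op++) {
                final String name = query + "/operator-" + op;
                pool.submit(() ->
                        System.out.println(name + " ran on " + Thread.currentThread().getName()));
            }
        }

        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}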

Given the analysis above, the problem is most likely on the storage side. As a quick check, double the relevant storaged settings, for example
--num_io_threads=64
--num_worker_threads=128
and verify whether the timeout frequency goes down (see the config sketch below).
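For reference, these flags live in the misc section of nebula-storaged.conf. In the storaged config posted further down, --num_worker_threads is already 128 while --num_io_threads is 16, so the effective change is the IO thread count; the values below are only the suggested starting point, to be validated against the timeout rate:

# nebula-storaged.conf, misc section
--num_io_threads=64
--num_worker_threads=128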


graph configuration

########## basics ##########
# Whether to run as a daemon process
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-graphd.pid
# Whether to enable optimizer
--enable_optimizer=true
# Heartbeat interval of communication between meta client and graphd service
--heartbeat_interval_secs=10
# The default charset when a space is created
--default_charset=utf8
# The default collate when a space is created
--default_collate=utf8_bin

# Whether to use the configuration obtained from the configuration file
--local_config=true

########## logging ##########
# The directory to host logging files
--log_dir=logs
# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
--minloglevel=0
# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
--v=4
# Maximum seconds to buffer the log messages
--logbufsecs=0
# Whether to redirect stdout and stderr to separate output files
--redirect_stdout=true
# Destination filename of stdout and stderr, which will also reside in log_dir.
--stdout_log_file=graphd-stdout.log
--stderr_log_file=graphd-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=3
# Whether logging files' names contain a timestamp
--timestamp_in_logfile_name=true

########## query ##########
# Whether to treat partial success as an error.
# This flag is only used for Read-only access, and Modify access always treats partial success as an error.
--accept_partial_success=false
# Maximum sentence length, unit byte
--max_allowed_query_size=4194304

########## networking ##########
# Comma separated Meta Server Addresses
--meta_server_addrs=ip1:9559,ip2:9559,ip3:9559
# Local IP used to identify the nebula-graphd process.
# Change it to an address other than loopback if the service is distributed or
# will be accessed remotely.
--local_ip=ip1
# Network device to listen on
--listen_netdev=any
# Port to listen on
--port=9669
# To turn on SO_REUSEPORT or not
--reuse_port=false
# Backlog of the listen socket, adjust this together with net.core.somaxconn
--listen_backlog=4096
# The number of seconds Nebula service waits before closing the idle connections
--client_idle_timeout_secs=28800
# The number of seconds before idle sessions expire
# The range should be in [1, 604800]
--session_idle_timeout_secs=28800
# The number of threads to accept incoming connections
--num_accept_threads=1
# The number of networking IO threads, 0 for # of CPU cores
--num_netio_threads=32
# Max active connections for all networking threads. 0 means no limit.
# Max connections for each networking thread = num_max_connections / num_netio_threads
--num_max_connections=0
# The number of threads to execute user queries, 0 for # of CPU cores
--num_worker_threads=32
# HTTP service ip
--ws_ip=ip1
# HTTP service port
--ws_http_port=19669
# storage client timeout
--storage_client_timeout_ms=360000
# slow query threshold in us
--slow_query_threshold_us=500000
# Port to listen on Meta with HTTP protocol, it corresponds to ws_http_port in metad's configuration file
--ws_meta_http_port=19559

########## authentication ##########
# Enable authorization
--enable_authorize=true
# User login authentication type, password for nebula authentication, ldap for ldap authentication, cloud for cloud authentication
--auth_type=password

########## memory ##########
# System memory high watermark ratio, cancel the memory checking when the ratio greater than 1.0
--system_memory_high_watermark_ratio=0.95

########## metrics ##########
--enable_space_level_metrics=true

########## experimental feature ##########
# if use experimental features
--enable_experimental_feature=true

# if use balance data feature, only work if enable_experimental_feature is true
--enable_data_balance=true

# enable udf, written in c++ only for now
#--enable_udf=false

# set the directory where the .so files of udf are stored, when enable_udf is true
#--udf_path=/data/software/nebula-graph-3.6.0/udf/

########## session ##########
# Maximum number of sessions that can be created per IP and per user
--max_sessions_per_ip_per_user=500000

########## memory tracker ##########
# trackable memory ratio (trackable_memory / (total_memory - untracked_reserved_memory) )
--memory_tracker_limit_ratio=0.3
# untracked reserved memory in Mib
--memory_tracker_untracked_reserved_memory_mb=50

# enable log memory tracker stats periodically
--memory_tracker_detail_log=true
# log memory tracker stats interval in milliseconds
--memory_tracker_detail_log_interval_ms=10000

# enable memory background purge (if jemalloc is used)
--memory_purge_enabled=true
# memory background purge interval in seconds
--memory_purge_interval_seconds=10

########## performance optimization ##########
# The max job size in multi job mode
--max_job_size=8
# The min batch size for handling dataset in multi job mode, only enabled when max_job_size is greater than 1
--min_batch_size=8192
# if true, return directly without go through RPC
--optimize_appendvertices=false
# number of paths constructed by each thread
--path_batch_size=10000


--max_plan_depth=4096
--max_allowed_statements=4096
--timezone_name=UTC+08:00
--num_operator_threads=2

storage configuration

########## basics ##########
# Whether to run as a daemon process
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-storaged-listener.pid
# Whether to use the configuration obtained from the configuration file
--local_config=true

########## logging ##########
# The directory to host logging files
--log_dir=logs
# Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
--minloglevel=0
# Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
--v=2
# Maximum seconds to buffer the log messages
--logbufsecs=0
# Whether to redirect stdout and stderr to separate output files
--redirect_stdout=true
# Destination filename of stdout and stderr, which will also reside in log_dir.
--stdout_log_file=storaged-listener-stdout.log
--stderr_log_file=storaged-listener-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=3
# Whether logging files' names contain a timestamp.
--timestamp_in_logfile_name=true

########## networking ##########
# Comma separated Meta server addresses
--meta_server_addrs=ip1:9559,ip2:9559,ip3:9559
# Local IP used to identify the nebula-storaged process.
# Change it to an address other than loopback if the service is distributed or
# will be accessed remotely.
--local_ip=ip1
# Storage daemon listening port
--port=9779
# HTTP service ip
--ws_ip=ip2
# HTTP service port
--ws_http_port=19779
# heartbeat with meta service
--heartbeat_interval_secs=10

######### Raft #########
# Raft election timeout
--raft_heartbeat_interval_secs=60
# RPC timeout for raft client (ms)
--raft_rpc_timeout_ms=500
## recycle Raft WAL
--wal_ttl=1440

########## Disk ##########
# Root data path. split by comma. e.g. --data_path=/disk1/path1/,/disk2/path2/
# One path per Rocksdb instance.
--data_path=/data/nebula2/storaged,/data1/nebula2/storaged,/data2/nebula2/storaged

# Minimum reserved bytes of each data path
--minimum_reserved_bytes=268435456

# The default reserved bytes for one batch operation
--rocksdb_batch_size=4096
# The default block cache size used in BlockBasedTable. (MB)
# recommend: 1/3 of all memory
#--rocksdb_block_cache=4096
--rocksdb_block_cache=102400


# Disable page cache to better control memory used by rocksdb.
# Caution: Make sure to allocate enough block cache if disabling page cache!
#--disable_page_cache=false
--disable_page_cache=true

# Compression algorithm, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
# For the sake of binary compatibility, the default value is snappy.
# Recommend to use:
#   * lz4 to gain more CPU performance, with the same compression ratio with snappy
#   * zstd to occupy less disk space
#   * lz4hc for the read-heavy write-light scenario
--rocksdb_compression=lz4

# Set different compressions for different levels
# For example, if --rocksdb_compression is snappy,
# "no:no:lz4:lz4::zstd" is identical to "no:no:lz4:lz4:snappy:zstd:snappy"
# In order to disable compression for level 0/1, set it to "no:no"
--rocksdb_compression_per_level=

############## rocksdb Options ##############
# rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma
#--rocksdb_db_options={"max_subcompactions":"4","max_background_jobs":"4","skip_checking_sst_file_sizes_on_db_open":"true"}
--rocksdb_db_options={"max_subcompactions":"8","max_background_jobs":"10","skip_checking_sst_file_sizes_on_db_open":"true","max_background_compactions":"8","stats_dump_period_sec":"60"}

# rocksdb ColumnFamilyOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
#--rocksdb_column_family_options={"disable_auto_compactions":"false","write_buffer_size":"67108864","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456"}
--rocksdb_column_family_options={"disable_auto_compactions":"false","write_buffer_size":"268435456","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456","level0_file_num_compaction_trigger":"8","level0_slowdown_writes_trigger":"500","level0_stop_writes_trigger":"800","min_write_buffer_number_to_merge":"3"}

# rocksdb BlockBasedTableOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
#--rocksdb_block_based_table_options={"block_size":"8192"}
--rocksdb_block_based_table_options={"block_size":"16384","cache_index_and_filter_blocks":"1","pin_l0_filter_and_index_blocks_in_cache":"1"}


# Whether or not to enable rocksdb's statistics, disabled by default
--enable_rocksdb_statistics=true

# Statslevel used by rocksdb to collection statistics, optional values are
#   * kExceptHistogramOrTimers, disable timer stats, and skip histogram stats
#   * kExceptTimers, Skip timer stats
#   * kExceptDetailedTimers, Collect all stats except time inside mutex lock AND time spent on compression.
#   * kExceptTimeForMutex, Collect all stats except the counters requiring to get time inside the mutex lock.
#   * kAll, Collect all stats
--rocksdb_stats_level=kExceptHistogramOrTimers

# Whether or not to enable rocksdb's prefix bloom filter, enabled by default.
--enable_rocksdb_prefix_filtering=false
--rocksdb_filtering_prefix_length=12
# Whether or not to enable rocksdb's whole key bloom filter, disabled by default.
--enable_rocksdb_whole_key_filtering=true

############### misc ####################
# Whether turn on query in multiple thread
--query_concurrently=true
# Whether remove outdated space data
--auto_remove_invalid_space=true
# Network IO threads number
#--num_io_threads=16
--num_io_threads=16

# Max active connections for all networking threads. 0 means no limit.
# Max connections for each networking thread = num_max_connections / num_netio_threads
--num_max_connections=0
# Worker threads number to handle request
#--num_worker_threads=32
--num_worker_threads=128

# Maximum subtasks to run admin jobs concurrently
--max_concurrent_subtasks=5
# The rate limit in bytes when leader synchronizes snapshot data
--snapshot_part_rate_limit=10485760
#--snapshot_part_rate_limit=50000000
# The amount of data sent in each batch when leader synchronizes snapshot data
--snapshot_batch_size=1048576
# The rate limit in bytes when leader synchronizes rebuilding index
--rebuild_index_part_rate_limit=4194304
# The amount of data sent in each batch when leader synchronizes rebuilding index
--rebuild_index_batch_size=1048576

########## memory tracker ##########
# trackable memory ratio (trackable_memory / (total_memory - untracked_reserved_memory) )
--memory_tracker_limit_ratio=0.65
# untracked reserved memory in Mib
--memory_tracker_untracked_reserved_memory_mb=50

# enable log memory tracker stats periodically
#--memory_tracker_detail_log=false
--memory_tracker_detail_log=true
# log memory tracker stats interval in milliseconds
--memory_tracker_detail_log_interval_ms=60000
#--memory_tracker_detail_log_interval_ms=10000
# enable memory background purge (if jemalloc is used)
--memory_purge_enabled=true
# memory background purge interval in seconds
--memory_purge_interval_seconds=10


--timezone_name=UTC+08:00
--storage_client_timeout_ms=60000
--num_compaction_threads=8
--max_edge_returned_per_vertex=100000
#--enable_partitioned_index_filter=true
--reader_handlers=64
#--move_files=true
#--max_batch_size=1024
--rocksdb_rate_limit=40

Change rocksdb_db_options to:
{"max_subcompactions":"24","max_background_jobs":"24"}

It is recommended to set the compaction concurrency to about half the number of CPU cores, e.g. 24.
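A minimal sketch of the resulting line in nebula-storaged.conf, assuming the non-compaction options already set above (skip_checking_sst_file_sizes_on_db_open, stats_dump_period_sec) are kept; max_background_compactions is dropped here because max_background_jobs supersedes it in recent RocksDB versions:

# rocksdb_db_options with compaction concurrency raised to 24
--rocksdb_db_options={"max_subcompactions":"24","max_background_jobs":"24","skip_checking_sst_file_sizes_on_db_open":"true","stats_dump_period_sec":"60"}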