nebula服务线程数控制

  • nebula 版本:2.6.1
  • 部署方式:单机
  • 安装方式:源码编译
  • 是否为线上版本:N
  • 硬件信息
    • 磁盘 SSD 512G
    • CPU 多个AMD EPYC 7502
    • 内存 24G
  • 问题的具体描述:观察到 metad 进程的线程数超过 350 个,希望能够对其进行控制,不过配置里似乎没有相关参数?
$ ps -eLF | grep nebula-metad | wc -l
356
$ curl -G "http://127.0.0.1:34802/flags"
check_plan_killed_frequency=8
cluster_id_path="cluster.id"
expired_time_factor=5
heartbeat_interval_secs=10
meta_client_retry_interval_secs=1
meta_client_retry_times=3
meta_client_timeout_ms=60000
slow_op_threshhold_ms=50
containerized=0
system_memory_high_watermark_ratio=0.8
gflags_mode_json="share/resources/gflags.json"
ca_path=""
cert_path=""
enable_graph_ssl=0
enable_meta_ssl=0
enable_ssl=0
key_path=""
password_path=""
conn_timeout_ms=1000
timezone_file="share/resources/date_time_zonespec.csv"
timezone_name="UTC+00:00:00"
daemonize=0
data_path="data/meta"
local_ip="127.0.0.1"
meta_http_thread_num=3
meta_server_addrs="127.0.0.1:34800"
num_io_threads=16
num_worker_threads=32
pid_file="pids/nebula-metad.pid"
port=34800
reuse_port=1
redirect_stdout=1
stderr_log_file="metad-stderr.log"
stdout_log_file="metad-stdout.log"
disk_check_interval_secs=10
minimum_reserved_bytes=1073741824
ft_bulk_batch_size=100
ft_request_retry_times=3
listener_commit_batch_size=1000
listener_commit_interval_secs=1
listener_pursue_leader_threshold=1000
snapshot_batch_size=524288
snapshot_part_rate_limit=8388608
auto_remove_invalid_space=0
clean_wal_interval_secs=600
custom_filter_interval_secs=86400
engine_type="rocksdb"
num_workers=4
cluster_id=0
skip_wait_in_rate_limiter=0
move_files=0
cache_bucket_exp=8
enable_partitioned_index_filter=0
enable_rocksdb_prefix_filtering=1
enable_rocksdb_statistics=0
enable_rocksdb_whole_key_filtering=0
num_compaction_threads=0
rocksdb_backup_dir=""
rocksdb_backup_interval_secs=300
rocksdb_batch_size=4096
rocksdb_block_based_table_options="{}"
rocksdb_block_cache=1024
rocksdb_column_family_options="{}"
rocksdb_compact_change_level=1
rocksdb_compact_target_level=-1
rocksdb_compression="snappy"
rocksdb_compression_per_level=""
rocksdb_db_options="{}"
rocksdb_disable_wal=0
rocksdb_rate_limit=0
rocksdb_row_cache_num=16000000
rocksdb_stats_level="kExceptHistogramOrTimers"
rocksdb_table_format="BlockBasedTable"
rocksdb_wal_dir=""
rocksdb_wal_sync=0
max_appendlog_batch_size=128
max_outstanding_requests=1024
raft_rpc_timeout_ms=500
max_batch_size=256
raft_heartbeat_interval_secs=5
raft_snapshot_timeout=300
trace_raft=0
snapshot_io_threads=4
snapshot_send_retry_times=3
snapshot_send_timeout_ms=60000
snapshot_worker_threads=4
wal_buffer_size=8388608
wal_file_size=16777216
wal_sync=0
wal_ttl=14400
null_type=1
print_info=0
string_index_limit=64
meta_ingest_thread_num=3
max_retry_times_admin_op=30
task_concurrency=10
leader_balance_deviation=0.05
client_white_list=":2.5.0:2.5.1:2.6.0"
enable_client_white_list=1
job_check_intervals=5000
job_expired_secs=604800
default_parts_num=100
default_replica_factor=1
removed_threshold_sec=86400
ws_meta_h2_port=11002
ws_meta_http_port=11000
ws_storage_h2_port=12002
ws_storage_http_port=34704
ws_h2_port=34803
ws_http_port=34802
ws_ip="127.0.0.1"
ws_threads=4
codel_enabled=0
thrift_cpp2_protocol_reader_container_limit=0
thrift_cpp2_protocol_reader_string_limit=0
thrift_server_request_debug_log_entries_max=10000
service_identity=""
thrift_abort_if_exceeds_shutdown_deadline=1
thrift_ssl_policy="disabled"
dynamic_cputhreadpoolexecutor=1
codel_interval=100
codel_target_delay=5
dynamic_iothreadpoolexecutor=1
threadtimeout_ms=60000
observer_manager_pool_size=4
logging=""
folly_hazptr_use_executor=1
flagfile="/home/work/nebula/etc/nebula-metad.conf"
fromenv=""
tryfromenv=""
undefok=""
tab_completion_columns=80
tab_completion_word=""
help=0
helpfull=0
helpmatch=""
helpon=""
helppackage=0
helpshort=0
helpxml=0
version=0
zlib_compressor_buffer_growth=2024
s2shape_index_cell_size_to_long_edge_ratio=1
s2shape_index_default_max_edges_per_cell=10
s2shape_index_tmp_memory_budget_mb=100
s2cell_union_decode_max_num_cells=1000000
s2debug=0
s2loop_lazy_indexing=1
s2polygon_decode_max_num_vertices=50000000
s2polygon_decode_max_num_loops=10000000
s2polygon_lazy_indexing=1
dcache_unit_test=0
alsologtoemail=""
alsologtostderr=0
colorlogtostderr=0
drop_log_memory=1
log_backtrace_at=""
log_dir="logs"
log_link=""
log_prefix=1
logbuflevel=0
logbufsecs=0
logemaillevel=999
logfile_mode=436
logmailer="/bin/mail"
logtostderr=0
max_log_size=1800
minloglevel=0
stderrthreshold=0
stop_logging_if_full_disk=0
symbolize_stacktrace=1
v=0
vmodule=""

确实没有。目前 storage 里有 thread 相关的控制参数(见下),meta 里还没有。请问这个线程数有给你造成什么实际问题么?

num_io_threads	16	网络 I/O 线程的数量,用于发送 RPC 请求和接收响应。
num_worker_threads	32	RPC 服务的工作线程数量。(以上说明摘自 storage 配置文档,metad 中的同名参数含义类似。)

可以来 GitHub 提一个增加 metad 线程数控制的需求(issue)么?请尽量详细地描述在没有相关控制参数的情况下你观察到的现象和担心的问题,这会很有帮助,感谢。

此话题已在最后回复的 30 天后被自动关闭。不再允许新回复。