Nebula Exchange: exception when generating SST files

Nebula version: 2.5.0
Deployment: standalone
Production version: Y

I am currently generating SST files from Hive. The following error is thrown while the SST files are being generated to HDFS.

Exception in thread "main" org.apache.spark.SparkException: Task not serializable
	at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:403)
	at org.apache.spark.util.ClosureCleaner$.org$apache$spark$util$ClosureCleaner$$clean(ClosureCleaner.scala:393)
	at org.apache.spark.util.ClosureCleaner$.clean(ClosureCleaner.scala:162)
	at org.apache.spark.SparkContext.clean(SparkContext.scala:2346)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1.apply(RDD.scala:850)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsWithIndex$1.apply(RDD.scala:849)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
	at org.apache.spark.rdd.RDD.mapPartitionsWithIndex(RDD.scala:849)
	at org.apache.spark.sql.execution.WholeStageCodegenExec.doExecute(WholeStageCodegenExec.scala:618)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
	at org.apache.spark.sql.execution.DeserializeToObjectExec.doExecute(objects.scala:89)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:80)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:80)
	at org.apache.spark.sql.Dataset.rdd$lzycompute(Dataset.scala:3037)
	at org.apache.spark.sql.Dataset.rdd(Dataset.scala:3035)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply$mcV$sp(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$withNewRDDExecutionId$1.apply(Dataset.scala:3349)
	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
	at org.apache.spark.sql.Dataset.withNewRDDExecutionId(Dataset.scala:3345)
	at org.apache.spark.sql.Dataset.foreachPartition(Dataset.scala:2734)
	at com.vesoft.nebula.exchange.processor.VerticesProcessor.process(VerticesProcessor.scala:180)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:152)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:128)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at com.vesoft.nebula.exchange.Exchange$.main(Exchange.scala:128)
	at com.vesoft.nebula.exchange.Exchange.main(Exchange.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
	at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:851)
	at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:167)
	at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:195)
	at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
	at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:926)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:935)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.io.NotSerializableException: com.google.common.net.HostAndPort
Serialization stack:
	- object not serializable (class: com.google.common.net.HostAndPort, value: 10.172.7.178:9559)
	- writeObject data (class: scala.collection.immutable.List$SerializationProxy)
	- object (class scala.collection.immutable.List$SerializationProxy, scala.collection.immutable.List$SerializationProxy@7bc239db)
	- writeReplace data (class: scala.collection.immutable.List$SerializationProxy)
	- object (class scala.collection.immutable.$colon$colon, List(10.172.7.178:9559))
	- field (class: com.vesoft.nebula.exchange.processor.VerticesProcessor$$anonfun$process$1, name: address$1, type: class scala.collection.immutable.List)
	- object (class com.vesoft.nebula.exchange.processor.VerticesProcessor$$anonfun$process$1, <function1>)
	- field (class: org.apache.spark.sql.execution.MapPartitionsExec, name: func, type: interface scala.Function1)
	- object (class org.apache.spark.sql.execution.MapPartitionsExec, MapPartitions <function1>, obj#11: scala.Tuple2
+- DeserializeToObject createexternalrow(a#0.toString, b#1.toString, StructField(a,StringType,true), StructField(b,StringType,true)), obj#10: org.apache.spark.sql.Row
   +- SortAggregate(key=[a#0], functions=[first(b#1, false)], output=[a#0, b#1])
      +- *(2) Sort [a#0 ASC NULLS FIRST], false, 0
         +- Exchange hashpartitioning(a#0, 1)
            +- SortAggregate(key=[a#0], functions=[partial_first(b#1, false)], output=[a#0, first#22, valueSet#23])
               +- *(1) Sort [a#0 ASC NULLS FIRST], false, 0
                  +- Exchange RoundRobinPartitioning(32)
                     +- Scan hive default.employee [a#0, b#1], HiveTableRelation `default`.`employee`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, [a#0, b#1]
)
	- field (class: org.apache.spark.sql.execution.InputAdapter, name: child, type: class org.apache.spark.sql.execution.SparkPlan)
	- object (class org.apache.spark.sql.execution.InputAdapter, MapPartitions <function1>, obj#11: scala.Tuple2
+- DeserializeToObject createexternalrow(a#0.toString, b#1.toString, StructField(a,StringType,true), StructField(b,StringType,true)), obj#10: org.apache.spark.sql.Row
   +- SortAggregate(key=[a#0], functions=[first(b#1, false)], output=[a#0, b#1])
      +- *(2) Sort [a#0 ASC NULLS FIRST], false, 0
         +- Exchange hashpartitioning(a#0, 1)
            +- SortAggregate(key=[a#0], functions=[partial_first(b#1, false)], output=[a#0, first#22, valueSet#23])
               +- *(1) Sort [a#0 ASC NULLS FIRST], false, 0
                  +- Exchange RoundRobinPartitioning(32)
                     +- Scan hive default.employee [a#0, b#1], HiveTableRelation `default`.`employee`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, [a#0, b#1]
)
	- field (class: org.apache.spark.sql.execution.SerializeFromObjectExec, name: child, type: class org.apache.spark.sql.execution.SparkPlan)
	- object (class org.apache.spark.sql.execution.SerializeFromObjectExec, SerializeFromObject [input[0, scala.Tuple2, true]._1 AS value#6 AS _1#12, input[0, scala.Tuple2, true]._2 AS value#7 AS _2#13]
+- MapPartitions <function1>, obj#11: scala.Tuple2
   +- DeserializeToObject createexternalrow(a#0.toString, b#1.toString, StructField(a,StringType,true), StructField(b,StringType,true)), obj#10: org.apache.spark.sql.Row
      +- SortAggregate(key=[a#0], functions=[first(b#1, false)], output=[a#0, b#1])
         +- *(2) Sort [a#0 ASC NULLS FIRST], false, 0
            +- Exchange hashpartitioning(a#0, 1)
               +- SortAggregate(key=[a#0], functions=[partial_first(b#1, false)], output=[a#0, first#22, valueSet#23])
                  +- *(1) Sort [a#0 ASC NULLS FIRST], false, 0
                     +- Exchange RoundRobinPartitioning(32)
                        +- Scan hive default.employee [a#0, b#1], HiveTableRelation `default`.`employee`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, [a#0, b#1]
)
	- field (class: org.apache.spark.sql.execution.ProjectExec, name: child, type: class org.apache.spark.sql.execution.SparkPlan)
	- object (class org.apache.spark.sql.execution.ProjectExec, Project [_1#12 AS key#14, _2#13 AS value#15]
+- SerializeFromObject [input[0, scala.Tuple2, true]._1 AS value#6 AS _1#12, input[0, scala.Tuple2, true]._2 AS value#7 AS _2#13]
   +- MapPartitions <function1>, obj#11: scala.Tuple2
      +- DeserializeToObject createexternalrow(a#0.toString, b#1.toString, StructField(a,StringType,true), StructField(b,StringType,true)), obj#10: org.apache.spark.sql.Row
         +- SortAggregate(key=[a#0], functions=[first(b#1, false)], output=[a#0, b#1])
            +- *(2) Sort [a#0 ASC NULLS FIRST], false, 0
               +- Exchange hashpartitioning(a#0, 1)
                  +- SortAggregate(key=[a#0], functions=[partial_first(b#1, false)], output=[a#0, first#22, valueSet#23])
                     +- *(1) Sort [a#0 ASC NULLS FIRST], false, 0
                        +- Exchange RoundRobinPartitioning(32)
                           +- Scan hive default.employee [a#0, b#1], HiveTableRelation `default`.`employee`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, [a#0, b#1]
)
	- field (class: org.apache.spark.sql.execution.SortExec, name: child, type: class org.apache.spark.sql.execution.SparkPlan)
	- object (class org.apache.spark.sql.execution.SortExec, Sort [key#14 ASC NULLS FIRST], false, 0
+- Project [_1#12 AS key#14, _2#13 AS value#15]
   +- SerializeFromObject [input[0, scala.Tuple2, true]._1 AS value#6 AS _1#12, input[0, scala.Tuple2, true]._2 AS value#7 AS _2#13]
      +- MapPartitions <function1>, obj#11: scala.Tuple2
         +- DeserializeToObject createexternalrow(a#0.toString, b#1.toString, StructField(a,StringType,true), StructField(b,StringType,true)), obj#10: org.apache.spark.sql.Row
            +- SortAggregate(key=[a#0], functions=[first(b#1, false)], output=[a#0, b#1])
               +- *(2) Sort [a#0 ASC NULLS FIRST], false, 0
                  +- Exchange hashpartitioning(a#0, 1)
                     +- SortAggregate(key=[a#0], functions=[partial_first(b#1, false)], output=[a#0, first#22, valueSet#23])
                        +- *(1) Sort [a#0 ASC NULLS FIRST], false, 0
                           +- Exchange RoundRobinPartitioning(32)
                              +- Scan hive default.employee [a#0, b#1], HiveTableRelation `default`.`employee`, org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, [a#0, b#1]
)
	- element of array (index: 0)
	- array (class [Ljava.lang.Object;, size 4)
	- field (class: org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$11, name: references$1, type: class [Ljava.lang.Object;)
	- object (class org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$11, <function2>)
	at org.apache.spark.serializer.SerializationDebugger$.improveException(SerializationDebugger.scala:40)
	at org.apache.spark.serializer.JavaSerializationStream.writeObject(JavaSerializer.scala:46)
	at org.apache.spark.serializer.JavaSerializerInstance.serialize(JavaSerializer.scala:100)
	at org.apache.spark.util.ClosureCleaner$.ensureSerializable(ClosureCleaner.scala:400)
	... 54 more
2021-10-25 14:58:14 INFO  SparkContext:57 - Invoking stop() from shutdown hook
2021-10-25 14:58:14 INFO  AbstractConnector:318 - Stopped Spark@3301500b{HTTP/1.1,[http/1.1]}{0.0.0.0:4040}
2021-10-25 14:58:14 INFO  SparkUI:57 - Stopped Spark web UI at http://bj-ansyb-crs-15.cloud.onecloud.io:4040
2021-10-25 14:58:14 INFO  MapOutputTrackerMasterEndpoint:57 - MapOutputTrackerMasterEndpoint stopped!
2021-10-25 14:58:14 INFO  MemoryStore:57 - MemoryStore cleared
2021-10-25 14:58:14 INFO  BlockManager:57 - BlockManager stopped
2021-10-25 14:58:14 INFO  BlockManagerMaster:57 - BlockManagerMaster stopped

What version of the guava package is in your environment?
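For context: the root cause in the stack above is java.io.NotSerializableException: com.google.common.net.HostAndPort. HostAndPort only implements Serializable in newer guava releases, so an older guava on the cluster classpath (CDH ships guava-11.0.2, visible in the YARN launch context later in this thread) shadowing the version Exchange expects will break Spark's closure serialization. One way to see which guava jars are in play, assuming a standard Spark/CDH layout:

# list the guava jars that Spark and Hadoop put on the classpath
ls "$SPARK_HOME"/jars | grep -i guava
hadoop classpath | tr ':' '\n' | grep -i guava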

It was indeed the guava problem; a demo dataset now runs through. Also note that the submit mode cannot be local: the job has to be submitted through YARN.
Thanks, the issue is resolved.
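For reference, a YARN submission looks roughly like this (the class name is taken from the stack trace above, the jar and config file names from the YARN logs below; paths are illustrative):

spark-submit --master yarn --deploy-mode cluster \
    --class com.vesoft.nebula.exchange.Exchange \
    nebula-exchange-2.5.1.jar \
    -c export_sst.applications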

The SST files have now been generated on HDFS. After configuring the CDH client I hit a HADOOP_HOME error; even after setting the environment and restarting the meta service, the SST files on HDFS still cannot be downloaded completely. It looks like not all of them came down: after running the INGEST command, only part of the data was loaded successfully. Please help take a look, thanks.
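A minimal sanity check, assuming the CDH parcel layout, is to confirm that the user running the nebula services can execute the same hdfs command that metad shells out to (see the HdfsCommandHelper log below):

# HADOOP_HOME below is the CDH parcel default; adjust to your install
export HADOOP_HOME=/opt/cloudera/parcels/CDH/lib/hadoop
export PATH="$HADOOP_HOME/bin:$PATH"
hdfs dfs -ls hdfs://10.172.7.214:8020/sst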

Console error:

(root@nebula) [gan_test13]> DOWNLOAD  HDFS "hdfs://10.172.7.214:8020/sst";
[ERROR (-1005)]: Download failed!

Tue, 26 Oct 2021 11:37:14 CST

(root@nebula) [gan_test13]> ingest
Execution succeeded (time spent 86776/87587 us)

Tue, 26 Oct 2021 11:37:31 CST


Log errors:

I1026 11:36:45.897601 199714 HdfsCommandHelper.cpp:18] Running HDFS Command: hdfs dfs -ls hdfs://10.172.7.214:8020/sst

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0

I1026 11:36:47.814124 199707 HttpClient.cpp:15] HTTP Get Command: /usr/bin/curl -G "http://10.172.7.185:19779/download?host=10.172.7.214&port=8020&path=/sst&parts=1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50&space=2302"
I1026 11:36:47.814129 199708 HttpClient.cpp:15] HTTP Get Command: /usr/bin/curl -G "http://10.172.7.187:19779/download?host=10.172.7.214&port=8020&path=/sst&parts=1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50&space=2302"
I1026 11:36:47.814124 199709 HttpClient.cpp:15] HTTP Get Command: /usr/bin/curl -G "http://10.172.7.192:19779/download?host=10.172.7.214&port=8020&path=/sst&parts=1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50&space=2302"

  0     0    0     0    0     0      0      0 --:--:--  0:00:01 --:--:--     0
  ...
  0     0    0     0    0     0      0      0 --:--:--  0:00:26 --:--:--     0

100    29  100    29    0     0      1      0  0:00:29  0:00:26  0:00:03     8

I1026 11:37:14.316673 199714 MetaHttpDownloadHandler.cpp:199] Download tasks have finished
E1026 11:37:14.316876 199714 MetaHttpDownloadHandler.cpp:96] SSTFile dispatch failed
                                 Dload  Upload   Total   Spent    Left  Speed
100    27  100    27    0     0   5075      0 --:--:-- --:--:-- --:--:--     5

Do I need to install the CDH client on every machine? So far I have only installed it on the meta node.

You need to install Hadoop on every machine where a storaged instance runs.
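Once the client is installed, you can sanity-check a single storaged host by replaying the download request that metad issues (the URL is taken verbatim from the HttpClient log lines above, with the part list shortened to one part for readability):

curl -G "http://10.172.7.185:19779/download?host=10.172.7.214&port=8020&path=/sst&parts=1&space=2302"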

:joy::joy::joy:

After submitting a small demo via spark-submit, the data on HDFS has been generated as SST files, but the import failed; I am installing the CDH client now.
Running 100 million entities (58 GB in total) through spark-submit took about 4 hours, whereas importing the same data with importer previously took under 2 hours. Is SST generation slow because the sorting is so time-consuming? Also, it errors out as follows:
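(Context on the timing: SST files store keys in sorted order, so Exchange has to repartition and sort the whole dataset before writing, and that sort is the expensive step; raising executor parallelism usually helps. A generic tuning sketch, with all resource values illustrative rather than taken from this job:)

spark-submit --master yarn --deploy-mode cluster \
    --num-executors 10 \
    --executor-cores 4 \
    --executor-memory 8g \
    --class com.vesoft.nebula.exchange.Exchange \
    nebula-exchange-2.5.1.jar \
    -c export_sst.applications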


Log Type: stderr

Log Upload Time: Mon Oct 25 23:37:36 +0800 2021

Log Length: 459421

21/10/25 21:49:29 INFO util.SignalUtils: Registered signal handler for TERM
21/10/25 21:49:29 INFO util.SignalUtils: Registered signal handler for HUP
21/10/25 21:49:29 INFO util.SignalUtils: Registered signal handler for INT
21/10/25 21:49:30 INFO spark.SecurityManager: Changing view acls to: yarn,root
21/10/25 21:49:30 INFO spark.SecurityManager: Changing modify acls to: yarn,root
21/10/25 21:49:30 INFO spark.SecurityManager: Changing view acls groups to: 
21/10/25 21:49:30 INFO spark.SecurityManager: Changing modify acls groups to: 
21/10/25 21:49:30 INFO spark.SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users  with view permissions: Set(yarn, root); groups with view permissions: Set(); users  with modify permissions: Set(yarn, root); groups with modify permissions: Set()
21/10/25 21:49:30 INFO yarn.ApplicationMaster: ApplicationAttemptId: appattempt_1633660048269_0337_000002
21/10/25 21:49:30 INFO yarn.ApplicationMaster: Starting the user application in a separate Thread
21/10/25 21:49:30 INFO yarn.ApplicationMaster: Waiting for spark context initialization...
21/10/25 21:49:30 INFO config.Configs$: 10.172.7.205===export_sst.applications========export_sst.applications
...
21/10/25 21:49:32 INFO ui.JettyUtils: Adding filter org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter to /jobs, /jobs/json, /jobs/job, /jobs/job/json, /stages, /stages/json, /stages/stage, /stages/stage/json, /stages/pool, /stages/pool/json, /storage, /storage/json, /storage/rdd, /storage/rdd/json, /environment, /environment/json, /executors, /executors/json, /executors/threadDump, /executors/threadDump/json, /static, /, /api, /jobs/job/kill, /stages/stage/kill.
21/10/25 21:49:32 INFO server.Server: jetty-9.3.z-SNAPSHOT, build timestamp: 2018-09-05T05:11:46+08:00, git hash: 3ce520221d0240229c862b122d2b06c12a625732
21/10/25 21:49:32 INFO server.Server: Started @3521ms
21/10/25 21:49:32 INFO server.AbstractConnector: Started ServerConnector@2ed693e1{HTTP/1.1,[http/1.1]}{0.0.0.0:31677}
21/10/25 21:49:32 INFO util.Utils: Successfully started service 'SparkUI' on port 31677.
...
21/10/25 21:49:32 INFO ui.SparkUI: Bound SparkUI to 0.0.0.0, and started at http://bj-azsyb-demo4:31677
21/10/25 21:49:32 INFO cluster.YarnClusterScheduler: Created YarnClusterScheduler
21/10/25 21:49:32 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:32 INFO util.Utils: Using initial executors = 0, max of spark.dynamicAllocation.initialExecutors, spark.dynamicAllocation.minExecutors and spark.executor.instances
21/10/25 21:49:32 INFO cluster.SchedulerExtensionServices: Starting Yarn extension services with app application_1633660048269_0337 and attemptId Some(appattempt_1633660048269_0337_000002)
21/10/25 21:49:32 INFO util.Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 31191.
21/10/25 21:49:32 INFO netty.NettyBlockTransferService: Server created on bj-azsyb-demo4:31191
21/10/25 21:49:32 INFO storage.BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
21/10/25 21:49:32 INFO storage.BlockManagerMaster: Registering BlockManager BlockManagerId(driver, bj-azsyb-demo4, 31191, None)
21/10/25 21:49:32 INFO storage.BlockManagerMasterEndpoint: Registering block manager bj-azsyb-demo4:31191 with 366.3 MB RAM, BlockManagerId(driver, bj-azsyb-demo4, 31191, None)
21/10/25 21:49:32 INFO storage.BlockManagerMaster: Registered BlockManager BlockManagerId(driver, bj-azsyb-demo4, 31191, None)
21/10/25 21:49:32 INFO storage.BlockManager: external shuffle service port = 7337
21/10/25 21:49:32 INFO storage.BlockManager: Initialized BlockManager: BlockManagerId(driver, bj-azsyb-demo4, 31191, None)
21/10/25 21:49:32 INFO ui.JettyUtils: Adding filter org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter to /metrics/json.
21/10/25 21:49:32 INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@fe7d70e{/metrics/json,null,AVAILABLE,@Spark}
21/10/25 21:49:33 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:34 INFO scheduler.EventLoggingListener: Logging events to hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/spark/applicationHistory/application_1633660048269_0337_2
21/10/25 21:49:34 INFO util.Utils: Using initial executors = 0, max of spark.dynamicAllocation.initialExecutors, spark.dynamicAllocation.minExecutors and spark.executor.instances
21/10/25 21:49:34 WARN cluster.YarnSchedulerBackend$YarnSchedulerEndpoint: Attempted to request executors before the AM has registered!
21/10/25 21:49:34 WARN lineage.LineageWriter: Lineage directory /var/log/spark/lineage doesn't exist or is not writable. Lineage for this application will be disabled.
21/10/25 21:49:34 INFO util.Utils: Extension com.cloudera.spark.lineage.NavigatorAppListener not being initialized.
21/10/25 21:49:34 INFO client.RMProxy: Connecting to ResourceManager at bj-azsyb-demo2.cloud.onecloud.io/10.172.7.214:8030
21/10/25 21:49:34 INFO yarn.YarnRMClient: Registering the ApplicationMaster
21/10/25 21:49:34 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:34 INFO yarn.ApplicationMaster: Preparing Local resources
21/10/25 21:49:35 INFO yarn.ApplicationMaster: 
===============================================================================
YARN executor launch context:
  env:
    CLASSPATH -> {{HADOOP_COMMON_HOME}}/../../../CDH/lib/spark/jars/guava-14.0.1.jar<CPS>{{PWD}}<CPS>{{PWD}}/__spark_conf__<CPS>{{PWD}}/__spark_libs__/*<CPS>{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/spark/jars/*<CPS>{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/spark/hive/*<CPS>$HADOOP_CLIENT_CONF_DIR<CPS>$HADOOP_COMMON_HOME/*<CPS>$HADOOP_COMMON_HOME/lib/*<CPS>$HADOOP_HDFS_HOME/*<CPS>$HADOOP_HDFS_HOME/lib/*<CPS>$HADOOP_YARN_HOME/*<CPS>$HADOOP_YARN_HOME/lib/*<CPS>$HADOOP_CLIENT_CONF_DIR<CPS>$PWD/mr-framework/*<CPS>$MR2_CLASSPATH<CPS>{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/accessors-smart-1.2.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/accessors-smart.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/asm-5.0.4.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/asm.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/avro.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/aws-java-sdk-bundle-1.11.271.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/aws-java-sdk-bundle.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/azure-data-lake-store-sdk-2.2.9.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/azure-data-lake-store-sdk.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-beanutils-1.9.4.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-beanutils.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-cli-1.2.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-cli.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-codec-1.11.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-codec.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-collections-3.2.2.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-collections.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-compress-1.18.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-compress.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-configuration2-2.1.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-configuration2.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-io-2.6.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-io.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-lang-2.6.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-lang.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-lang3-3.7.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-lang3.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-logging-1.1.3.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.
3.1.p0.1470567/lib/hadoop/client/commons-logging.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-math3-3.1.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-math3.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-net-3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-net.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-client-2.12.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-client.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-framework-2.12.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-framework.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-recipes-2.12.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-recipes.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/gson-2.2.4.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/gson.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/guava-11.0.2.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/guava.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-annotations-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-annotations.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-auth-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-auth.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-aws-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-aws.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-azure-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-azure-datalake-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-azure-datalake.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-azure.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-common-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-common.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-hdfs-client-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-hdfs-client.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-common-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-common.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-core-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-core.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.147
0567/lib/hadoop/client/hadoop-mapreduce-client-jobclient-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-jobclient.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-api-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-api.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-client-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-client.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-common-3.0.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-common.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/htrace-core4-4.1.0-incubating.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/htrace-core4.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/httpclient-4.5.3.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/httpclient.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/httpcore-4.4.6.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/httpcore.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/javax.activation-api-1.2.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/javax.activation-api.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jaxb-api-2.2.11.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jaxb-api.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jcip-annotations-1.0-1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jcip-annotations.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/json-smart-2.3.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/json-smart.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsp-api-2.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsp-api.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsr305-3.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsr305.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsr311-api-1.1.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsr311-api.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-admin-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-admin.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-client-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-client.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-common-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-common.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/
lib/hadoop/client/kerb-core-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-core.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-crypto-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-crypto.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-identity-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-identity.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-server-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-server.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-simplekdc-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-simplekdc.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-util-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-util.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-asn1-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-asn1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-config-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-config.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-pkix-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-pkix.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-util-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-util.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-xdr-1.0.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-xdr.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/log4j-1.2.17.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/log4j.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/nimbus-jose-jwt-4.41.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/nimbus-jose-jwt.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/okhttp-2.7.5.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/okhttp.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/okio-1.6.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/okio.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/paranamer-2.8.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/paranamer.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/protobuf-java-2.5.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/protobuf-java.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/re2j-1.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/re2j.jar:{{HADOOP_COMMON_HOME}}/../../../C
DH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/snappy-java-1.1.4.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/snappy-java.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/stax2-api-3.1.4.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/stax2-api.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/wildfly-openssl-1.0.4.Final.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/wildfly-openssl.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/woodstox-core-5.0.3.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/woodstox-core.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/xz-1.6.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/xz.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/client-facing-thirdparty/audience-annotations-0.5.0.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/client-facing-thirdparty/commons-logging-1.2.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/client-facing-thirdparty/findbugs-annotations-1.3.9-1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/client-facing-thirdparty/htrace-core4-4.2.0-incubating.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/shaded-clients/hbase-shaded-mapreduce-2.1.0-cdh6.3.1.jar:{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/hbase-spark.jar<CPS>{{PWD}}/__spark_conf__/__hadoop_conf__
    MKL_NUM_THREADS -> 1
    SPARK_DIST_CLASSPATH -> /opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/accessors-smart-1.2.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/accessors-smart.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/asm-5.0.4.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/asm.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/avro.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/aws-java-sdk-bundle-1.11.271.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/aws-java-sdk-bundle.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/azure-data-lake-store-sdk-2.2.9.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/azure-data-lake-store-sdk.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-beanutils-1.9.4.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-beanutils.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-cli-1.2.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-cli.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-codec-1.11.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-codec.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-collections-3.2.2.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-collections.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-compress-1.18.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-compress.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-configuration2-2.1.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-configuration2.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-io-2.6.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-io.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-lang-2.6.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-lang.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-lang3-3.7.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-lang3.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-logging-1.1.3.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-logging.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-math3-3.1.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-math3.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-net-3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/commons-net.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-client-2.12.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-client.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-framework-2.12.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-framework.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0
.1470567/lib/hadoop/client/curator-recipes-2.12.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/curator-recipes.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/gson-2.2.4.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/gson.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/guava-11.0.2.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/guava.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-annotations-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-annotations.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-auth-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-auth.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-aws-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-aws.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-azure-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-azure-datalake-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-azure-datalake.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-azure.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-common-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-common.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-hdfs-client-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-hdfs-client.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-common-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-common.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-core-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-core.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-jobclient-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-mapreduce-client-jobclient.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-api-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-api.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-client-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-client.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-common-3.0.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/hadoop-yarn-common.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/htrace-core4-4.1.0-incubating.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/htrace-core4.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/httpclient-4.5.3.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/httpclient.jar:/opt/clo
udera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/httpcore-4.4.6.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/httpcore.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/javax.activation-api-1.2.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/javax.activation-api.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jaxb-api-2.2.11.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jaxb-api.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jcip-annotations-1.0-1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jcip-annotations.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/json-smart-2.3.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/json-smart.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsp-api-2.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsp-api.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsr305-3.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsr305.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsr311-api-1.1.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/jsr311-api.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-admin-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-admin.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-client-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-client.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-common-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-common.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-core-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-core.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-crypto-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-crypto.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-identity-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-identity.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-server-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-server.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-simplekdc-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-simplekdc.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerb-util.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-asn1-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-asn1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-config-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-config.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-pkix-1.0.0.jar:/opt/cloudera/parcels
/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-pkix.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-util-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-util.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-xdr-1.0.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/kerby-xdr.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/log4j-1.2.17.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/log4j.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/nimbus-jose-jwt-4.41.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/nimbus-jose-jwt.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/okhttp-2.7.5.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/okhttp.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/okio-1.6.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/okio.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/paranamer-2.8.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/paranamer.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/protobuf-java-2.5.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/protobuf-java.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/re2j-1.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/re2j.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/snappy-java-1.1.4.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/snappy-java.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/stax2-api-3.1.4.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/stax2-api.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/wildfly-openssl-1.0.4.Final.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/wildfly-openssl.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/woodstox-core-5.0.3.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/woodstox-core.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/xz-1.6.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/client/xz.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/client-facing-thirdparty/audience-annotations-0.5.0.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/client-facing-thirdparty/commons-logging-1.2.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/client-facing-thirdparty/findbugs-annotations-1.3.9-1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/client-facing-thirdparty/htrace-core4-4.2.0-incubating.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/bin/../lib/shaded-clients/hbase-shaded-mapreduce-2.1.0-cdh6.3.1.jar:/opt/cloudera/parcels/CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hbase/hbase-spark.jar
    SPARK_YARN_STAGING_DIR -> hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/root/.sparkStaging/application_1633660048269_0337
    SPARK_USER -> root
    OPENBLAS_NUM_THREADS -> 1

  command:
    LD_LIBRARY_PATH=\"{{HADOOP_COMMON_HOME}}/../../../CDH-6.3.1-1.cdh6.3.1.p0.1470567/lib/hadoop/lib/native:$LD_LIBRARY_PATH\" \ 
      {{JAVA_HOME}}/bin/java \ 
      -server \ 
      -Xmx1024m \ 
      -Djava.io.tmpdir={{PWD}}/tmp \ 
      '-Dspark.authenticate=false' \ 
      '-Dspark.driver.port=13505' \ 
      '-Dspark.network.crypto.enabled=false' \ 
      '-Dspark.shuffle.service.port=7337' \ 
      '-Dspark.ui.port=0' \ 
      -Dspark.yarn.app.container.log.dir=<LOG_DIR> \ 
      -XX:OnOutOfMemoryError='kill %p' \ 
      org.apache.spark.executor.CoarseGrainedExecutorBackend \ 
      --driver-url \ 
      spark://CoarseGrainedScheduler@bj-azsyb-demo4:13505 \ 
      --executor-id \ 
      <executorId> \ 
      --hostname \ 
      <hostname> \ 
      --cores \ 
      1 \ 
      --app-id \ 
      application_1633660048269_0337 \ 
      --user-class-path \ 
      file:$PWD/__app__.jar \ 
      1><LOG_DIR>/stdout \ 
      2><LOG_DIR>/stderr

  resources:
    __app__.jar -> resource { scheme: "hdfs" host: "bj-azsyb-demo2.cloud.onecloud.io" port: 8020 file: "/user/root/.sparkStaging/application_1633660048269_0337/nebula-exchange-2.5.1.jar" } size: 201751298 timestamp: 1635161532058 type: FILE visibility: PRIVATE
    export_sst.applications -> resource { scheme: "hdfs" host: "bj-azsyb-demo2.cloud.onecloud.io" port: 8020 file: "/user/root/.sparkStaging/application_1633660048269_0337/export_sst.applications" } size: 5225 timestamp: 1635161532381 type: FILE visibility: PRIVATE
    __spark_conf__ -> resource { scheme: "hdfs" host: "bj-azsyb-demo2.cloud.onecloud.io" port: 8020 file: "/user/root/.sparkStaging/application_1633660048269_0337/__spark_conf__.zip" } size: 165877 timestamp: 1635161533261 type: ARCHIVE visibility: PRIVATE

===============================================================================
...
21/10/25 21:49:35 INFO exchange.Exchange$: Processing Tag person2
21/10/25 21:49:35 INFO exchange.Exchange$: field keys: _c0, _c1, _c2, _c3, _c4, _c5, _c6, _c7, _c8, _c9, _c10, _c11, _c12, _c13, _c14, _c15, _c16, _c17, _c18, _c19, _c20, _c21, _c22, _c23, _c24, _c25, _c26, _c27, _c28, _c29, _c30, _c31, _c32, _c33, _c34, _c35, _c36, _c37, _c38, _c39, _c40, _c41, _c42, _c43, _c44, _c45, _c46, _c47, _c48, _c49, _c50, _c51, _c52, _c53, _c54, _c55, _c56, _c57, _c58, _c59, _c60, _c61, _c62, _c63, _c64, _c65, _c66, _c67, _c68, _c69, _c70, _c71, _c72, _c73, _c74, _c75, _c76, _c77, _c78, _c79, _c80, _c81, _c82, _c83, _c84, _c85, _c86, _c87, _c88, _c89, _c90, _c91, _c92, _c93, _c94, _c95, _c96, _c97, _c98, _c99, _c100, _c101, _c102, _c103, _c104, _c105, _c106, _c107, _c108, _c109, _c110, _c111, _c112, _c113, _c114, _c115, _c116, _c117, _c118, _c119, _c120, _c121, _c122, _c123, _c124, _c125, _c126, _c127, _c128, _c129, _c130, _c131, _c132, _c133, _c134, _c135, _c136, _c137, _c138, _c139, _c140, _c141, _c142, _c143, _c144, _c145, _c146, _c147, _c148, _c149, _c150, _c151
21/10/25 21:49:35 INFO exchange.Exchange$: nebula keys: xxzjbh, rybh, yy_hklbdm, gllbdm, ryzzbdbh, xm, xmhypy, cym, cympy, xbdm, csrq, gmsfhm, xxdm, mzdm, zjxydm, xldm, hyzkdm, zzmmdm, byzkdm, tsrqdm, zylbdm, zy, zw, zcdm, fwcs, fwcdzmc, lxdh, hkszdlxdm, hjdz_jyqk, hjdz_xzqhdm, hjdz_dzmc, hjdpcs_gajgjgdm, zzbh, xzz_jyqk, xzz_xzqhdm, xzz_dzmc, xzzpcs_gajgjgdm, xzzzazrr_xm, jzzbh, dzxx, qq_fwbzh, sfjy_pdbz, jy_jyqk, gzdw_dwmc, gzdw_dzmc, gzdw_lxdh, cblxdm, htlxdm, sbkh, sfblylbx_pdbz, ylbxbl_kssj, sfblgsbx_pdbz, gsbxbl_kssj, sfblsybx_pdbz, sybxbl_kssj, sfghhy_pdbz, sffqtx_pdbz, xxry_rs, xxzn_rs, sfcj_pdbz, cjlxdm, cj_jyqk, ywsxcl_pdbz, sxcl_cllxdm, sxcl_jdchphm, sfsy_pdbz, hyzmbh, hyzm_fzjg_dwmc, hyzm_fzrq, synh_rs, synvh_rs, sfcqjycs_pdbz, jycs_jyqk, jysj, wbyyy_jyqk, ywjhsyzm_pdbz, sftbjsbm_pdbz, jqsfjzym_pdbz, qrlrrq, lzd_jyqk, lzd_gjhdqdm, lzd_xzqhdm, lzd_dzmc, jzsydm, jzfsdm, jzcsdm, njzsjdm, yhzgx_jyqk, sfzyfw_pdbz, yfzgx_rygxdm, fz_xm, fz_gmsfhm, fzlldm, bfdxdm, hdqkdm, jzzblqkdm, jzz_qfrq, jzzyxqxdm, jzz_yxqqsrq, jzz_yxqjzrq, jzzyq_qsrq, jzzyq_jzrq, fq_xm, fq_gmsfhm, mq_xm, mq_gmsfhm, po_xm, po_gmsfhm, jhr_jhgxdm, jhr_xm, jhr_xmhypy, jhr_gmsfhm, gzdzazrr_xm, gzdzazrr_yddh, gzdzazrr_gddh, qwd_jyqk, qwd_gjhdqdm, qwd_xzqhdm, qwd_dzmc, hc_pdbz, syqkdm, hfrq, wfxydj_jyqk, elrkwfxydj_jyqk, zdqs_pdbz, sssq_jyqk, bz, zx_pdbz, zxrq, zxyydm, zxlbdm, zxdw_gajgjgdm, zxdw_gajgmc, zxr_xm, zxr_gmsfhm, zxr_lxdh, djsj, djdw_gajgjgdm, djdw_gajgmc, djr_xm, djr_gmsfhm, djr_lxdh, czbs, czsj, czdw_gajgjgdm, czdw_gajgmc, czr_xm, czr_gmsfhm, czr_lxdh, xxly_xzqhdm, sc_xxrksj, xxrksj
21/10/25 21:49:35 INFO exchange.Exchange$: Loading CSV files from hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv
21/10/25 21:49:35 INFO internal.SharedState: loading hive config file: file:/data/yarn/nm/usercache/root/filecache/5269/__spark_conf__.zip/__hadoop_conf__/hive-site.xml
21/10/25 21:49:35 INFO internal.SharedState: spark.sql.warehouse.dir is not set, but hive.metastore.warehouse.dir is set. Setting spark.sql.warehouse.dir to the value of hive.metastore.warehouse.dir ('/user/hive/warehouse').
21/10/25 21:49:35 INFO internal.SharedState: Warehouse path is '/user/hive/warehouse'.
21/10/25 21:49:35 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:35 INFO ui.JettyUtils: Adding filter org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter to /SQL.
21/10/25 21:49:35 INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@303d9f86{/SQL,null,AVAILABLE,@Spark}
21/10/25 21:49:35 INFO ui.JettyUtils: Adding filter org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter to /SQL/json.
21/10/25 21:49:35 INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@272f87e{/SQL/json,null,AVAILABLE,@Spark}
21/10/25 21:49:35 INFO ui.JettyUtils: Adding filter org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter to /SQL/execution.
21/10/25 21:49:35 INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@6c8e67b6{/SQL/execution,null,AVAILABLE,@Spark}
21/10/25 21:49:35 INFO ui.JettyUtils: Adding filter org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter to /SQL/execution/json.
21/10/25 21:49:35 INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@14f6f615{/SQL/execution/json,null,AVAILABLE,@Spark}
21/10/25 21:49:35 INFO ui.JettyUtils: Adding filter org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter to /static/sql.
21/10/25 21:49:35 INFO handler.ContextHandler: Started o.s.j.s.ServletContextHandler@13c355e5{/static/sql,null,AVAILABLE,@Spark}
21/10/25 21:49:36 INFO state.StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
21/10/25 21:49:36 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:36 WARN lineage.LineageWriter: Lineage directory /var/log/spark/lineage doesn't exist or is not writable. Lineage for this application will be disabled.
21/10/25 21:49:36 INFO util.Utils: Extension com.cloudera.spark.lineage.NavigatorQueryListener not being initialized.
21/10/25 21:49:36 WARN datasources.DataSource: Multiple sources found for csv (org.apache.spark.sql.execution.datasources.csv.CSVFileFormat, com.databricks.spark.csv.DefaultSource15), defaulting to the internal datasource (org.apache.spark.sql.execution.datasources.csv.CSVFileFormat).
21/10/25 21:49:36 WARN datasources.DataSource: Multiple sources found for csv (org.apache.spark.sql.execution.datasources.csv.CSVFileFormat, com.databricks.spark.csv.DefaultSource15), defaulting to the internal datasource (org.apache.spark.sql.execution.datasources.csv.CSVFileFormat).
21/10/25 21:49:37 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:38 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:39 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:40 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:40 INFO datasources.FileSourceStrategy: Pruning directories with: 
21/10/25 21:49:40 INFO datasources.FileSourceStrategy: Post-Scan Filters: (length(trim(value#0, None)) > 0)
21/10/25 21:49:40 INFO datasources.FileSourceStrategy: Output Data Schema: struct<value: string>
21/10/25 21:49:40 INFO execution.FileSourceScanExec: Pushed Filters: 
21/10/25 21:49:40 INFO codegen.CodeGenerator: Code generated in 246.250267 ms
21/10/25 21:49:41 INFO codegen.CodeGenerator: Code generated in 21.368918 ms
21/10/25 21:49:41 INFO memory.MemoryStore: Block broadcast_0 stored as values in memory (estimated size 473.8 KB, free 365.8 MB)
21/10/25 21:49:41 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:41 INFO memory.MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 49.5 KB, free 365.8 MB)
21/10/25 21:49:41 INFO storage.BlockManagerInfo: Added broadcast_0_piece0 in memory on bj-azsyb-demo4:31191 (size: 49.5 KB, free: 366.3 MB)
21/10/25 21:49:41 INFO spark.SparkContext: Created broadcast 0 from csv at FileBaseReader.scala:87
21/10/25 21:49:41 INFO execution.FileSourceScanExec: Planning scan with bin packing, max size: 134217728 bytes, open cost is considered as scanning 4194304 bytes.
21/10/25 21:49:41 INFO spark.SparkContext: Starting job: csv at FileBaseReader.scala:87
21/10/25 21:49:41 INFO scheduler.DAGScheduler: Got job 0 (csv at FileBaseReader.scala:87) with 1 output partitions
21/10/25 21:49:41 INFO scheduler.DAGScheduler: Final stage: ResultStage 0 (csv at FileBaseReader.scala:87)
21/10/25 21:49:41 INFO scheduler.DAGScheduler: Parents of final stage: List()
21/10/25 21:49:41 INFO scheduler.DAGScheduler: Missing parents: List()
21/10/25 21:49:41 INFO scheduler.DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[3] at csv at FileBaseReader.scala:87), which has no missing parents
21/10/25 21:49:41 INFO yarn.YarnAllocator: Driver requested a total number of 1 executor(s).
21/10/25 21:49:41 INFO spark.ExecutorAllocationManager: Requesting 1 new executor because tasks are backlogged (new desired total will be 1)
21/10/25 21:49:41 INFO yarn.YarnAllocator: Will request 1 executor container(s), each with 1 core(s) and 1408 MB memory (including 384 MB of overhead)
21/10/25 21:49:41 INFO memory.MemoryStore: Block broadcast_1 stored as values in memory (estimated size 8.8 KB, free 365.8 MB)
21/10/25 21:49:41 INFO memory.MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 4.5 KB, free 365.8 MB)
21/10/25 21:49:41 INFO storage.BlockManagerInfo: Added broadcast_1_piece0 in memory on bj-azsyb-demo4:31191 (size: 4.5 KB, free: 366.2 MB)
21/10/25 21:49:41 INFO spark.SparkContext: Created broadcast 1 from broadcast at DAGScheduler.scala:1164
21/10/25 21:49:41 INFO scheduler.DAGScheduler: Submitting 1 missing tasks from ResultStage 0 (MapPartitionsRDD[3] at csv at FileBaseReader.scala:87) (first 15 tasks are for partitions Vector(0))
21/10/25 21:49:41 INFO cluster.YarnClusterScheduler: Adding task set 0.0 with 1 tasks
21/10/25 21:49:42 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:42 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo2,bj-azsyb-demo1,bj-azsyb-demo3.
21/10/25 21:49:42 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:42 INFO yarn.YarnAllocator: Launching container container_1633660048269_0337_02_000003 on host bj-azsyb-demo4 for executor with ID 1
21/10/25 21:49:42 INFO yarn.YarnAllocator: Received 1 containers from YARN, launching executors on 1 of them.
21/10/25 21:49:43 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:44 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:49:45 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (10.172.7.205:59642) with ID 1
21/10/25 21:49:45 INFO spark.ExecutorAllocationManager: New executor 1 has registered (new total is 1)
21/10/25 21:49:45 INFO scheduler.TaskSetManager: Starting task 0.0 in stage 0.0 (TID 0, bj-azsyb-demo4, executor 1, partition 0, RACK_LOCAL, 8360 bytes)
21/10/25 21:49:45 INFO storage.BlockManagerMasterEndpoint: Registering block manager bj-azsyb-demo4:25619 with 397.5 MB RAM, BlockManagerId(1, bj-azsyb-demo4, 25619, None)
21/10/25 21:49:45 INFO storage.BlockManagerInfo: Added broadcast_1_piece0 in memory on bj-azsyb-demo4:25619 (size: 4.5 KB, free: 397.5 MB)
21/10/25 21:49:47 INFO storage.BlockManagerInfo: Added broadcast_0_piece0 in memory on bj-azsyb-demo4:25619 (size: 49.5 KB, free: 397.4 MB)
21/10/25 21:49:48 INFO scheduler.TaskSetManager: Finished task 0.0 in stage 0.0 (TID 0) in 2789 ms on bj-azsyb-demo4 (executor 1) (1/1)
21/10/25 21:49:48 INFO cluster.YarnClusterScheduler: Removed TaskSet 0.0, whose tasks have all completed, from pool 
21/10/25 21:49:48 INFO scheduler.DAGScheduler: ResultStage 0 (csv at FileBaseReader.scala:87) finished in 6.529 s
21/10/25 21:49:48 INFO scheduler.DAGScheduler: Job 0 finished: csv at FileBaseReader.scala:87, took 6.630765 s
21/10/25 21:49:48 INFO yarn.YarnAllocator: Driver requested a total number of 0 executor(s).
21/10/25 21:49:48 INFO datasources.FileSourceStrategy: Pruning directories with: 
21/10/25 21:49:48 INFO datasources.FileSourceStrategy: Post-Scan Filters: 
21/10/25 21:49:48 INFO datasources.FileSourceStrategy: Output Data Schema: struct<value: string>
21/10/25 21:49:48 INFO execution.FileSourceScanExec: Pushed Filters: 
21/10/25 21:49:48 INFO codegen.CodeGenerator: Code generated in 8.522715 ms
21/10/25 21:49:48 INFO memory.MemoryStore: Block broadcast_2 stored as values in memory (estimated size 473.8 KB, free 365.3 MB)
21/10/25 21:49:48 INFO memory.MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 49.5 KB, free 365.3 MB)
21/10/25 21:49:48 INFO storage.BlockManagerInfo: Added broadcast_2_piece0 in memory on bj-azsyb-demo4:31191 (size: 49.5 KB, free: 366.2 MB)
21/10/25 21:49:48 INFO spark.SparkContext: Created broadcast 2 from csv at FileBaseReader.scala:87
21/10/25 21:49:48 INFO execution.FileSourceScanExec: Planning scan with bin packing, max size: 134217728 bytes, open cost is considered as scanning 4194304 bytes.
21/10/25 21:49:49 WARN util.Utils: Truncated the string representation of a plan since it was too large. This behavior can be adjusted by setting 'spark.debug.maxToStringFields' in SparkEnv.conf.
21/10/25 21:49:49 INFO datasources.FileSourceStrategy: Pruning directories with: 
21/10/25 21:49:49 INFO datasources.FileSourceStrategy: Post-Scan Filters: 
21/10/25 21:49:49 INFO datasources.FileSourceStrategy: Output Data Schema: struct<_c0: string, _c1: string, _c2: string, _c3: string, _c4: string ... 150 more fields>
21/10/25 21:49:49 INFO execution.FileSourceScanExec: Pushed Filters: 
21/10/25 21:49:49 INFO codegen.CodeGenerator: Code generated in 24.517355 ms
21/10/25 21:49:49 INFO memory.MemoryStore: Block broadcast_3 stored as values in memory (estimated size 473.8 KB, free 364.8 MB)
21/10/25 21:49:49 INFO memory.MemoryStore: Block broadcast_3_piece0 stored as bytes in memory (estimated size 49.5 KB, free 364.8 MB)
21/10/25 21:49:49 INFO storage.BlockManagerInfo: Added broadcast_3_piece0 in memory on bj-azsyb-demo4:31191 (size: 49.5 KB, free: 366.2 MB)
21/10/25 21:49:49 INFO spark.SparkContext: Created broadcast 3 from foreachPartition at VerticesProcessor.scala:180
21/10/25 21:49:49 INFO execution.FileSourceScanExec: Planning scan with bin packing, max size: 134217728 bytes, open cost is considered as scanning 4194304 bytes.
21/10/25 21:49:49 INFO spark.SparkContext: Starting job: foreachPartition at VerticesProcessor.scala:180
21/10/25 21:49:49 INFO scheduler.DAGScheduler: Registering RDD 12 (foreachPartition at VerticesProcessor.scala:180)
21/10/25 21:49:49 INFO scheduler.DAGScheduler: Registering RDD 16 (foreachPartition at VerticesProcessor.scala:180)
21/10/25 21:49:49 INFO scheduler.DAGScheduler: Got job 1 (foreachPartition at VerticesProcessor.scala:180) with 1 output partitions
21/10/25 21:49:49 INFO scheduler.DAGScheduler: Final stage: ResultStage 3 (foreachPartition at VerticesProcessor.scala:180)
21/10/25 21:49:49 INFO scheduler.DAGScheduler: Parents of final stage: List(ShuffleMapStage 2)
21/10/25 21:49:49 INFO scheduler.DAGScheduler: Missing parents: List(ShuffleMapStage 2)
21/10/25 21:49:49 INFO scheduler.DAGScheduler: Submitting ShuffleMapStage 1 (MapPartitionsRDD[12] at foreachPartition at VerticesProcessor.scala:180), which has no missing parents
21/10/25 21:49:50 INFO memory.MemoryStore: Block broadcast_4 stored as values in memory (estimated size 32.7 KB, free 364.7 MB)
21/10/25 21:49:50 INFO memory.MemoryStore: Block broadcast_4_piece0 stored as bytes in memory (estimated size 11.9 KB, free 364.7 MB)
21/10/25 21:49:50 INFO storage.BlockManagerInfo: Added broadcast_4_piece0 in memory on bj-azsyb-demo4:31191 (size: 11.9 KB, free: 366.1 MB)
21/10/25 21:49:50 INFO spark.SparkContext: Created broadcast 4 from broadcast at DAGScheduler.scala:1164
21/10/25 21:49:50 INFO scheduler.DAGScheduler: Submitting 462 missing tasks from ShuffleMapStage 1 (MapPartitionsRDD[12] at foreachPartition at VerticesProcessor.scala:180) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))
21/10/25 21:49:50 INFO cluster.YarnClusterScheduler: Adding task set 1.0 with 462 tasks
21/10/25 21:49:50 INFO scheduler.TaskSetManager: Starting task 1.0 in stage 1.0 (TID 1, bj-azsyb-demo4, executor 1, partition 1, NODE_LOCAL, 8349 bytes)
21/10/25 21:49:50 INFO storage.BlockManagerInfo: Added broadcast_4_piece0 in memory on bj-azsyb-demo4:25619 (size: 11.9 KB, free: 397.4 MB)
21/10/25 21:49:50 INFO storage.BlockManagerInfo: Added broadcast_3_piece0 in memory on bj-azsyb-demo4:25619 (size: 49.5 KB, free: 397.4 MB)
21/10/25 21:49:51 INFO yarn.YarnAllocator: Driver requested a total number of 2 executor(s).
21/10/25 21:49:51 INFO spark.ExecutorAllocationManager: Requesting 2 new executors because tasks are backlogged (new desired total will be 2)
21/10/25 21:49:51 INFO yarn.YarnAllocator: Will request 1 executor container(s), each with 1 core(s) and 1408 MB memory (including 384 MB of overhead)
21/10/25 21:49:51 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
21/10/25 21:49:51 INFO yarn.YarnAllocator: Launching container container_1633660048269_0337_02_000005 on host bj-azsyb-demo4 for executor with ID 2
21/10/25 21:49:51 INFO yarn.YarnAllocator: Received 1 containers from YARN, launching executors on 1 of them.
21/10/25 21:49:52 INFO yarn.YarnAllocator: Driver requested a total number of 3 executor(s).
21/10/25 21:49:52 INFO spark.ExecutorAllocationManager: Requesting 1 new executor because tasks are backlogged (new desired total will be 3)
21/10/25 21:49:52 INFO yarn.YarnAllocator: Will request 1 executor container(s), each with 1 core(s) and 1408 MB memory (including 384 MB of overhead)
21/10/25 21:49:52 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
21/10/25 21:49:53 INFO yarn.YarnAllocator: Driver requested a total number of 5 executor(s).
21/10/25 21:49:53 INFO yarn.YarnAllocator: Will request 2 executor container(s), each with 1 core(s) and 1408 MB memory (including 384 MB of overhead)
21/10/25 21:49:53 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
21/10/25 21:49:53 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
21/10/25 21:49:53 INFO spark.ExecutorAllocationManager: Requesting 2 new executors because tasks are backlogged (new desired total will be 5)
21/10/25 21:49:54 INFO yarn.YarnAllocator: Driver requested a total number of 9 executor(s).
21/10/25 21:49:54 INFO yarn.YarnAllocator: Will request 4 executor container(s), each with 1 core(s) and 1408 MB memory (including 384 MB of overhead)
21/10/25 21:49:54 INFO spark.ExecutorAllocationManager: Requesting 4 new executors because tasks are backlogged (new desired total will be 9)
21/10/25 21:49:54 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
21/10/25 21:49:54 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
21/10/25 21:49:54 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
21/10/25 21:49:54 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
21/10/25 21:49:54 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (10.172.7.205:59670) with ID 2
21/10/25 21:49:54 INFO scheduler.TaskSetManager: Starting task 3.0 in stage 1.0 (TID 2, bj-azsyb-demo4, executor 2, partition 3, NODE_LOCAL, 8349 bytes)
21/10/25 21:49:54 INFO spark.ExecutorAllocationManager: New executor 2 has registered (new total is 2)
21/10/25 21:49:54 INFO storage.BlockManagerMasterEndpoint: Registering block manager bj-azsyb-demo4:2730 with 397.5 MB RAM, BlockManagerId(2, bj-azsyb-demo4, 2730, None)
21/10/25 21:49:54 INFO storage.BlockManagerInfo: Added broadcast_4_piece0 in memory on bj-azsyb-demo4:2730 (size: 11.9 KB, free: 397.5 MB)
21/10/25 21:49:55 INFO yarn.YarnAllocator: Driver requested a total number of 17 executor(s).
21/10/25 21:49:55 INFO spark.ExecutorAllocationManager: Requesting 8 new executors because tasks are backlogged (new desired total will be 17)
21/10/25 21:49:55 INFO yarn.YarnAllocator: Will request 8 executor container(s), each with 1 core(s) and 1408 MB memory (including 384 MB of overhead)
21/10/25 21:49:55 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo3,bj-azsyb-demo4,bj-azsyb-demo1,bj-azsyb-demo2,bj-azsyb-demo5.
...
21/10/25 21:50:00 INFO yarn.YarnAllocator: Submitted container request for host bj-azsyb-demo5.
...
21/10/25 21:50:03 INFO scheduler.TaskSetManager: Starting task 6.0 in stage 1.0 (TID 4, bj-azsyb-demo4, executor 1, partition 6, NODE_LOCAL, 8349 bytes)
21/10/25 21:50:03 INFO scheduler.TaskSetManager: Finished task 4.0 in stage 1.0 (TID 3) in 6339 ms on bj-azsyb-demo4 (executor 1) (2/462)
...
21/10/25 21:55:37 INFO yarn.YarnAllocator: Canceling requests for 1 executor container(s) to have a new desired total 49 executors.
21/10/25 21:55:37 INFO scheduler.TaskSetManager: Finished task 453.0 in stage 1.0 (TID 414) in 5236 ms on bj-azsyb-demo4 (executor 8) (414/462)
21/10/25 21:55:37 INFO yarn.YarnAllocator: Driver requested a total number of 48 executor(s).
21/10/25 21:55:37 INFO yarn.YarnAllocator: Canceling requests for 1 executor container(s) to have a new desired total 48 executors.
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 16.0 in stage 1.0 (TID 417, bj-azsyb-demo3, executor 4, partition 16, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 28.0 in stage 1.0 (TID 418, bj-azsyb-demo3, executor 5, partition 28, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 30.0 in stage 1.0 (TID 419, bj-azsyb-demo3, executor 9, partition 30, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 35.0 in stage 1.0 (TID 420, bj-azsyb-demo3, executor 7, partition 35, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 41.0 in stage 1.0 (TID 421, bj-azsyb-demo3, executor 3, partition 41, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 42.0 in stage 1.0 (TID 422, bj-azsyb-demo3, executor 11, partition 42, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 74.0 in stage 1.0 (TID 423, bj-azsyb-demo4, executor 1, partition 74, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 75.0 in stage 1.0 (TID 424, bj-azsyb-demo4, executor 10, partition 75, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 81.0 in stage 1.0 (TID 425, bj-azsyb-demo3, executor 13, partition 81, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 83.0 in stage 1.0 (TID 426, bj-azsyb-demo3, executor 6, partition 83, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 98.0 in stage 1.0 (TID 427, bj-azsyb-demo4, executor 8, partition 98, RACK_LOCAL, 8349 bytes)
21/10/25 21:55:39 INFO scheduler.TaskSetManager: Starting task 133.0 in stage 1.0 (TID 428, bj-azsyb-demo4, executor 2, partition 133, RACK_LOCAL, 8349 bytes)
...
...TaskSetManager: Finished task 457.0 in stage 1.0 (TID 462) in 24826 ms on bj-azsyb-demo3 (executor 5) (462/462)
21/10/25 21:57:16 INFO cluster.YarnClusterScheduler: Removed TaskSet 1.0, whose tasks have all completed, from pool 
21/10/25 21:57:16 INFO scheduler.DAGScheduler: ShuffleMapStage 1 (foreachPartition at VerticesProcessor.scala:180) finished in 446.250 s
21/10/25 21:57:16 INFO scheduler.DAGScheduler: looking for newly runnable stages
21/10/25 21:57:16 INFO scheduler.DAGScheduler: running: Set()
21/10/25 21:57:16 INFO scheduler.DAGScheduler: waiting: Set(ShuffleMapStage 2, ResultStage 3)
21/10/25 21:57:16 INFO scheduler.DAGScheduler: failed: Set()
21/10/25 21:57:16 INFO scheduler.DAGScheduler: Submitting ShuffleMapStage 2 (MapPartitionsRDD[16] at foreachPartition at VerticesProcessor.scala:180), which has no missing parents
21/10/25 21:57:16 INFO memory.MemoryStore: Block broadcast_5 stored as values in memory (estimated size 88.5 KB, free 364.6 MB)
21/10/25 21:57:16 INFO memory.MemoryStore: Block broadcast_5_piece0 stored as bytes in memory (estimated size 27.5 KB, free 364.6 MB)
21/10/25 21:57:16 INFO storage.BlockManagerInfo: Added broadcast_5_piece0 in memory on bj-azsyb-demo4:31191 (size: 27.5 KB, free: 366.1 MB)
21/10/25 21:57:16 INFO spark.SparkContext: Created broadcast 5 from broadcast at DAGScheduler.scala:1164
21/10/25 21:57:16 INFO scheduler.DAGScheduler: Submitting 32 missing tasks from ShuffleMapStage 2 (MapPartitionsRDD[16] at foreachPartition at VerticesProcessor.scala:180) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))
21/10/25 21:57:16 INFO cluster.YarnClusterScheduler: Adding task set 2.0 with 32 tasks
21/10/25 21:57:16 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 0.0 in stage 2.0 (TID 463, bj-azsyb-demo3, executor 11, partition 0, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 1.0 in stage 2.0 (TID 464, bj-azsyb-demo3, executor 7, partition 1, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 2.0 in stage 2.0 (TID 465, bj-azsyb-demo3, executor 13, partition 2, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 3.0 in stage 2.0 (TID 466, bj-azsyb-demo4, executor 10, partition 3, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 4.0 in stage 2.0 (TID 467, bj-azsyb-demo3, executor 3, partition 4, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 5.0 in stage 2.0 (TID 468, bj-azsyb-demo4, executor 1, partition 5, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 6.0 in stage 2.0 (TID 469, bj-azsyb-demo4, executor 12, partition 6, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 7.0 in stage 2.0 (TID 470, bj-azsyb-demo4, executor 2, partition 7, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 8.0 in stage 2.0 (TID 471, bj-azsyb-demo3, executor 4, partition 8, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 9.0 in stage 2.0 (TID 472, bj-azsyb-demo3, executor 5, partition 9, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 10.0 in stage 2.0 (TID 473, bj-azsyb-demo3, executor 9, partition 10, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 11.0 in stage 2.0 (TID 474, bj-azsyb-demo4, executor 8, partition 11, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO scheduler.TaskSetManager: Starting task 12.0 in stage 2.0 (TID 475, bj-azsyb-demo3, executor 6, partition 12, PROCESS_LOCAL, 7745 bytes)
21/10/25 21:57:16 INFO storage.BlockManagerInfo: Added broadcast_5_piece0 in memory on bj-azsyb-demo4:29504 (size: 27.5 KB, free: 397.4 MB)
21/10/25 21:57:16 INFO storage.BlockManagerInfo: Added broadcast_5_piece0 in memory on bj-azsyb-demo4:10634 (size: 27.5 KB, free: 397.4 MB)
...
21/10/25 21:57:16 INFO spark.MapOutputTrackerMasterEndpoint: Asked to send map output locations for shuffle 0 to 10.172.7.205:60060
21/10/25 21:57:16 INFO storage.BlockManagerInfo: Added broadcast_5_piece0 in memory on bj-azsyb-demo3:2299 (size: 27.5 KB, free: 397.4 MB)
21/10/25 21:57:16 INFO spark.MapOutputTrackerMasterEndpoint: Asked to send map output locations for shuffle 0 to 10.172.7.210:41994
21/10/25 21:57:16 INFO spark.MapOutputTrackerMasterEndpoint: Asked to send map output locations for shuffle 0 to 10.172.7.210:42210
21/10/25 21:57:16 INFO spark.MapOutputTrackerMasterEndpoint: Asked to send map output locations for shuffle 0 to 10.172.7.210:42130
21/10/25 21:57:16 INFO spark.MapOutputTrackerMasterEndpoint: Asked to send map output locations for shuffle 0 to 10.172.7.210:41952
21/10/25 21:57:16 INFO spark.MapOutputTrackerMasterEndpoint: Asked to send map output locations for shuffle 0 to 10.172.7.210:42268
21/10/25 21:57:17 INFO yarn.YarnAllocator: Driver requested a total number of 14 executor(s).
...
21/10/25 22:06:38 INFO yarn.YarnAllocator: Canceling requests for 1 executor container(s) to have a new desired total 13 executors.
21/10/25 22:06:49 INFO scheduler.TaskSetManager: Finished task 20.0 in stage 2.0 (TID 483) in 248344 ms on bj-azsyb-demo3 (executor 4) (20/32)
...
21/10/25 22:09:31 INFO scheduler.TaskSetManager: Finished task 31.0 in stage 2.0 (TID 494) in 173181 ms on bj-azsyb-demo3 (executor 11) (32/32)
21/10/25 22:09:31 INFO cluster.YarnClusterScheduler: Removed TaskSet 2.0, whose tasks have all completed, from pool 
21/10/25 22:09:31 INFO scheduler.DAGScheduler: ShuffleMapStage 2 (foreachPartition at VerticesProcessor.scala:180) finished in 735.418 s
21/10/25 22:09:31 INFO scheduler.DAGScheduler: looking for newly runnable stages
21/10/25 22:09:31 INFO scheduler.DAGScheduler: running: Set()
...
21/10/25 22:19:34 INFO storage.BlockManagerInfo: Removed broadcast_0_piece0 on bj-azsyb-demo4:31191 in memory (size: 49.5 KB, free: 366.1 MB)
21/10/25 22:19:34 INFO storage.BlockManagerInfo: Removed broadcast_0_piece0 on bj-azsyb-demo4:25619 in memory (size: 49.5 KB, free: 397.4 MB)
...
21/10/25 22:19:34 INFO spark.ContextCleaner: Cleaned accumulator 15
21/10/25 22:33:40 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Disabling executor 1.
21/10/25 22:33:40 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:40 INFO scheduler.DAGScheduler: Executor lost: 1 (epoch 2)
21/10/25 22:33:40 INFO storage.BlockManagerMasterEndpoint: Trying to remove executor 1 from BlockManagerMaster.
21/10/25 22:33:40 INFO storage.BlockManagerMasterEndpoint: Removing block manager BlockManagerId(1, bj-azsyb-demo4, 25619, None)
21/10/25 22:33:40 INFO storage.BlockManagerMaster: Removed 1 successfully in removeExecutor
21/10/25 22:33:40 INFO yarn.YarnAllocator: Completed container container_1633660048269_0337_02_000003 on host: bj-azsyb-demo4 (state: COMPLETE, exit status: 143)
21/10/25 22:33:40 WARN yarn.YarnAllocator: Container marked as failed: container_1633660048269_0337_02_000003 on host: bj-azsyb-demo4. Exit status: 143. Diagnostics: [2021-10-25 22:33:40.271]Container killed on request. Exit code is 143
[2021-10-25 22:33:40.272]Container exited with a non-zero exit code 143. 
[2021-10-25 22:33:40.274]Killed by external signal

21/10/25 22:33:40 WARN cluster.YarnSchedulerBackend$YarnSchedulerEndpoint: Requesting driver to remove executor 1 for reason Container marked as failed: container_1633660048269_0337_02_000003 on host: bj-azsyb-demo4. Exit status: 143. Diagnostics: [2021-10-25 22:33:40.271]Container killed on request. Exit code is 143
[2021-10-25 22:33:40.272]Container exited with a non-zero exit code 143. 
[2021-10-25 22:33:40.274]Killed by external signal

21/10/25 22:33:40 ERROR cluster.YarnClusterScheduler: Lost executor 1 on bj-azsyb-demo4: Container marked as failed: container_1633660048269_0337_02_000003 on host: bj-azsyb-demo4. Exit status: 143. Diagnostics: [2021-10-25 22:33:40.271]Container killed on request. Exit code is 143
[2021-10-25 22:33:40.272]Container exited with a non-zero exit code 143. 
[2021-10-25 22:33:40.274]Killed by external signal

21/10/25 22:33:40 WARN scheduler.TaskSetManager: Lost task 0.0 in stage 3.0 (TID 495, bj-azsyb-demo4, executor 1): ExecutorLostFailure (executor 1 exited caused by one of the running tasks) Reason: Container marked as failed: container_1633660048269_0337_02_000003 on host: bj-azsyb-demo4. Exit status: 143. Diagnostics: [2021-10-25 22:33:40.271]Container killed on request. Exit code is 143
[2021-10-25 22:33:40.272]Container exited with a non-zero exit code 143. 
[2021-10-25 22:33:40.274]Killed by external signal

21/10/25 22:33:40 INFO spark.ExecutorAllocationManager: Existing executor 1 has been removed (new total is 0)
21/10/25 22:33:40 INFO storage.BlockManagerMasterEndpoint: Trying to remove executor 1 from BlockManagerMaster.
21/10/25 22:33:40 INFO storage.BlockManagerMaster: Removal of executor 1 requested
21/10/25 22:33:40 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Asked to remove non-existent executor 1
21/10/25 22:33:40 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:41 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:42 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:43 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:43 INFO yarn.YarnAllocator: Will request 1 executor container(s), each with 1 core(s) and 1408 MB memory (including 384 MB of overhead)
21/10/25 22:33:43 INFO yarn.YarnAllocator: Submitted 1 unlocalized container requests.
21/10/25 22:33:44 INFO yarn.YarnAllocator: Launching container container_1633660048269_0337_02_000019 on host bj-azsyb-demo3 for executor with ID 14
21/10/25 22:33:44 INFO yarn.YarnAllocator: Received 1 containers from YARN, launching executors on 1 of them.
21/10/25 22:33:44 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:45 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:46 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:47 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:48 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:33:49 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (10.172.7.210:45528) with ID 14
21/10/25 22:33:49 INFO spark.ExecutorAllocationManager: New executor 14 has registered (new total is 1)
21/10/25 22:33:49 INFO scheduler.TaskSetManager: Starting task 0.1 in stage 3.0 (TID 496, bj-azsyb-demo3, executor 14, partition 0, PROCESS_LOCAL, 7756 bytes)
21/10/25 22:33:49 INFO storage.BlockManagerMasterEndpoint: Registering block manager bj-azsyb-demo3:22935 with 397.5 MB RAM, BlockManagerId(14, bj-azsyb-demo3, 22935, None)
21/10/25 22:33:49 INFO storage.BlockManagerInfo: Added broadcast_6_piece0 in memory on bj-azsyb-demo3:22935 (size: 61.0 KB, free: 397.4 MB)
21/10/25 22:33:51 INFO spark.MapOutputTrackerMasterEndpoint: Asked to send map output locations for shuffle 1 to 10.172.7.210:45528
21/10/25 22:54:21 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Disabling executor 14.
21/10/25 22:54:21 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:21 INFO scheduler.DAGScheduler: Executor lost: 14 (epoch 2)
21/10/25 22:54:21 INFO storage.BlockManagerMasterEndpoint: Trying to remove executor 14 from BlockManagerMaster.
21/10/25 22:54:21 INFO storage.BlockManagerMasterEndpoint: Removing block manager BlockManagerId(14, bj-azsyb-demo3, 22935, None)
21/10/25 22:54:21 INFO storage.BlockManagerMaster: Removed 14 successfully in removeExecutor
21/10/25 22:54:22 INFO yarn.YarnAllocator: Completed container container_1633660048269_0337_02_000019 on host: bj-azsyb-demo3 (state: COMPLETE, exit status: 143)
21/10/25 22:54:22 WARN yarn.YarnAllocator: Container marked as failed: container_1633660048269_0337_02_000019 on host: bj-azsyb-demo3. Exit status: 143. Diagnostics: [2021-10-25 22:54:21.916]Container killed on request. Exit code is 143
[2021-10-25 22:54:21.917]Container exited with a non-zero exit code 143. 
[2021-10-25 22:54:21.921]Killed by external signal

21/10/25 22:54:22 WARN cluster.YarnSchedulerBackend$YarnSchedulerEndpoint: Requesting driver to remove executor 14 for reason Container marked as failed: container_1633660048269_0337_02_000019 on host: bj-azsyb-demo3. Exit status: 143. Diagnostics: [2021-10-25 22:54:21.916]Container killed on request. Exit code is 143
[2021-10-25 22:54:21.917]Container exited with a non-zero exit code 143. 
[2021-10-25 22:54:21.921]Killed by external signal

21/10/25 22:54:22 ERROR cluster.YarnClusterScheduler: Lost executor 14 on bj-azsyb-demo3: Container marked as failed: container_1633660048269_0337_02_000019 on host: bj-azsyb-demo3. Exit status: 143. Diagnostics: [2021-10-25 22:54:21.916]Container killed on request. Exit code is 143
[2021-10-25 22:54:21.917]Container exited with a non-zero exit code 143. 
[2021-10-25 22:54:21.921]Killed by external signal

21/10/25 22:54:22 WARN scheduler.TaskSetManager: Lost task 0.1 in stage 3.0 (TID 496, bj-azsyb-demo3, executor 14): ExecutorLostFailure (executor 14 exited caused by one of the running tasks) Reason: Container marked as failed: container_1633660048269_0337_02_000019 on host: bj-azsyb-demo3. Exit status: 143. Diagnostics: [2021-10-25 22:54:21.916]Container killed on request. Exit code is 143
[2021-10-25 22:54:21.917]Container exited with a non-zero exit code 143. 
[2021-10-25 22:54:21.921]Killed by external signal

21/10/25 22:54:22 INFO storage.BlockManagerMaster: Removal of executor 14 requested
21/10/25 22:54:22 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Asked to remove non-existent executor 14
21/10/25 22:54:22 INFO storage.BlockManagerMasterEndpoint: Trying to remove executor 14 from BlockManagerMaster.
21/10/25 22:54:22 INFO spark.ExecutorAllocationManager: Existing executor 14 has been removed (new total is 0)
21/10/25 22:54:22 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:23 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:24 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:25 INFO yarn.YarnAllocator: Will request 1 executor container(s), each with 1 core(s) and 1408 MB memory (including 384 MB of overhead)
21/10/25 22:54:25 INFO yarn.YarnAllocator: Submitted 1 unlocalized container requests.
21/10/25 22:54:25 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:26 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:26 INFO yarn.YarnAllocator: Launching container container_1633660048269_0337_02_000020 on host bj-azsyb-demo3 for executor with ID 15
21/10/25 22:54:26 INFO yarn.YarnAllocator: Received 1 containers from YARN, launching executors on 1 of them.
21/10/25 22:54:27 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:28 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:29 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:30 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 22:54:30 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Registered executor NettyRpcEndpointRef(spark-client://Executor) (10.172.7.210:47282) with ID 15
21/10/25 22:54:30 INFO spark.ExecutorAllocationManager: New executor 15 has registered (new total is 1)
21/10/25 22:54:30 INFO scheduler.TaskSetManager: Starting task 0.2 in stage 3.0 (TID 497, bj-azsyb-demo3, executor 15, partition 0, PROCESS_LOCAL, 7756 bytes)
21/10/25 22:54:30 INFO storage.BlockManagerMasterEndpoint: Registering block manager bj-azsyb-demo3:32393 with 397.5 MB RAM, BlockManagerId(15, bj-azsyb-demo3, 32393, None)
21/10/25 22:54:31 INFO storage.BlockManagerInfo: Added broadcast_6_piece0 in memory on bj-azsyb-demo3:32393 (size: 61.0 KB, free: 397.4 MB)
21/10/25 22:54:33 INFO spark.MapOutputTrackerMasterEndpoint: Asked to send map output locations for shuffle 1 to 10.172.7.210:47282
21/10/25 23:16:38 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Disabling executor 15.
21/10/25 23:16:38 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 23:16:38 INFO scheduler.DAGScheduler: Executor lost: 15 (epoch 2)
21/10/25 23:16:38 INFO storage.BlockManagerMasterEndpoint: Trying to remove executor 15 from BlockManagerMaster.
21/10/25 23:16:38 INFO storage.BlockManagerMasterEndpoint: Removing block manager BlockManagerId(15, bj-azsyb-demo3, 32393, None)
21/10/25 23:16:38 INFO storage.BlockManagerMaster: Removed 15 successfully in removeExecutor
21/10/25 23:16:38 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 23:16:38 INFO yarn.YarnAllocator: Completed container container_1633660048269_0337_02_000020 on host: bj-azsyb-demo3 (state: COMPLETE, exit status: 143)
21/10/25 23:16:38 WARN yarn.YarnAllocator: Container marked as failed: container_1633660048269_0337_02_000020 on host: bj-azsyb-demo3. Exit status: 143. Diagnostics: [2021-10-25 23:16:38.347]Container killed on request. Exit code is 143
[2021-10-25 23:16:38.348]Container exited with a non-zero exit code 143. 
[2021-10-25 23:16:38.350]Killed by external signal
...
[2021-10-25 23:37:31.900]Killed by external signal

Driver stacktrace:
21/10/25 23:37:32 INFO yarn.SparkRackResolver: Got an error when resolving hostNames. Falling back to /default-rack for all
21/10/25 23:37:32 INFO scheduler.DAGScheduler: Job 1 failed: foreachPartition at VerticesProcessor.scala:180, took 6462.564497 s
21/10/25 23:37:32 ERROR yarn.ApplicationMaster: User class threw exception: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 3.0 failed 4 times, most recent failure: Lost task 0.3 in stage 3.0 (TID 498, bj-azsyb-demo4, executor 16): ExecutorLostFailure (executor 16 exited caused by one of the running tasks) Reason: Container marked as failed: container_1633660048269_0337_02_000021 on host: bj-azsyb-demo4. Exit status: 143. Diagnostics: [2021-10-25 23:37:31.899]Container killed on request. Exit code is 143
[2021-10-25 23:37:31.899]Container exited with a non-zero exit code 143. 
[2021-10-25 23:37:31.900]Killed by external signal

Driver stacktrace:
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 3.0 failed 4 times, most recent failure: Lost task 0.3 in stage 3.0 (TID 498, bj-azsyb-demo4, executor 16): ExecutorLostFailure (executor 16 exited caused by one of the running tasks) Reason: Container marked as failed: container_1633660048269_0337_02_000021 on host: bj-azsyb-demo4. Exit status: 143. Diagnostics: [2021-10-25 23:37:31.899]Container killed on request. Exit code is 143
[2021-10-25 23:37:31.899]Container exited with a non-zero exit code 143. 
[2021-10-25 23:37:31.900]Killed by external signal

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1890)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1878)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1877)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:929)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:929)
	at scala.Option.foreach(Option.scala:257)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:929)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2111)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2060)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2049)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:740)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2081)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2102)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2121)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2146)
	at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1.apply(RDD.scala:935)
	at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1.apply(RDD.scala:933)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
	at org.apache.spark.rdd.RDD.foreachPartition(RDD.scala:933)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply$mcV$sp(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$withNewRDDExecutionId$1.apply(Dataset.scala:3349)
	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
	at org.apache.spark.sql.Dataset.withNewRDDExecutionId(Dataset.scala:3345)
	at org.apache.spark.sql.Dataset.foreachPartition(Dataset.scala:2734)
	at com.vesoft.nebula.exchange.processor.VerticesProcessor.process(VerticesProcessor.scala:180)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:152)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:128)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at com.vesoft.nebula.exchange.Exchange$.main(Exchange.scala:128)
	at com.vesoft.nebula.exchange.Exchange.main(Exchange.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:673)
21/10/25 23:37:32 INFO yarn.YarnAllocator: Driver requested a total number of 0 executor(s).
21/10/25 23:37:32 INFO yarn.ApplicationMaster: Final app status: FAILED, exitCode: 15, (reason: User class threw exception: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 3.0 failed 4 times, most recent failure: Lost task 0.3 in stage 3.0 (TID 498, bj-azsyb-demo4, executor 16): ExecutorLostFailure (executor 16 exited caused by one of the running tasks) Reason: Container marked as failed: container_1633660048269_0337_02_000021 on host: bj-azsyb-demo4. Exit status: 143. Diagnostics: [2021-10-25 23:37:31.899]Container killed on request. Exit code is 143
[2021-10-25 23:37:31.899]Container exited with a non-zero exit code 143. 
[2021-10-25 23:37:31.900]Killed by external signal

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1890)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1878)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1877)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:929)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:929)
	at scala.Option.foreach(Option.scala:257)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:929)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2111)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2060)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2049)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:740)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2081)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2102)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2121)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2146)
	at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1.apply(RDD.scala:935)
	at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1.apply(RDD.scala:933)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
	at org.apache.spark.rdd.RDD.foreachPartition(RDD.scala:933)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply$mcV$sp(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$withNewRDDExecutionId$1.apply(Dataset.scala:3349)
	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
	at org.apache.spark.sql.Dataset.withNewRDDExecutionId(Dataset.scala:3345)
	at org.apache.spark.sql.Dataset.foreachPartition(Dataset.scala:2734)
	at com.vesoft.nebula.exchange.processor.VerticesProcessor.process(VerticesProcessor.scala:180)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:152)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:128)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at com.vesoft.nebula.exchange.Exchange$.main(Exchange.scala:128)
	at com.vesoft.nebula.exchange.Exchange.main(Exchange.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:673)
)
21/10/25 23:37:32 WARN yarn.ApplicationMaster: Reporter thread fails 1 time(s) in a row.
java.io.InterruptedIOException: Call interrupted
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1493)
	at org.apache.hadoop.ipc.Client.call(Client.java:1445)
	at org.apache.hadoop.ipc.Client.call(Client.java:1355)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:228)
	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:116)
	at com.sun.proxy.$Proxy21.allocate(Unknown Source)
	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationMasterProtocolPBClientImpl.allocate(ApplicationMasterProtocolPBClientImpl.java:77)
	at sun.reflect.GeneratedMethodAccessor19.invoke(Unknown Source)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157)
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359)
	at com.sun.proxy.$Proxy22.allocate(Unknown Source)
	at org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl.allocate(AMRMClientImpl.java:291)
	at org.apache.spark.deploy.yarn.YarnAllocator.allocateResources(YarnAllocator.scala:257)
	at org.apache.spark.deploy.yarn.ApplicationMaster.org$apache$spark$deploy$yarn$ApplicationMaster$$allocationThreadImpl(ApplicationMaster.scala:519)
	at org.apache.spark.deploy.yarn.ApplicationMaster$$anon$1.run(ApplicationMaster.scala:585)
21/10/25 23:37:32 INFO spark.SparkContext: Invoking stop() from shutdown hook
21/10/25 23:37:32 INFO server.AbstractConnector: Stopped Spark@2ed693e1{HTTP/1.1,[http/1.1]}{0.0.0.0:0}
21/10/25 23:37:32 INFO ui.SparkUI: Stopped Spark web UI at http://bj-azsyb-demo4:31677
21/10/25 23:37:32 INFO cluster.YarnClusterSchedulerBackend: Shutting down all executors
21/10/25 23:37:32 INFO cluster.YarnSchedulerBackend$YarnDriverEndpoint: Asking each executor to shut down
21/10/25 23:37:32 INFO cluster.SchedulerExtensionServices: Stopping SchedulerExtensionServices
(serviceOption=None,
 services=List(),
 started=false)
21/10/25 23:37:33 INFO spark.MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
21/10/25 23:37:33 INFO memory.MemoryStore: MemoryStore cleared
21/10/25 23:37:33 INFO storage.BlockManager: BlockManager stopped
21/10/25 23:37:33 INFO storage.BlockManagerMaster: BlockManagerMaster stopped
21/10/25 23:37:33 INFO scheduler.OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
21/10/25 23:37:33 INFO spark.SparkContext: Successfully stopped SparkContext
21/10/25 23:37:33 INFO yarn.ApplicationMaster: Unregistering ApplicationMaster with FAILED (diag message: User class threw exception: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 3.0 failed 4 times, most recent failure: Lost task 0.3 in stage 3.0 (TID 498, bj-azsyb-demo4, executor 16): ExecutorLostFailure (executor 16 exited caused by one of the running tasks) Reason: Container marked as failed: container_1633660048269_0337_02_000021 on host: bj-azsyb-demo4. Exit status: 143. Diagnostics: [2021-10-25 23:37:31.899]Container killed on request. Exit code is 143
[2021-10-25 23:37:31.899]Container exited with a non-zero exit code 143. 
[2021-10-25 23:37:31.900]Killed by external signal

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1890)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1878)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1877)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:929)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:929)
	at scala.Option.foreach(Option.scala:257)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:929)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2111)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2060)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2049)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:740)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2081)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2102)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2121)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2146)
	at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1.apply(RDD.scala:935)
	at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1.apply(RDD.scala:933)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
	at org.apache.spark.rdd.RDD.foreachPartition(RDD.scala:933)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply$mcV$sp(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$foreachPartition$1.apply(Dataset.scala:2735)
	at org.apache.spark.sql.Dataset$$anonfun$withNewRDDExecutionId$1.apply(Dataset.scala:3349)
	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
	at org.apache.spark.sql.Dataset.withNewRDDExecutionId(Dataset.scala:3345)
	at org.apache.spark.sql.Dataset.foreachPartition(Dataset.scala:2734)
	at com.vesoft.nebula.exchange.processor.VerticesProcessor.process(VerticesProcessor.scala:180)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:152)
	at com.vesoft.nebula.exchange.Exchange$$anonfun$main$2.apply(Exchange.scala:128)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at com.vesoft.nebula.exchange.Exchange$.main(Exchange.scala:128)
	at com.vesoft.nebula.exchange.Exchange.main(Exchange.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.deploy.yarn.ApplicationMaster$$anon$2.run(ApplicationMaster.scala:673)
)
21/10/25 23:37:33 INFO impl.AMRMClientImpl: Waiting for application to be successfully unregistered.
21/10/25 23:37:33 INFO yarn.ApplicationMaster: Deleting staging directory hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/root/.sparkStaging/application_1633660048269_0337
21/10/25 23:37:33 INFO util.ShutdownHookManager: Shutdown hook called
21/10/25 23:37:33 INFO util.ShutdownHookManager: Deleting directory /data/yarn/nm/usercache/root/appcache/application_1633660048269_0337/spark-257056b9-89b9-4617-bb53-b45a5bd9103e

Sorting is what takes the time; you can set the partition count higher, since the sort is done inside each partition.
Please paste the container logs. What you've posted is the driver log, and the real error isn't visible there.
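
For reference, a minimal sketch of where that setting lives in the Exchange tag configuration (the tag name and the value here are placeholders, not taken from this thread):

    tags: [
      {
        name: person            # hypothetical tag name
        type: {
          source: hive
          sink: sst
        }
        # ... exec / fields / vertex mapping omitted ...
        # SST generation sorts keys inside each Spark partition, so a larger
        # partition count means smaller, faster per-partition sorts
        partition: 120          # example value; tune to your data volume
      }
    ]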

Thanks a lot! The small demo has now generated the SST files, and after installing the CDH client and restarting the nebula nodes, the data was imported successfully.
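
For completeness, the import step after SST generation is the DOWNLOAD/INGEST pair in nebula-console. A minimal sketch, assuming the SST files sit under an /sst directory on the namenode seen in the logs below; the space name is a placeholder:

    # Run in nebula-console. DOWNLOAD requires a Hadoop client on every
    # storaged host, which is why installing the CDH client was needed.
    USE person_space;
    DOWNLOAD HDFS "hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/sst";
    INGEST;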
Below is the log from generating SST files for 100 million vertex records:

Log Type: stderr

Log Upload Time: Tue Oct 26 19:17:32 +0800 2021

Log Length: 116646

21/10/26 18:04:21 INFO executor.CoarseGrainedExecutorBackend: Started daemon with process name: 56733@bj-azsyb-demo3
21/10/26 18:04:21 INFO util.SignalUtils: Registered signal handler for TERM
21/10/26 18:04:21 INFO util.SignalUtils: Registered signal handler for HUP
21/10/26 18:04:21 INFO util.SignalUtils: Registered signal handler for INT
21/10/26 18:04:22 INFO spark.SecurityManager: Changing view acls to: yarn,root
21/10/26 18:04:22 INFO spark.SecurityManager: Changing modify acls to: yarn,root
21/10/26 18:04:22 INFO spark.SecurityManager: Changing view acls groups to: 
21/10/26 18:04:22 INFO spark.SecurityManager: Changing modify acls groups to: 
21/10/26 18:04:22 INFO spark.SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users  with view permissions: Set(yarn, root); groups with view permissions: Set(); users  with modify permissions: Set(yarn, root); groups with modify permissions: Set()
21/10/26 18:04:22 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo4/10.172.7.205:4602 after 69 ms (0 ms spent in bootstraps)
21/10/26 18:04:22 INFO spark.SecurityManager: Changing view acls to: yarn,root
21/10/26 18:04:22 INFO spark.SecurityManager: Changing modify acls to: yarn,root
21/10/26 18:04:22 INFO spark.SecurityManager: Changing view acls groups to: 
21/10/26 18:04:22 INFO spark.SecurityManager: Changing modify acls groups to: 
21/10/26 18:04:22 INFO spark.SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users  with view permissions: Set(yarn, root); groups with view permissions: Set(); users  with modify permissions: Set(yarn, root); groups with modify permissions: Set()
21/10/26 18:04:22 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo4/10.172.7.205:4602 after 1 ms (0 ms spent in bootstraps)
21/10/26 18:04:22 INFO storage.DiskBlockManager: Created local directory at /data/yarn/nm/usercache/root/appcache/application_1633660048269_0371/blockmgr-796ccdfa-30bd-49a7-8576-fe59422664d5
21/10/26 18:04:22 INFO memory.MemoryStore: MemoryStore started with capacity 397.5 MB
21/10/26 18:04:23 INFO executor.CoarseGrainedExecutorBackend: Connecting to driver: spark://CoarseGrainedScheduler@bj-azsyb-demo4:4602
21/10/26 18:04:23 INFO executor.CoarseGrainedExecutorBackend: Successfully registered with driver
21/10/26 18:04:23 INFO executor.Executor: Starting executor ID 6 on host bj-azsyb-demo3
21/10/26 18:04:23 INFO util.Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 26649.
21/10/26 18:04:23 INFO netty.NettyBlockTransferService: Server created on bj-azsyb-demo3:26649
21/10/26 18:04:23 INFO storage.BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
21/10/26 18:04:23 INFO storage.BlockManagerMaster: Registering BlockManager BlockManagerId(6, bj-azsyb-demo3, 26649, None)
21/10/26 18:04:23 INFO storage.BlockManagerMaster: Registered BlockManager BlockManagerId(6, bj-azsyb-demo3, 26649, None)
21/10/26 18:04:23 INFO storage.BlockManager: external shuffle service port = 7337
21/10/26 18:04:23 INFO storage.BlockManager: Registering executor with local external shuffle service.
21/10/26 18:04:23 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo3/10.172.7.210:7337 after 1 ms (0 ms spent in bootstraps)
21/10/26 18:04:23 INFO storage.BlockManager: Initialized BlockManager: BlockManagerId(6, bj-azsyb-demo3, 26649, None)
21/10/26 18:04:23 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 80
21/10/26 18:04:23 INFO executor.Executor: Running task 20.0 in stage 1.0 (TID 80)
21/10/26 18:04:23 INFO broadcast.TorrentBroadcast: Started reading broadcast variable 4
21/10/26 18:04:23 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo3/10.172.7.210:11926 after 2 ms (0 ms spent in bootstraps)
21/10/26 18:04:23 INFO memory.MemoryStore: Block broadcast_4_piece0 stored as bytes in memory (estimated size 11.9 KB, free 397.5 MB)
21/10/26 18:04:23 INFO broadcast.TorrentBroadcast: Reading broadcast variable 4 took 132 ms
21/10/26 18:04:24 INFO memory.MemoryStore: Block broadcast_4 stored as values in memory (estimated size 32.7 KB, free 397.5 MB)
21/10/26 18:04:24 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 2684354560-2818572288, partition values: [empty row]
21/10/26 18:04:25 INFO codegen.CodeGenerator: Code generated in 416.910906 ms
21/10/26 18:04:25 INFO broadcast.TorrentBroadcast: Started reading broadcast variable 3
21/10/26 18:04:25 INFO memory.MemoryStore: Block broadcast_3_piece0 stored as bytes in memory (estimated size 49.4 KB, free 397.3 MB)
21/10/26 18:04:25 INFO broadcast.TorrentBroadcast: Reading broadcast variable 3 took 43 ms
21/10/26 18:04:25 INFO memory.MemoryStore: Block broadcast_3 stored as values in memory (estimated size 716.4 KB, free 396.6 MB)
21/10/26 18:04:32 INFO executor.Executor: Finished task 20.0 in stage 1.0 (TID 80). 1381 bytes result sent to driver
21/10/26 18:04:32 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 93
21/10/26 18:04:32 INFO executor.Executor: Running task 32.0 in stage 1.0 (TID 93)
21/10/26 18:04:32 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 4294967296-4429185024, partition values: [empty row]
21/10/26 18:04:37 INFO executor.Executor: Finished task 32.0 in stage 1.0 (TID 93). 1338 bytes result sent to driver
21/10/26 18:04:37 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 99
21/10/26 18:04:37 INFO executor.Executor: Running task 49.0 in stage 1.0 (TID 99)
21/10/26 18:04:37 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 6576668672-6710886400, partition values: [empty row]
21/10/26 18:04:43 INFO executor.Executor: Finished task 49.0 in stage 1.0 (TID 99). 1338 bytes result sent to driver
21/10/26 18:04:43 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 108
21/10/26 18:04:43 INFO executor.Executor: Running task 54.0 in stage 1.0 (TID 108)
21/10/26 18:04:43 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 7247757312-7381975040, partition values: [empty row]
21/10/26 18:04:49 INFO executor.Executor: Finished task 54.0 in stage 1.0 (TID 108). 1338 bytes result sent to driver
21/10/26 18:04:49 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 120
21/10/26 18:04:49 INFO executor.Executor: Running task 73.0 in stage 1.0 (TID 120)
21/10/26 18:04:49 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 9797894144-9932111872, partition values: [empty row]
21/10/26 18:04:55 INFO executor.Executor: Finished task 73.0 in stage 1.0 (TID 120). 1338 bytes result sent to driver
21/10/26 18:04:55 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 133
21/10/26 18:04:55 INFO executor.Executor: Running task 93.0 in stage 1.0 (TID 133)
21/10/26 18:04:55 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 12482248704-12616466432, partition values: [empty row]
21/10/26 18:05:02 INFO executor.Executor: Finished task 93.0 in stage 1.0 (TID 133). 1338 bytes result sent to driver
21/10/26 18:05:02 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 146
21/10/26 18:05:02 INFO executor.Executor: Running task 111.0 in stage 1.0 (TID 146)
21/10/26 18:05:02 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 14898167808-15032385536, partition values: [empty row]
21/10/26 18:05:08 INFO executor.Executor: Finished task 111.0 in stage 1.0 (TID 146). 1338 bytes result sent to driver
21/10/26 18:05:08 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 160
21/10/26 18:05:08 INFO executor.Executor: Running task 129.0 in stage 1.0 (TID 160)
21/10/26 18:05:08 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 17314086912-17448304640, partition values: [empty row]
21/10/26 18:05:16 INFO executor.Executor: Finished task 129.0 in stage 1.0 (TID 160). 1381 bytes result sent to driver
21/10/26 18:05:16 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 177
21/10/26 18:05:16 INFO executor.Executor: Running task 147.0 in stage 1.0 (TID 177)
21/10/26 18:05:16 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 19730006016-19864223744, partition values: [empty row]
21/10/26 18:05:22 INFO executor.Executor: Finished task 147.0 in stage 1.0 (TID 177). 1338 bytes result sent to driver
21/10/26 18:05:22 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 192
21/10/26 18:05:22 INFO executor.Executor: Running task 175.0 in stage 1.0 (TID 192)
21/10/26 18:05:22 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 23488102400-23622320128, partition values: [empty row]
21/10/26 18:05:29 INFO executor.Executor: Finished task 175.0 in stage 1.0 (TID 192). 1338 bytes result sent to driver
21/10/26 18:05:29 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 207
21/10/26 18:05:29 INFO executor.Executor: Running task 196.0 in stage 1.0 (TID 207)
21/10/26 18:05:29 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 26306674688-26440892416, partition values: [empty row]
21/10/26 18:05:36 INFO executor.Executor: Finished task 196.0 in stage 1.0 (TID 207). 1338 bytes result sent to driver
21/10/26 18:05:36 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 222
21/10/26 18:05:36 INFO executor.Executor: Running task 210.0 in stage 1.0 (TID 222)
21/10/26 18:05:36 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 28185722880-28319940608, partition values: [empty row]
21/10/26 18:05:44 INFO executor.Executor: Finished task 210.0 in stage 1.0 (TID 222). 1338 bytes result sent to driver
21/10/26 18:05:44 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 240
21/10/26 18:05:44 INFO executor.Executor: Running task 232.0 in stage 1.0 (TID 240)
21/10/26 18:05:44 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 31138512896-31272730624, partition values: [empty row]
21/10/26 18:05:51 INFO executor.Executor: Finished task 232.0 in stage 1.0 (TID 240). 1338 bytes result sent to driver
21/10/26 18:05:51 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 254
21/10/26 18:05:51 INFO executor.Executor: Running task 264.0 in stage 1.0 (TID 254)
21/10/26 18:05:51 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 35433480192-35567697920, partition values: [empty row]
21/10/26 18:05:56 INFO executor.Executor: Finished task 264.0 in stage 1.0 (TID 254). 1338 bytes result sent to driver
21/10/26 18:05:56 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 268
21/10/26 18:05:56 INFO executor.Executor: Running task 283.0 in stage 1.0 (TID 268)
21/10/26 18:05:56 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 37983617024-38117834752, partition values: [empty row]
21/10/26 18:06:04 INFO executor.Executor: Finished task 283.0 in stage 1.0 (TID 268). 1338 bytes result sent to driver
21/10/26 18:06:04 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 283
21/10/26 18:06:04 INFO executor.Executor: Running task 308.0 in stage 1.0 (TID 283)
21/10/26 18:06:04 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 41339060224-41473277952, partition values: [empty row]
21/10/26 18:06:09 INFO executor.Executor: Finished task 308.0 in stage 1.0 (TID 283). 1338 bytes result sent to driver
21/10/26 18:06:09 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 298
21/10/26 18:06:09 INFO executor.Executor: Running task 336.0 in stage 1.0 (TID 298)
21/10/26 18:06:09 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 45097156608-45231374336, partition values: [empty row]
21/10/26 18:06:16 INFO executor.Executor: Finished task 336.0 in stage 1.0 (TID 298). 1338 bytes result sent to driver
21/10/26 18:06:16 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 314
21/10/26 18:06:16 INFO executor.Executor: Running task 352.0 in stage 1.0 (TID 314)
21/10/26 18:06:16 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 47244640256-47378857984, partition values: [empty row]
21/10/26 18:06:24 INFO executor.Executor: Finished task 352.0 in stage 1.0 (TID 314). 1338 bytes result sent to driver
21/10/26 18:06:24 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 330
21/10/26 18:06:24 INFO executor.Executor: Running task 363.0 in stage 1.0 (TID 330)
21/10/26 18:06:24 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 48721035264-48855252992, partition values: [empty row]
21/10/26 18:06:30 INFO executor.Executor: Finished task 363.0 in stage 1.0 (TID 330). 1338 bytes result sent to driver
21/10/26 18:06:30 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 346
21/10/26 18:06:30 INFO executor.Executor: Running task 376.0 in stage 1.0 (TID 346)
21/10/26 18:06:30 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 50465865728-50600083456, partition values: [empty row]
21/10/26 18:06:36 INFO executor.Executor: Finished task 376.0 in stage 1.0 (TID 346). 1338 bytes result sent to driver
21/10/26 18:06:36 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 363
21/10/26 18:06:36 INFO executor.Executor: Running task 399.0 in stage 1.0 (TID 363)
21/10/26 18:06:36 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 53552873472-53687091200, partition values: [empty row]
21/10/26 18:06:45 INFO executor.Executor: Finished task 399.0 in stage 1.0 (TID 363). 1338 bytes result sent to driver
21/10/26 18:06:45 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 376
21/10/26 18:06:45 INFO executor.Executor: Running task 410.0 in stage 1.0 (TID 376)
21/10/26 18:06:45 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 55029268480-55163486208, partition values: [empty row]
21/10/26 18:06:51 INFO executor.Executor: Finished task 410.0 in stage 1.0 (TID 376). 1338 bytes result sent to driver
21/10/26 18:06:51 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 393
21/10/26 18:06:51 INFO executor.Executor: Running task 433.0 in stage 1.0 (TID 393)
21/10/26 18:06:51 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 58116276224-58250493952, partition values: [empty row]
21/10/26 18:06:57 INFO executor.Executor: Finished task 433.0 in stage 1.0 (TID 393). 1338 bytes result sent to driver
21/10/26 18:06:57 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 410
21/10/26 18:06:57 INFO executor.Executor: Running task 459.0 in stage 1.0 (TID 410)
21/10/26 18:06:57 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 61605937152-61740154880, partition values: [empty row]
21/10/26 18:07:05 INFO executor.Executor: Finished task 459.0 in stage 1.0 (TID 410). 1338 bytes result sent to driver
21/10/26 18:07:07 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 423
21/10/26 18:07:07 INFO executor.Executor: Running task 74.0 in stage 1.0 (TID 423)
21/10/26 18:07:07 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 9932111872-10066329600, partition values: [empty row]
21/10/26 18:07:30 INFO executor.Executor: Finished task 74.0 in stage 1.0 (TID 423). 1381 bytes result sent to driver
21/10/26 18:07:30 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 437
21/10/26 18:07:30 INFO executor.Executor: Running task 240.0 in stage 1.0 (TID 437)
21/10/26 18:07:30 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 32212254720-32346472448, partition values: [empty row]
21/10/26 18:07:56 INFO executor.Executor: Finished task 240.0 in stage 1.0 (TID 437). 1338 bytes result sent to driver
21/10/26 18:07:56 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 450
21/10/26 18:07:56 INFO executor.Executor: Running task 335.0 in stage 1.0 (TID 450)
21/10/26 18:07:56 INFO datasources.FileScanRDD: Reading File path: hdfs://bj-azsyb-demo2.cloud.onecloud.io:8020/user/hive/warehouse/person/person_complex_1y.csv, range: 44962938880-45097156608, partition values: [empty row]
21/10/26 18:08:23 INFO executor.Executor: Finished task 335.0 in stage 1.0 (TID 450). 1338 bytes result sent to driver
21/10/26 18:08:37 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 467
21/10/26 18:08:37 INFO executor.Executor: Running task 4.0 in stage 2.0 (TID 467)
21/10/26 18:08:37 INFO spark.MapOutputTrackerWorker: Updating epoch to 1 and clearing cache
21/10/26 18:08:37 INFO broadcast.TorrentBroadcast: Started reading broadcast variable 5
21/10/26 18:08:37 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo4/10.172.7.205:31711 after 2 ms (0 ms spent in bootstraps)
21/10/26 18:08:37 INFO memory.MemoryStore: Block broadcast_5_piece0 stored as bytes in memory (estimated size 27.5 KB, free 396.7 MB)
21/10/26 18:08:37 INFO broadcast.TorrentBroadcast: Reading broadcast variable 5 took 29 ms
21/10/26 18:08:37 INFO memory.MemoryStore: Block broadcast_5 stored as values in memory (estimated size 88.5 KB, free 396.6 MB)
21/10/26 18:08:38 INFO spark.MapOutputTrackerWorker: Don't have map outputs for shuffle 0, fetching them
21/10/26 18:08:38 INFO spark.MapOutputTrackerWorker: Doing the fetch; tracker endpoint = NettyRpcEndpointRef(spark://MapOutputTracker@bj-azsyb-demo4:4602)
21/10/26 18:08:38 INFO spark.MapOutputTrackerWorker: Got the output locations
21/10/26 18:08:38 INFO storage.ShuffleBlockFetcherIterator: Getting 462 non-empty blocks including 27 local blocks and 435 remote blocks
21/10/26 18:08:38 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo3/10.172.7.210:7337 after 12 ms (0 ms spent in bootstraps)
21/10/26 18:08:38 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo4/10.172.7.205:7337 after 8 ms (0 ms spent in bootstraps)
21/10/26 18:08:38 INFO storage.ShuffleBlockFetcherIterator: Started 3 remote fetches in 94 ms
21/10/26 18:08:39 INFO codegen.CodeGenerator: Code generated in 93.089896 ms
21/10/26 18:08:39 INFO codegen.CodeGenerator: Code generated in 34.074133 ms
21/10/26 18:08:42 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 392.0 MB to disk (0  time so far)
...
21/10/26 18:12:31 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 392.0 MB to disk (12  times so far)
21/10/26 18:12:41 INFO codegen.CodeGenerator: Code generated in 279.726284 ms
21/10/26 18:12:43 INFO codegen.CodeGenerator: Code generated in 1491.693732 ms
21/10/26 18:12:43 INFO codegen.CodeGenerator: Code generated in 52.904251 ms
21/10/26 18:12:44 INFO codegen.CodeGenerator: Code generated in 223.329639 ms
21/10/26 18:12:44 INFO codegen.CodeGenerator: Code generated in 77.025746 ms
21/10/26 18:14:58 INFO executor.Executor: Finished task 4.0 in stage 2.0 (TID 467). 2328 bytes result sent to driver
21/10/26 18:14:58 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 490
21/10/26 18:14:58 INFO executor.Executor: Running task 27.0 in stage 2.0 (TID 490)
21/10/26 18:14:58 INFO storage.ShuffleBlockFetcherIterator: Getting 462 non-empty blocks including 27 local blocks and 435 remote blocks
21/10/26 18:14:58 INFO client.TransportClientFactory: Found inactive connection to bj-azsyb-demo4/10.172.7.205:7337, creating a new one.
21/10/26 18:14:58 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo4/10.172.7.205:7337 after 2 ms (0 ms spent in bootstraps)
21/10/26 18:14:58 INFO client.TransportClientFactory: Found inactive connection to bj-azsyb-demo3/10.172.7.210:7337, creating a new one.
21/10/26 18:14:59 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo3/10.172.7.210:7337 after 9 ms (0 ms spent in bootstraps)
21/10/26 18:14:59 INFO storage.ShuffleBlockFetcherIterator: Started 3 remote fetches in 18 ms
21/10/26 18:15:20 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 392.0 MB to disk (0  time so far)
...
21/10/26 18:18:03 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 392.0 MB to disk (12  times so far)
21/10/26 18:19:51 INFO executor.Executor: Finished task 27.0 in stage 2.0 (TID 490). 2328 bytes result sent to driver
21/10/26 18:20:10 INFO executor.CoarseGrainedExecutorBackend: Got assigned task 495
21/10/26 18:20:10 INFO executor.Executor: Running task 0.0 in stage 3.0 (TID 495)
21/10/26 18:20:10 INFO spark.MapOutputTrackerWorker: Updating epoch to 2 and clearing cache
21/10/26 18:20:10 INFO broadcast.TorrentBroadcast: Started reading broadcast variable 6
21/10/26 18:20:10 INFO memory.MemoryStore: Block broadcast_6_piece0 stored as bytes in memory (estimated size 61.0 KB, free 396.5 MB)
21/10/26 18:20:10 INFO broadcast.TorrentBroadcast: Reading broadcast variable 6 took 12 ms
21/10/26 18:20:10 INFO memory.MemoryStore: Block broadcast_6 stored as values in memory (estimated size 213.2 KB, free 396.3 MB)
21/10/26 18:20:18 INFO spark.MapOutputTrackerWorker: Don't have map outputs for shuffle 1, fetching them
21/10/26 18:20:18 INFO spark.MapOutputTrackerWorker: Doing the fetch; tracker endpoint = NettyRpcEndpointRef(spark://MapOutputTracker@bj-azsyb-demo4:4602)
21/10/26 18:20:18 INFO spark.MapOutputTrackerWorker: Got the output locations
21/10/26 18:20:18 INFO storage.ShuffleBlockFetcherIterator: Getting 32 non-empty blocks including 2 local blocks and 30 remote blocks
21/10/26 18:20:18 INFO client.TransportClientFactory: Found inactive connection to bj-azsyb-demo4/10.172.7.205:7337, creating a new one.
21/10/26 18:20:18 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo4/10.172.7.205:7337 after 3 ms (0 ms spent in bootstraps)
21/10/26 18:20:18 INFO storage.ShuffleBlockFetcherIterator: Started 1 remote fetches in 6 ms
21/10/26 18:20:18 INFO codegen.CodeGenerator: Code generated in 31.035276 ms
21/10/26 18:20:18 INFO codegen.CodeGenerator: Code generated in 275.525078 ms
21/10/26 18:20:20 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 388.0 MB to disk (0  time so far)
...
21/10/26 18:22:55 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 388.0 MB to disk (116  times so far)
21/10/26 18:23:06 INFO client.TransportClientFactory: Found inactive connection to bj-azsyb-demo3/10.172.7.210:7337, creating a new one.
21/10/26 18:23:06 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo3/10.172.7.210:7337 after 1 ms (0 ms spent in bootstraps)
21/10/26 18:23:06 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 388.0 MB to disk (117  times so far)
...
21/10/26 18:35:41 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 388.0 MB to disk (678  times so far)
21/10/26 18:35:42 INFO client.TransportClientFactory: Found inactive connection to bj-azsyb-demo4/10.172.7.205:7337, creating a new one.
21/10/26 18:35:42 INFO client.TransportClientFactory: Successfully created connection to bj-azsyb-demo4/10.172.7.205:7337 after 2 ms (0 ms spent in bootstraps)
21/10/26 18:35:42 INFO sort.UnsafeExternalSorter: Thread 41 spilling sort data of 388.0 MB to disk (679  times so far)
...
21/10/26 18:37:31 ERROR executor.CoarseGrainedExecutorBackend: RECEIVED SIGNAL TERM
21/10/26 18:37:31 INFO storage.DiskBlockManager: Shutdown hook called
21/10/26 18:37:31 INFO util.ShutdownHookManager: Shutdown hook called