nebula 版本:2.5.0
部署方式(分布式 rpm):
是否为线上版本:Y
硬件信息
磁盘:500G SSD
CPU:16c,内存:32G
问题的具体描述
nebula2.5.0 sst 导入连接hive生成sst 报错
tags: [
# 与上述类似
# 从 Hive 加载数据时,将执行 ${exec} 命令,其查询结果作为数据集
{
name: Thing
type: {
source: hive
sink: SST
}
exec: "select thing_id, thing_name, thing_title from oppo_kg_dw.dwd_kg_release_spo_thing_df_v3_4_ht_v6 where ds = '20211011' limit 300"
fields: [ thing_name, thing_title]
nebula.fields: [Thing_name, Thing_title]
vertex: {field:thing_id}
header: true
batch: 128
partition: 24
}
]
21/10/19 14:27:56 ERROR VerticesProcessor: java.lang.RuntimeException: Unsupported default value yet
java.lang.RuntimeException: Unsupported default value yet
at com.vesoft.nebula.encoder.RowWriterImpl.checkUnsetFields(RowWriterImpl.java:766)
at com.vesoft.nebula.encoder.RowWriterImpl.finish(RowWriterImpl.java:855)
at com.vesoft.nebula.encoder.NebulaCodecImpl.encode(NebulaCodecImpl.java:200)
at com.vesoft.nebula.encoder.NebulaCodecImpl.encodeTag(NebulaCodecImpl.java:157)
at com.vesoft.nebula.exchange.processor.VerticesProcessor$$anonfun$process$1$$anonfun$apply$1.apply(VerticesProcessor.scala:175)
at com.vesoft.nebula.exchange.processor.VerticesProcessor$$anonfun$process$1$$anonfun$apply$1.apply(VerticesProcessor.scala:127)
at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage5.sort_addToSorter_0$(Unknown Source)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage5.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$11$$anon$1.hasNext(WholeStageCodegenExec.scala:619)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
at scala.collection.Iterator$class.foreach(Iterator.scala:891)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
at com.vesoft.nebula.exchange.processor.VerticesProcessor$$anonfun$process$2.apply(VerticesProcessor.scala:189)
at com.vesoft.nebula.exchange.processor.VerticesProcessor$$anonfun$process$2.apply(VerticesProcessor.scala:181)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:935)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2101)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:121)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)