- Nebula version: 1.0.1
- Deployment (distributed / standalone / Docker / DBaaS): distributed
- How the problematic Space was created: CREATE SPACE nba (partition_num = 10, replica_factor = 1, charset = , collate = )
- Description of the problem:
Following the spark-connector documentation, I built nebula-spark and uploaded the resulting jar to the server, then ran the ConnectReaderExample code in spark-shell, but it fails with a "Method not found" error.
The code is as follows. First, start spark-shell with the connector jar:

spark-shell --jars "/xxx/nebula-spark-1.0.1.jar"

Then, in the shell:
import com.facebook.thrift.protocol.TCompactProtocol
import com.vesoft.nebula.tools.connector.NebulaDataFrameReader
import org.apache.spark.SparkConf
import org.apache.spark.graphx.Graph
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import org.slf4j.LoggerFactory
def readNebulaVertex(spark: SparkSession): Unit = {
  // LOG.info("start loading nebula vertex to DataFrame ========")
  // Read all properties ("*") of the "player" tag from space "nba" into a DataFrame;
  // the nebula() arguments are the metad address (redacted here), the space name,
  // and the read partition number.
  val vertexDataset: Dataset[Row] =
    spark.read
      .nebula("xxx.xxx.xxx", "nba", "10")
      .loadVerticesToDF("player", "*")
  val count = vertexDataset.count()
  vertexDataset.printSchema()
  vertexDataset.show()
  // LOG.error("**********vertex count:" + count, null)
}
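
// For completeness, the edge-reading counterpart, sketched from the same
// spark-connector example; the "serve" edge type is an assumption based on the
// standard nba sample dataset, not something verified against my space.
def readNebulaEdge(spark: SparkSession): Unit = {
  val edgeDataset: Dataset[Row] =
    spark.read
      .nebula("xxx.xxx.xxx", "nba", "10")
      .loadEdgesToDF("serve", "*")
  edgeDataset.printSchema()
  edgeDataset.show()
}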
// Driver code: register the thrift TCompactProtocol class with Kryo, as in the example.
val sparkConf = new SparkConf()
sparkConf
  .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  .registerKryoClasses(Array[Class[_]](classOf[TCompactProtocol]))
val spark = SparkSession.builder().master("local").config(sparkConf).getOrCreate()
readNebulaVertex(spark)
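
In case it helps narrow this down, here is a minimal check (my own snippet, not from the connector docs) that can be run in the same spark-shell session to confirm which jar the implicit reader class actually resolves from:

// Print the location the NebulaDataFrameReader class was loaded from; a stale or
// duplicate jar on the classpath is a common cause of "method not found" errors.
println(classOf[NebulaDataFrameReader].getProtectionDomain.getCodeSource.getLocation)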