scala> val parquetDF = sqlContext.read.parquet("hdfs://hadoop14:9000/yuhui/parquet/part-r-00004.gz.parquet")
parquetDF: org.apache.spark.sql.DataFrame = [timestamp: string, appkey: string, app_version: string, channel: string, lang: string, os_type: string, os_version: string, display: string, ...
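As a follow-up sketch (not part of the original transcript), one might inspect the schema that Spark inferred from the Parquet footer and query the data through a temporary table; the table name "events" below is hypothetical, and the calls use the same Spark 1.x sqlContext shown above.

scala> parquetDF.printSchema()
scala> parquetDF.registerTempTable("events")
scala> sqlContext.sql("SELECT channel, count(*) AS cnt FROM events GROUP BY channel").show()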