We are trying to run INSERT SQL against Hive, with the data coming from a DataFrame in Spark. The session in use has everything it needs (Hive support is enabled).
There are two problems:
Issue) The INSERT fails even when we create the session inside the forEach loop, and it fails with both approaches we have tried:
1) a DataFrame write
2) direct Spark SQL
Below is the code (the Spark SQL approach):
import java.time.Instant
import org.apache.spark.sql.{DataFrame, Row, types}
import org.apache.spark.sql.functions.{current_timestamp, first, isnull, lit, max}
import org.apache.spark.sql.types.{StringType, StructField, StructType, TimestampType}
import com.typesafe.config.ConfigFactory
import scala.collection.mutable.ListBuffer
class Controller extends DatabaseServices
  with Loggers {

  val session = createSparkSession(ConfigFactory.load().getString("local.common.spark.app.name"))
  val producer = session.sparkContext.broadcast(KafkaWrapper())

  def doIt(TranIDs: DataFrame): Unit = {
    import session.sqlContext.implicits._

    // Union the incoming IDs with those already in the producer log view,
    // keeping only non-null references.
    val TranID = TranIDs
      .withColumnRenamed("TranID", "REFERENCE_TranID")
      .select($"REFERENCE_TranID")
      .union(session.table(BANK_ROLLBACK_TXN_PRODUCER_LOG_VIEW)
        .withColumnRenamed("TranID", "REFERENCE_TranID")
        .select($"REFERENCE_TranID"))
      .where($"REFERENCE_TranID".isNotNull)

    if (TranID.count() == 0) {
      throw new Exception("No rows.")
    }

    val core = session
      .table(BANK_TRANS_MASTER_CORE)
      .withColumnRenamed("TranID", "MASTER_REFERENCE_TranID")
      .withColumnRenamed("CLIENTID", "REF_CLIENT_ID")
      .withColumnRenamed("SUBCLIENTID", "REF_SUBCLIENT_ID")
      .select($"MASTER_REFERENCE_TranID", $"TranIDDATE")
      .join(TranID, TranID.col("REFERENCE_TranID") === $"MASTER_REFERENCE_TranID")

    val ref = session
      .table(BANK_RBI_REF_CLIENT)
      .select($"CLIENTID", $"SUBCLIENTID", $"FLAGTRE")
      .join(core, $"CLIENTID" === core.col("REF_CLIENT_ID")
        && $"SUBCLIENTID" === core.col("REF_SUBCLIENT_ID"))

    val details = session
      .table(BANK_TRANS_MASTER_DETAILS)
      .select($"TranID", $"REALFRAUD", $"REALFRAUDDATEBAE", $"REALFRAUDYYYYMMDD")
      .join(ref, ref.col("MASTER_REFERENCE_TranID") === $"TranID"
        && $"REALFRAUD" === lit("Y"))
      .where($"TranID".isNotNull && $"TranIDDATE".isNotNull)
      .groupBy($"TranID")
      .agg(first($"TranID").as("TranID"),
        first(core("TranIDDATE")).cast("String").as("TranIDDATE"),
        max($"REALFRAUDDATEBAE").as("REALFRAUDDATEBAE"),
        max($"REALFRAUDYYYYMMDD").as("REALFRAUDYYYYMMDD"),
        first($"REALFRAUD").as("REALFRAUD"),
        first($"ABA").as("ABA"))

    // NOTE: this closure executes on the executors; `session` is only usable
    // on the driver.
    details.foreach(row => {
      import scala.collection.JavaConversions._

      // Null-safe read of a column as a String.
      def attr(name: String): String =
        Option(row.getAs[Any](name)).map(_.toString).orNull

      val transaction = TxUpdate.newBuilder()
        .setTranID(row.getAs("TranID").toString)
        .setTranIDDATE(row.getAs("TranIDDATE").toString)
        .setAttributes(ListBuffer(
          Attribute.newBuilder.setKey("REALFRAUD").setValue(attr("REALFRAUD")).build(),
          Attribute.newBuilder.setKey("REALFRAUDDATEBAE").setValue(attr("REALFRAUDDATEBAE")).build(),
          Attribute.newBuilder.setKey("REALFRAUDYYYYMMDD").setValue(attr("REALFRAUDYYYYMMDD")).build(),
          Attribute.newBuilder.setKey("ABA").setValue(attr("ABA")).build()))
        .build()

      val status = if (producer.value.sendSync(
        ConfigFactory.load().getString("local.common.kafka.rollbackKafkaTopicName"),
        transaction.getTranID.toString,
        transaction)) "S" else "F"

      session.sqlContext.sql("insert into " + BANK_ROLLBACK_TXN_PRODUCER_LOG +
        "(TranID, when_loaded, status) values('" + transaction.getTranID.toString +
        "', current_timestamp(), '" + status + "')")
    })
  }
}
Best Answer
The error here isn't clear from what you've posted.
At a high level, you can use a Hive-enabled session in Spark and then persist the DataFrame straight to the Hive table with the append save option. That will be much faster than executing per-row inserts. The flow would be as follows (a minimal sketch is given after the steps):
Step 0 - All of this has to happen within a single Spark session. You do not need to create a separate session for every insert; in a sense, doing so is meaningless.
a. Create a DataFrame whose columns match those of the underlying Hive table.
b. During Spark processing, that DataFrame accumulates the data that will eventually be persisted to Hive.
c. Save the DataFrame with the append option ("Insert Into Hive").
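In code, that flow might look like the sketch below. This is a minimal illustration rather than the poster's actual job: the table and column names (BANK_ROLLBACK_TXN_PRODUCER_LOG, TranID, when_loaded, status) are borrowed from the question's INSERT statement, and the sample (TranID, status) pairs are hypothetical stand-ins for whatever the Kafka send loop produces.

// A minimal sketch, assuming Spark 2.x+: one Hive-enabled session for the whole
// job, a DataFrame shaped like the target table, and a single append at the end.
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.current_timestamp

// Step 0: create the session once, on the driver, with Hive support enabled.
val spark = SparkSession.builder()
  .appName("rollback-producer-log")
  .enableHiveSupport()
  .getOrCreate()

import spark.implicits._

// Steps a/b: build a DataFrame whose columns line up with the Hive table.
// The (TranID, status) pairs here are placeholders for the Kafka send results.
val statuses = Seq(("TXN-001", "S"), ("TXN-002", "F")).toDF("TranID", "status")

val toLog = statuses
  .withColumn("when_loaded", current_timestamp())
  .select("TranID", "when_loaded", "status") // order must match the table definition

// Step c: persist everything to Hive in one append instead of per-row INSERTs.
toLog.write.mode("append").insertInto("BANK_ROLLBACK_TXN_PRODUCER_LOG")

Note that insertInto resolves columns by position rather than by name, so the select above must list the columns in the same order as the Hive table definition.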
Hope this helps you see how to approach the problem.
On apache-spark - looping HIVE SQL inserts in Spark, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/52545121/