mongodb - RDD only partially written to mongo

Tags: mongodb hadoop apache-spark

I am using Spark 1.3.1 and trying to save an RDD to MongoDB using version 1.3.2 of the mongo-hadoop connector and version 3.0.1 of the mongo-java-driver. When I run the application below on a standalone cluster, the driver is marked as failed.
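
For reference, a minimal sbt dependency block matching those versions would look roughly like this (the artifact coordinates are my assumption; only the version numbers come from the description above):

    // build.sbt sketch matching the versions named in the question;
    // the coordinates are assumed, not taken from the original post
    libraryDependencies ++= Seq(
      "org.apache.spark"         %% "spark-core"        % "1.3.1",
      "org.mongodb.mongo-hadoop" %  "mongo-hadoop-core" % "1.3.2",
      "org.mongodb"              %  "mongo-java-driver" % "3.0.1"
    )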

Here is the code I am using to reproduce the problem:

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

import org.apache.hadoop.conf.Configuration
import org.apache.spark.rdd.RDD

import org.bson.BasicBSONObject
import org.bson.BSONObject

object TestApp {

  def testSaveRddToMongo() {
    val sparkConf = new SparkConf().setAppName("Test")
    val sc = new SparkContext(sparkConf)

    val mongoConfig = new Configuration()
    mongoConfig.set("mongo.job.input.format","com.mongodb.hadoop.MongoInputFormat")
    mongoConfig.set("mongo.input.uri", "mongodb://some.local.ip:27017/mydb.input")

    val bsonRDD: RDD[(Object, BSONObject)] = sc.newAPIHadoopRDD(mongoConfig, classOf[com.mongodb.hadoop.MongoInputFormat], classOf[Object], classOf[BSONObject])

    val reasons: RDD[String] = bsonRDD.map( tuple => {
      tuple._2.asInstanceOf[BasicBSONObject].getString("fieldName").trim
      }).distinct().cache()

    val out: RDD[(String,Int)] = reasons.zipWithIndex().map { case (k,v) => (k,v.toInt)}

    println (s"Saving ${out.count} elements")
    val outputConfig = new Configuration()
    outputConfig.set("mongo.job.output.format","com.mongodb.hadoop.MongoOutputFormat")
    outputConfig.set("mongo.output.uri", "mongodb://some.local.ip:27017/mydb.garbage")
    out.saveAsNewAPIHadoopFile("file:///bogus", classOf[Any], classOf[Any], classOf[com.mongodb.hadoop.MongoOutputFormat[Any, Any]], outputConfig)
  }

  def main(args: Array[String]) {
    testSaveRddToMongo()
  }
}

In the driver's stderr I see this:

    15/05/15 14:18:43 INFO DAGScheduler: Job 2 failed: saveAsNewAPIHadoopFile at Test.scala:39, took 6.491961 s
    Exception in thread "main" java.lang.reflect.InvocationTargetException
            at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
            at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
            at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
            at java.lang.reflect.Method.invoke(Method.java:497)
            at org.apache.spark.deploy.worker.DriverWrapper$.main(DriverWrapper.scala:59)
            at org.apache.spark.deploy.worker.DriverWrapper.main(DriverWrapper.scala)
    Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 3 in stage 5.0 failed 4 times, most recent failure: Lost task 3.3 in stage 5.0 (TID 275, largo-ubuntu): 
java.lang.IllegalStateException: The pool is closed
            at com.mongodb.internal.connection.ConcurrentPool.get(ConcurrentPool.java:123)
            at com.mongodb.connection.DefaultConnectionPool.getPooledConnection(DefaultConnectionPool.java:243)
            at com.mongodb.connection.DefaultConnectionPool.get(DefaultConnectionPool.java:90)
            at com.mongodb.connection.DefaultConnectionPool.get(DefaultConnectionPool.java:80)
            at com.mongodb.connection.DefaultServer.getConnection(DefaultServer.java:69)
            at com.mongodb.binding.ClusterBinding$ClusterBindingConnectionSource.getConnection(ClusterBinding.java:86)
            at com.mongodb.operation.OperationHelper.withConnectionSource(OperationHelper.java:184)
            at com.mongodb.operation.OperationHelper.withConnection(OperationHelper.java:177)
            at com.mongodb.operation.BaseWriteOperation.execute(BaseWriteOperation.java:106)
            at com.mongodb.operation.BaseWriteOperation.execute(BaseWriteOperation.java:58)
            at com.mongodb.Mongo.execute(Mongo.java:745)
            at com.mongodb.Mongo$2.execute(Mongo.java:728)
            at com.mongodb.DBCollection.executeWriteOperation(DBCollection.java:327)
            at com.mongodb.DBCollection.replaceOrInsert(DBCollection.java:405)
            at com.mongodb.DBCollection.save(DBCollection.java:394)
            at com.mongodb.DBCollection.save(DBCollection.java:367)
            at com.mongodb.hadoop.output.MongoRecordWriter.write(MongoRecordWriter.java:105)
            at org.apache.spark.rdd.PairRDDFunctions$$anonfun$12.apply(PairRDDFunctions.scala:1000)
            at org.apache.spark.rdd.PairRDDFunctions$$anonfun$12.apply(PairRDDFunctions.scala:979)
            at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
            at org.apache.spark.scheduler.Task.run(Task.scala:64)
            at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
            at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
            at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
            at java.lang.Thread.run(Thread.java:745)

    Driver stacktrace:
            at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1204)
            at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1193)
            at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1192)
            at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
            at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
            at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1192)
            at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:693)
            at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:693)
            at scala.Option.foreach(Option.scala:236)
            at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:693)
            at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1393)
            at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1354)
            at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)

Why is the connection being closed? Is there an exception somewhere else that I am not seeing?

FIX

Following maasg's answer below, writing the results with casbah works. I updated the code as follows:

import com.mongodb.casbah.Imports._
...
    println (s"Saving ${out.count} elements")
    val uri = MongoClientURI("mongodb://some.local.ip:27017/mydb.garbage")
    val mongoClient = MongoClient(uri)
    val collection = mongoClient(uri.database.get)(uri.collection.get)
    collection.drop()
    val builder = collection.initializeUnorderedBulkOperation
    for ((value, index) <- out.collect()) { builder.insert(MongoDBObject(("_id" -> value), ("value" -> index))) }
    builder.execute()
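
As a quick sanity check (not part of the original fix), the same casbah client can be used to count what was actually written and compare it against the count printed above:

    // hypothetical check: confirm that every element reached the collection
    val written = collection.count()
    println(s"Collection now holds $written documents (expected ${out.count})")
    mongoClient.close()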

BETTER FIX

Here is a better version, which does one batch of writes per partition:

...
  def dropCollection(uriString: String) {
    val uri = MongoClientURI(uriString)
    val mongoClient = MongoClient(uri)
    val collection = mongoClient(uri.database.get)(uri.collection.get)
    collection.drop()
    mongoClient.close()
  }

  def saveResultsToMongo(out: RDD[(String,Int)], uriString: String) {
    out.foreachPartition( itr => {
      // One client and one bulk operation per partition: the BulkWriteOperation
      // is not serializable, so it has to be created inside the partition.
      val uri = MongoClientURI(uriString)
      val mongoClient = MongoClient(uri)
      val collection = mongoClient(uri.database.get)(uri.collection.get)
      val builder = collection.initializeUnorderedBulkOperation
      for ( (value, index) <- itr ) { builder.insert(MongoDBObject(("_id" -> value), ("value" -> index))) }
      builder.execute()
      mongoClient.close()
      })
  }
...
    println (s"Saving ${out.count} elements")
    dropCollection("mongodb://10.22.128.84:27017/Minerva.garbage")
    saveResultsToMongo(out, "mongodb://10.22.128.84:27017/Minerva.garbage")

Some notes:

  • out.foreach{ case (value, index) => builder.insert(MongoDBObject(("_id" -> value), ("value" -> index))) } does not work, because the BulkWriteOperation is not serializable
    • However, out.foreachPartition can be used, as maasg suggested and as shown in the BETTER FIX above
  • casbah 1.8.1 is not compatible with mongo-java-driver 3.0.x; it uses 2.13.1

Best answer

The hadoop-mongo connector did not work reliably with Spark before the 1.4 release: highly parallel workloads leak client connections, which leads to failures. In our case this bug was the key issue: https://jira.mongodb.org/browse/HADOOP-143. As you can see, the fix has been merged for the 1.4 release.
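
For example, once 1.4 is published, picking up that fix should just be a matter of bumping the connector dependency; a build.sbt sketch (the artifact coordinates and exact version string are my assumption, only the 1.4 milestone comes from the ticket):

    // build.sbt sketch: move to the 1.4 line of the connector, which contains
    // the HADOOP-143 connection-leak fix; coordinates are assumed
    libraryDependencies += "org.mongodb.mongo-hadoop" % "mongo-hadoop-core" % "1.4.0"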

As a workaround, I can recommend using the casbah client (a Scala wrapper around the Java client) together with bulk operations.

