Running Multiple Queries in Spark Structured Streaming with Watermarking and Windowed Aggregations

My goal is to read data from multiple Kafka topics, aggregate it, and write it to HDFS. I loop over the list of Kafka topics to create multiple queries. The code works fine when running a single query, but fails when multiple queries are running. I keep the checkpoint directories separate for each topic, since I have read in many posts that sharing them can cause problems like this.

The code is as follows:

object CombinedDcAggStreaming {

  def main(args: Array[String]): Unit = {


    val jobConfigFile = "configPath"

    /* Read input configuration */
    val jobProps = Util.loadProperties(jobConfigFile).asScala

    val sparkConfigFile = jobProps.getOrElse("spark_config_file", throw new RuntimeException("Can't find spark property file"))
    val kafkaConfigFile = jobProps.getOrElse("kafka_config_file", throw new RuntimeException("Can't find kafka property file"))

    val sparkProps = Util.loadProperties(sparkConfigFile).asScala
    val kafkaProps = Util.loadProperties(kafkaConfigFile).asScala

    val topicList = Seq("topic_1", "topic_2")
    val avroSchemaFile = jobProps.getOrElse("schema_file", throw new RuntimeException("Can't find schema file..."))
    val checkpointLocation = jobProps.getOrElse("checkpoint_location", throw new RuntimeException("Can't find check point directory..."))
    val triggerInterval = jobProps.getOrElse("triggerInterval", throw new RuntimeException("Can't find trigger interval..."))
    val outputPath = jobProps.getOrElse("output_path", throw new RuntimeException("Can't find output directory..."))
    val outputFormat = jobProps.getOrElse("output_format", throw new RuntimeException("Can't find output format...")) //"parquet"
    val outputMode = jobProps.getOrElse("output_mode", throw new RuntimeException("Can't find output mode...")) //"append"
    val partitionByCols = jobProps.getOrElse("partition_by_columns", throw new RuntimeException("Can't find partition by columns...")).split(",").toSeq

    val spark = SparkSession.builder.appName("streaming").master("local[4]").getOrCreate()
    sparkProps.foreach(prop => spark.conf.set(prop._1, prop._2))

    topicList.foreach(
      topicId => {

        kafkaProps.update("subscribe", topicId)


        val schemaPath = avroSchemaFile + "/" + topicId + ".avsc"


        val dimensionMap = ConfigUtils.getDimensionMap(jobProps)
        val measureMap = ConfigUtils.getMeasureMap(jobProps)

        val source = Source.fromInputStream(Util.getInputStream(schemaPath)).getLines.mkString
        val schemaParser = new Schema.Parser
        val schema = schemaParser.parse(source)
        val sqlTypeSchema = SchemaConverters.toSqlType(schema).dataType.asInstanceOf[StructType]

        val kafkaStreamData = spark
          .readStream
          .format("kafka")
          .options(kafkaProps)
          .load()

        val udfDeserialize = udf(deserialize(source), DataTypes.createStructType(sqlTypeSchema.fields))

        val transformedDeserializedData = kafkaStreamData.select("value").as(Encoders.BINARY)
          .withColumn("rows", udfDeserialize(col("value")))
          .select("rows.*")
          .withColumn("end_time", (col("end_time") / 1000).cast(LongType))
          .withColumn("timestamp", from_unixtime(col("end_time"),"yyyy-MM-dd HH").cast(TimestampType))
          .withColumn("year", from_unixtime(col("end_time"),"yyyy").cast(IntegerType))
          .withColumn("month", from_unixtime(col("end_time"),"MM").cast(IntegerType))
          .withColumn("day", from_unixtime(col("end_time"),"dd").cast(IntegerType))
          .withColumn("hour",from_unixtime(col("end_time"),"HH").cast(IntegerType))
          .withColumn("topic_id", lit(topicId))

        val groupBycols: Array[String] = dimensionMap.keys.toArray[String] ++ partitionByCols.toArray[String]

        val aggregatedData = AggregationUtils.aggregateDFWithWatermarking(transformedDeserializedData, groupBycols, "timestamp", "10 minutes", measureMap) //Watermarking time -> 10 minutes, window -> window("timestamp", "5 minutes")

        val query = aggregatedData
          .writeStream
          .trigger(Trigger.ProcessingTime(triggerInterval))
          .outputMode("update")
          .format("console")
          .partitionBy(partitionByCols: _*)
          .option("path", outputPath)
          .option("checkpointLocation", checkpointLocation + "//" + topicId)
          .start()
      })

    spark.streams.awaitAnyTermination()

    def deserialize(source: String): Array[Byte] => Option[Row] = (data: Array[Byte]) => {
      try {
        val parser = new Schema.Parser
        val schema = parser.parse(source)
        val recordInjection: Injection[GenericRecord, Array[Byte]] = GenericAvroCodecs.toBinary(schema)
        val record = recordInjection.invert(data).get
        val objectArray = new Array[Any](record.asInstanceOf[GenericRecord].getSchema.getFields.size)
        record.getSchema.getFields.asScala.foreach(field => {
          val fieldVal = record.get(field.pos()) match {
            case x: org.apache.avro.util.Utf8 => x.toString
            case y: Any => y
            case _ => None
          }
          objectArray(field.pos()) = fieldVal
        })
        Some(Row(objectArray: _*))
      } catch {
        case ex: Exception => {
          log.info(s"Failed to parse schema with error: ${ex.printStackTrace()}")
          None
        }
      }
    }
  }
}

I get the following error when running the job:

java.lang.IllegalStateException: Race while writing batch 0

But the job runs fine when I run a single query instead of multiple ones. Any suggestions on how to fix this?

This may be a late answer, but I ran into the same problem.

I was able to resolve it. The root cause was that both queries were trying to write to the same base path, so the _spark_metadata information overlapped. Besides checkpoints, Spark Structured Streaming also maintains a _spark_metadata log to track which batches have been processed.
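To make the overlap concrete, here is a minimal sketch of the problematic setup (df1, df2, outputPath and checkpointDir are illustrative names, and a parquet file sink is assumed): both queries point at one base path, so they also share the single _spark_metadata log underneath it and race when committing their first batch.

// Both queries write to the same output path, so they also share
// outputPath/_spark_metadata and collide when committing batch 0.
val q1 = df1.writeStream
  .format("parquet")
  .option("path", outputPath)                               // same base path...
  .option("checkpointLocation", checkpointDir + "/topic_1") // separate checkpoints do not help here
  .start()

val q2 = df2.writeStream
  .format("parquet")
  .option("path", outputPath)                               // ...same base path -> shared metadata log
  .option("checkpointLocation", checkpointDir + "/topic_2")
  .start()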

From the Spark source documentation:

In order to handle partial failures while maintaining exactly once semantics, each batch is written out to a unique directory and then atomically appended to a metadata log. When a parquet based DataSource is initialized for reading, we first check for this log directory and use it instead of file listing when present.

So each query needs its own output path. Unlike the checkpoint location, there is no option to configure where _spark_metadata is written.
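A minimal sketch of the fix under that constraint (the per-topic suffix on the output path is an illustrative choice, not part of the original code): give each query its own base directory, so each file sink keeps its own _spark_metadata log.

// Inside the topicList.foreach loop: one output directory per query,
// so every file sink maintains its own _spark_metadata log.
val query = aggregatedData
  .writeStream
  .trigger(Trigger.ProcessingTime(triggerInterval))
  .outputMode(outputMode)                                             // e.g. "append" for a file sink
  .format(outputFormat)                                               // e.g. "parquet"
  .partitionBy(partitionByCols: _*)
  .option("path", outputPath + "/" + topicId)                         // separate output path per topic
  .option("checkpointLocation", checkpointLocation + "/" + topicId)   // separate checkpoint per topic
  .start()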

Link to the same type of question which I asked.