How do I read a record from HBase and store it in a Spark RDD (Resilient Distributed Dataset), and read a record from an RDD and write it into HBase?

I want to write code that reads a record from Hadoop HBase, stores it in a Spark RDD (Resilient Distributed Dataset), and then reads a record from an RDD and writes it into HBase. I know nothing about either of them, and I need to use the AWS cloud or a Hadoop virtual machine. Could someone please guide me from scratch?

Here is basic Scala code for reading data from HBase. In the same way you can create a table and write data into HBase; a sketch of the write path follows the code below.

import org.apache.hadoop.hbase.client.{HBaseAdmin, Result}
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor}
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable

import org.apache.spark._

object HBaseApp {
  def main(args: Array[String]) {
    // Run Spark locally with two worker threads.
    val sparkConf = new SparkConf().setAppName("HBaseApp").setMaster("local[2]")
    val sc = new SparkContext(sparkConf)
    val conf = HBaseConfiguration.create()
    val tableName = "table1"

    // Run as the hdfs user.
    System.setProperty("user.name", "hdfs")
    System.setProperty("HADOOP_USER_NAME", "hdfs")
    // Point at a local HBase master and ZooKeeper quorum.
    conf.set("hbase.master", "localhost:60000")
    conf.setInt("hbase.rpc.timeout", 100000)
    conf.set("hbase.zookeeper.quorum", "localhost")
    conf.set("zookeeper.znode.parent", "/hbase-unsecure")
    // Tell TableInputFormat which table to scan.
    conf.set(TableInputFormat.INPUT_TABLE, tableName)

    // Create the table if it does not exist yet; HBase requires
    // at least one column family, so add one ("cf") here.
    val admin = new HBaseAdmin(conf)
    if (!admin.isTableAvailable(tableName)) {
      val tableDesc = new HTableDescriptor(tableName)
      tableDesc.addFamily(new HColumnDescriptor("cf"))
      admin.createTable(tableDesc)
    }
    admin.close()

    // Load the table as an RDD of (rowkey, Result) pairs.
    val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])
    println("Number of records found: " + hBaseRDD.count())
    sc.stop()
  }
}
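
For the second half of the question (writing an RDD into HBase), here is a minimal sketch that first pulls cell values out of hBaseRDD and then writes an RDD of (rowkey, value) pairs back into the table with TableOutputFormat, using the same 0.98-era client API as above. It would slot into main() before sc.stop(). The column family "cf", qualifier "col1", and the sample rows are assumptions for illustration; replace them with your own schema and data.

import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job

// Pull the value of cf:col1 out of every row read above
// ("cf" and "col1" are assumed names, not from the original post).
val values = hBaseRDD.map { case (_, result) =>
  Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("col1")))
}
values.collect().foreach(println)

// Configure TableOutputFormat to write back to the same table.
conf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
val job = Job.getInstance(conf)
job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

// Turn an RDD of (rowkey, value) pairs into Puts and save them.
val puts = sc.parallelize(Seq(("row1", "value1"), ("row2", "value2")))
  .map { case (rowKey, value) =>
    val put = new Put(Bytes.toBytes(rowKey))
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("col1"), Bytes.toBytes(value))
    (new ImmutableBytesWritable(Bytes.toBytes(rowKey)), put)
  }
puts.saveAsNewAPIHadoopDataset(job.getConfiguration)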