#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

import sys

from pyspark import SparkContext

"""
Create test table in HBase first:

hbase(main):001:0> create 'test', 'f1'
0 row(s) in 0.7840 seconds

Then run this example (each invocation writes one cell):

> hbase_outputformat <host> test row1 f1 q1 value1
> hbase_outputformat <host> test row2 f1 q1 value2
> hbase_outputformat <host> test row3 f1 q1 value3
> hbase_outputformat <host> test row4 f1 q1 value4

hbase(main):002:0> scan 'test'
ROW                   COLUMN+CELL
 row1                 column=f1:q1, timestamp=1405659615726, value=value1
 row2                 column=f1:q1, timestamp=1405659626803, value=value2
 row3                 column=f1:q1, timestamp=1405659640106, value=value3
 row4                 column=f1:q1, timestamp=1405659650292, value=value4
4 row(s) in 0.0780 seconds
"""
if __name__ == "__main__":
    if len(sys.argv) != 7:
        print("""
        Usage: hbase_outputformat <host> <table> <row> <family> <qualifier> <value>

        Run with example jar:
        ./bin/spark-submit --driver-class-path /path/to/example/jar /path/to/examples/hbase_outputformat.py <args>
        Assumes you have created <table> with column family <family> in HBase running on <host> already
        """, file=sys.stderr)
        sys.exit(-1)

    host = sys.argv[1]
    table = sys.argv[2]
    sc = SparkContext(appName="HBaseOutputFormat")

    # Configure the new-API Hadoop job: the ZooKeeper quorum locates the
    # HBase cluster, and the output key/value classes must match what the
    # converters below produce.
    conf = {"hbase.zookeeper.quorum": host,
            "hbase.mapred.outputtable": table,
            "mapreduce.outputformat.class": "org.apache.hadoop.hbase.mapreduce.TableOutputFormat",
            "mapreduce.job.output.key.class": "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
            "mapreduce.job.output.value.class": "org.apache.hadoop.io.Writable"}

    # Each record is (rowkey, [row, family, qualifier, value]); the converters
    # turn the key into an ImmutableBytesWritable and the value list into an
    # HBase Put.
    sc.parallelize([sys.argv[3:]]).map(lambda x: (x[0], x)).saveAsNewAPIHadoopDataset(
        conf=conf,
        keyConverter="org.apache.spark.examples.pythonconverters.StringToImmutableBytesWritableConverter",
        valueConverter="org.apache.spark.examples.pythonconverters.StringListToPutConverter")
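
    # A minimal sketch (not part of the original example): saveAsNewAPIHadoopDataset
    # writes every record in the RDD, so a batch of rows could be written in a
    # single job with the same conf and converters. The row values below are
    # hypothetical.
    #
    # rows = [["row5", "f1", "q1", "value5"], ["row6", "f1", "q1", "value6"]]
    # sc.parallelize(rows).map(lambda x: (x[0], x)).saveAsNewAPIHadoopDataset(
    #     conf=conf,
    #     keyConverter="org.apache.spark.examples.pythonconverters.StringToImmutableBytesWritableConverter",
    #     valueConverter="org.apache.spark.examples.pythonconverters.StringListToPutConverter")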

    sc.stop()