I've been cleaning historical data lately. The raw data is plain text files, and the goal is to clean it into HBase so that it can later be integrated with Hive for querying.
These days most people probably do this kind of cleaning with Spark, but when you're constrained by the hardware you have, Spark simply won't run, so there's nothing for it but to write good old MapReduce. Without further ado, here's the code.
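For context, each input record is one tab-separated line; the jobs below only read field 1 (the created date), field 4 (the id), and field 5 (the uid), counting from zero. A made-up sample line, with <TAB> standing in for a real tab (the values and the unused fields are purely illustrative):

f0<TAB>2019-07-01 12:00:00<TAB>f2<TAB>f3<TAB>10001<TAB>20001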
import com.gey.hbase.helper.HBaseHelper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.MultiTableOutputFormat;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import java.io.IOException;
/**
* @author ly
* @date 2019/7/24 13:59
* @description
*/
public class HBaseMultiTableOutputApp {
public static class HBaseMultiTableOutputMapper extends Mapper<LongWritable, Text, Text, Text> {
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String[] splits = value.toString().split("\t");
String createdDate = splits[1];
String enable ="1";
String id = splits[4];
String uid = splits[5];
//生成rowkey
String rk = HBaseHelper.getRowkey(id,uid);
String s = id+"\t"+uid+"\t"+createdDate+"\t"+enable;
context.write(new Text(rk),new Text(s));
}
}
public static class HBaseMultiTableOutputReducer extends Reducer<Text, Text, ImmutableBytesWritable, Put> {
// records are routed to one of ten tables (user_0 .. user_9) by id % 10
private final ImmutableBytesWritable[] userTables = new ImmutableBytesWritable[10];
@Override
protected void setup(Context context) throws IOException, InterruptedException {
// initialize the table-name wrappers once per task
for (int i = 0; i < userTables.length; i++) {
userTables[i] = new ImmutableBytesWritable(Bytes.toBytes("user_" + i));
}
}
@Override
protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
for (Text value : values) {
String[] splits = value.toString().split("\t");
Put put = new Put(Bytes.toBytes(key.toString()));
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("id"), Bytes.toBytes(splits[0]));
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("uid"), Bytes.toBytes(splits[1]));
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("epochs"), Bytes.toBytes(splits[3]));
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("time"), Bytes.toBytes(splits[2]));
// pick the target table by id % 10 (assumes non-negative ids)
int idx = (int) (Long.parseLong(splits[0]) % 10);
context.write(userTables[idx], put);
}
}
}
public static void main(String[] args) throws Exception{
Configuration conf = new Configuration();
conf.set("hbase.zookeeper.quorum","192.168.32.101,192.168.32.102,192.168.32.103");
conf.set("hbase.zookeeper.port", "2181");
conf.set("zookeeper.znode.parent","/hbase");
// create the job
Job job = Job.getInstance(conf, "HBaseMultiTableOutputApp");
// set the job's classes
job.setJarByClass(HBaseMultiTableOutputApp.class);
job.setMapperClass(HBaseMultiTableOutputMapper.class);
job.setReducerClass(HBaseMultiTableOutputReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
// set the input and output formats
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(MultiTableOutputFormat.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
boolean result = job.waitForCompletion(true);
System.exit(result ? 0 : 1); // exit code 0 on success
}
}
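Note that neither the job nor MultiTableOutputFormat creates the target tables; user_0 through user_9 must already exist with the info column family. A minimal sketch for creating them up front with the HBase 1.x admin API that CDH 5.x ships (the class name is mine; the quorum is the same one used in the job above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateUserTables {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.32.101,192.168.32.102,192.168.32.103");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            for (int i = 0; i < 10; i++) {
                TableName name = TableName.valueOf("user_" + i);
                if (!admin.tableExists(name)) {
                    // one table per shard, single column family "info"
                    HTableDescriptor desc = new HTableDescriptor(name);
                    desc.addFamily(new HColumnDescriptor("info"));
                    admin.createTable(desc);
                }
            }
        }
    }
}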
Thoughts: I originally wanted to use only a Mapper here, with no Reducer, but that version failed with an error and I haven't figured out why yet; I'll dig into it when I have time. My unverified guess is that a map-only job needs job.setNumReduceTasks(0) with the Mapper emitting the table name and Put directly, as sketched below. Anyway, the above covers cleaning into multiple HBase tables.
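For reference, an untested sketch of that map-only variant (my reconstruction, not code from a working run), reusing the same routing logic as the Reducer above:

public static class HBaseMultiTableOutputMapOnly extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] splits = value.toString().split("\t");
        String id = splits[4];
        String uid = splits[5];
        Put put = new Put(Bytes.toBytes(HBaseHelper.getRowkey(id, uid)));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("id"), Bytes.toBytes(id));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("uid"), Bytes.toBytes(uid));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("epochs"), Bytes.toBytes("1"));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("time"), Bytes.toBytes(splits[1]));
        // route to user_0 .. user_9 by id % 10, exactly as the Reducer does
        int idx = (int) (Long.parseLong(id) % 10);
        context.write(new ImmutableBytesWritable(Bytes.toBytes("user_" + idx)), put);
    }
}
// driver changes: no Reducer, and the map output types must match the output format
job.setMapperClass(HBaseMultiTableOutputMapOnly.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(Put.class);
job.setOutputFormatClass(MultiTableOutputFormat.class);
job.setNumReduceTasks(0);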
If you're cleaning into a single table instead, a Mapper alone really is enough. Here's the code:
import com.gey.hbase.helper.HBaseHelper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;
/**
* @author ly
* @date 2019/6/28 09:47
* @description
*/
public class HDFS2HBaseApp {
public static class HDFS2HBaseMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
ImmutableBytesWritable rowkey = new ImmutableBytesWritable();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String[] splits = value.toString().split("\t");
String createdDate = splits[1];
String enable = "1";
String id = splits[4];
String uid = splits[5];
String rk = HBaseHelper.getRowkey(id,uid);
rowkey.set(Bytes.toBytes(rk));
Put put = new Put(Bytes.toBytes(rk));
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("id"), Bytes.toBytes(id));
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("uid"), Bytes.toBytes(uid));
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("epochs"), Bytes.toBytes(enable));
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("time"), Bytes.toBytes(createdDate));
context.write(rowkey, put);
}
}
public static void main(String[] args) throws Exception{
// create the Configuration
Configuration configuration = new Configuration();
configuration.set("hbase.zookeeper.quorum","192.168.32.101,192.168.32.102,192.168.32.103");
configuration.set("hbase.zookeeper.port", "2181");
configuration.set("zookeeper.znode.parent","/hbase");
// create the job
Job job = Job.getInstance(configuration, "HDFS2HBaseApp");
// set the job's classes
job.setJarByClass(HDFS2HBaseApp.class);
job.setMapperClass(HDFS2HBaseMapper.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(Put.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
TableMapReduceUtil.initTableReducerJob(args[1], null, job);
job.setNumReduceTasks(0); // map-only: the Mapper's Puts go straight to TableOutputFormat
boolean result = job.waitForCompletion(true);
System.exit(result ? 0 : 1); // exit code 0 on success
}
}
Note: if you run this on a CDH cluster, remember to configure the path to the HBase jars in YARN first, otherwise the job fails with: Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/hbase/io/ImmutableBytesWritable
The fix:
On the YARN configuration page, search for "hadoop-env" and add the HBase lib path in the field on the right:
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/opt/cloudera/parcels/CDH-5.16.1-1.cdh5.16.1.p0.3/lib/hbase/lib/*
After saving, search for "yarn.application." and append the same HBase lib path to the yarn.application.classpath entry.
Then restart the affected services and you're good to go.
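As an aside, for one-off submissions you can also get past the client-side NoClassDefFoundError by putting the HBase jars on the submitter's classpath, for example HADOOP_CLASSPATH=$(hbase mapredcp) hadoop jar your-job.jar HDFS2HBaseApp /input/path user_table (the jar name and arguments here are placeholders); hbase mapredcp prints the classpath HBase needs for MapReduce jobs. Depending on the job, the task JVMs may still need the jars too, which is what the YARN setting above, or TableMapReduceUtil.addDependencyJars, takes care of.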
Feel free to leave a comment to discuss.
Posts are synced to my WeChat official account, LearnBigData; feel free to follow.