Aggregating uplink/downlink traffic information in log data --- partitioned statistics


Requirement:

Aggregate the uplink/downlink traffic information in the log data, producing the statistics per partition.

Output the traffic statistics to different files according to the phone number's home region, so that the results can be narrowed down to the provincial level when they are queried.

Analysis:

In MapReduce, the key/value pairs emitted by the map phase are grouped by key and then dispatched to the different reduce tasks.

The default dispatch rule is: partition = key.hashCode() % numReduceTasks.

So, if the data is to be grouped according to our own rule, we have to replace the data-dispatching (grouping) component, the Partitioner:

write a custom partitioner, e.g. CustomPartitioner, that extends the abstract class Partitioner,

then register it on the Job object: job.setPartitionerClass(CustomPartitioner.class).
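For reference, Hadoop's built-in default partitioner behaves roughly like the sketch below (the class name HashStylePartitioner and the generic parameters are only for illustration): the target reduce task depends solely on the key's hash code, so records belonging to one province may end up scattered across several output files.

import org.apache.hadoop.mapreduce.Partitioner;

// Roughly equivalent to the default HashPartitioner: partition by hash code only
public class HashStylePartitioner<K, V> extends Partitioner<K, V> {

    @Override
    public int getPartition(K key, V value, int numReduceTasks) {
        // mask off the sign bit so a negative hashCode still maps to a valid partition
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}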

Sample log lines:

1363157985066 13726230503 00-FD-07-A4-72-B8:CMCC 120.196.100.82  24 27 2481 24681 200

1363157995052 13826544101 5C-0E-8B-C7-F1-E0:CMCC 120.197.40.4 4 0 264 0 200

1363157991076 13926435656 20-10-7A-28-CC-0A:CMCC 120.196.100.99 2 4 132 1512 200

1363154400022 13926251106 5C-0E-8B-8B-B1-50:CMCC 120.197.40.4 4 0 240 0 200

1. Define a FlowBean

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

/**
 * Encapsulates the traffic information of one phone number.
 */
public class FlowBean implements Writable {

    private long upFlow;
    private long dFlow;
    private long sumFlow;

    // A no-arg constructor is required so that Hadoop can instantiate the bean via reflection
    public FlowBean() {
        super();
    }

    public FlowBean(long upFlow, long dFlow) {
        super();
        this.upFlow = upFlow;
        this.dFlow = dFlow;
        this.sumFlow = upFlow + dFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getdFlow() {
        return dFlow;
    }

    public void setdFlow(long dFlow) {
        this.dFlow = dFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    // Serialization: write the fields to the output stream
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(dFlow);
        out.writeLong(sumFlow);
    }

    // Deserialization: read the fields back from the input stream.
    // Note: the fields must be read in exactly the same order they were written.
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        dFlow = in.readLong();
        sumFlow = in.readLong();
    }

    // toString() determines how the bean is rendered in the output files
    @Override
    public String toString() {
        return upFlow + "\t" + dFlow + "\t" + sumFlow;
    }
}
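Because readFields must read the fields back in exactly the order write emitted them, a quick local round trip is a convenient sanity check before running the job. The snippet below is only an illustrative test (the class name FlowBeanRoundTrip is not part of the job):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {

    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean(2481, 24681);

        // serialize the bean into an in-memory buffer
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // deserialize into a fresh instance and print it
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(copy); // expected (tab-separated): 2481 24681 27162
    }
}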

2. Create the mapper

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    @Override
    protected void map(LongWritable key, Text values, Context context)
            throws IOException, InterruptedException {
        // Convert the line to a String
        String value = values.toString();
        // Split into fields (the log file is tab-separated)
        String[] split = value.split("\t");
        // The phone number is the second field
        String phoneNum = split[1];
        // The last field is the HTTP status; the two fields before it are the
        // up-traffic and down-traffic byte counts
        long upFlow = Long.parseLong(split[split.length - 3]);
        long dFlow = Long.parseLong(split[split.length - 2]);
        context.write(new Text(phoneNum), new FlowBean(upFlow, dFlow));
    }
}

3. Create the reducer

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class FlowCountReduce extends Reducer<Text, FlowBean, Text, FlowBean> {

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        long sum_upFlow = 0;
        long sum_dFlow = 0;
        // Accumulate the up-traffic and down-traffic separately
        for (FlowBean flowBean : values) {
            sum_upFlow += flowBean.getUpFlow();
            sum_dFlow += flowBean.getdFlow();
        }
        FlowBean res = new FlowBean(sum_upFlow, sum_dFlow);
        context.write(key, res);
    }
}

4. Define a custom Partitioner

import java.util.HashMap;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

/*
 * Our own rule for dispatching (grouping) data between the map and reduce phases:
 * dispatch by the province that the phone number belongs to.
 */
public class ProvincePartitioner extends Partitioner<Text, FlowBean> {

    public static HashMap<String, Integer> provinceDict = new HashMap<String, Integer>();

    static {
        provinceDict.put("136", 0);
        provinceDict.put("137", 1);
        provinceDict.put("138", 2);
        provinceDict.put("139", 3);
    }

    @Override
    public int getPartition(Text key, FlowBean values, int numPartitions) {
        // Look up the partition by the first three digits of the phone number;
        // anything not in the dictionary falls into partition 4
        String prefix = key.toString().substring(0, 3);
        Integer provinceID = provinceDict.get(prefix);
        return provinceID == null ? 4 : provinceID;
    }
}
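Since getPartition returns values 0 through 4, the driver must request at least five reduce tasks (job.setNumReduceTasks(5) below); with fewer tasks, partition 4 would be out of range and the map tasks would fail with an illegal-partition error. A quick local check of the rule, using phone numbers from the sample lines above (an illustrative snippet only, not part of the job):

import org.apache.hadoop.io.Text;

public class ProvincePartitionerCheck {

    public static void main(String[] args) {
        ProvincePartitioner p = new ProvincePartitioner();
        // 5 = the number of reduce tasks the driver will configure
        System.out.println(p.getPartition(new Text("13726230503"), null, 5)); // prefix 137 -> partition 1
        System.out.println(p.getPartition(new Text("13826544101"), null, 5)); // prefix 138 -> partition 2
        System.out.println(p.getPartition(new Text("13926435656"), null, 5)); // prefix 139 -> partition 3
    }
}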

5. Create the FlowCount driver

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FlowCount {

    public static void main(String[] args) throws Exception {
        String inPath = "";
        String outPath = "";
        if (args.length == 2) {
            inPath = args[0];
            outPath = args[1];
        }

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // Locate the jar that contains this driver class
        job.setJarByClass(FlowCount.class);

        // Mapper and Reducer classes used by the job
        job.setMapperClass(FlowCountMapper.class);
        job.setReducerClass(FlowCountReduce.class);

        // Key/value types emitted by the mapper
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // Key/value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // Plug in the custom partitioner and match the number of reduce tasks
        // to the number of partitions it produces (0-4)
        job.setPartitionerClass(ProvincePartitioner.class);
        job.setNumReduceTasks(5);

        // Input and output directories of the job
        FileInputFormat.setInputPaths(job, new Path(inPath));
        FileOutputFormat.setOutputPath(job, new Path(outPath));

        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
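With ProvincePartitioner and five reduce tasks, the job produces five result files, part-r-00000 through part-r-00004: partitions 0-3 hold the totals for the 136/137/138/139 prefixes and partition 4 collects all other numbers. Assuming the classes are packaged as flowcount.jar (the jar name and paths here are only examples), the job can be submitted with something like: hadoop jar flowcount.jar FlowCount /flow/input /flow/output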

