Traffic Statistics -- Maven Dependencies

Published: 2025-04-16

Create a new project named Flow.

Set up the dependency by adding the following to pom.xml:

<!-- Add the hadoop-client 3.1.3 dependency -->
    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>3.1.3</version>
        </dependency>
    </dependencies>
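
hadoop-client 3.1.3 is built against Java 8. If your default JDK is newer and the build complains, one option (an assumption about your environment, not part of the original setup) is to pin the compiler level in the same pom.xml:

    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
    </properties>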

Create a log.txt file and fill it with some data, e.g.:

12611113333 556 8976
12612113333 1123 9087
13787653490 2345 7864
15027889876 556 76
13889764536 887 9
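
Each line holds a phone number, upstream traffic, and downstream traffic, separated by single spaces. Since every phone number appears only once in this sample, each input line should produce one line in part-r-00000 (key and value separated by a tab). For the first record, for instance, the total is 556 + 8976 = 9532, so the expected output line is roughly:

12611113333	Total upstream: 556, total downstream: 8976, total: 9532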

Create four classes: FlowBean, FlowDriver, FlowMapper, and FlowReducer.

The code is as follows:

FlowBean:

package org.example.flow;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Hadoop serialization
// Three fields: phone number, upstream traffic, downstream traffic
public class FlowBean implements Writable {
    private String phone;
    private Long upFlow;
    private Long downFlow;
    public FlowBean(String phone, Long upFlow, Long downFlow) {
        this.phone = phone;
        this.upFlow = upFlow;
        this.downFlow = downFlow;
    }
    // Getters and setters
    public String getPhone() {
        return phone;
    }
    public void setPhone(String phone) {
        this.phone = phone;
    }
    public Long getUpFlow() {
        return upFlow;
    }
    public void setUpFlow(Long upFlow) {
        this.upFlow = upFlow;
    }
    public Long getDownFlow() {
        return downFlow;
    }
    public void setDownFlow(Long downFlow) {
        this.downFlow = downFlow;
    }
    // No-arg constructor (Hadoop needs it to instantiate the bean during deserialization)
    public FlowBean(){}
    // Convenience method returning the total traffic
    public Long getTotalFlow(){
        return upFlow + downFlow;
    }
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeUTF(phone);
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(downFlow);
    }
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        phone = dataInput.readUTF();
        upFlow = dataInput.readLong();
        downFlow = dataInput.readLong();
    }
}
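
Note that readFields must read the fields back in exactly the order write wrote them. A quick way to sanity-check the pair outside of Hadoop is a plain-Java round trip (a throwaway sketch; FlowBeanRoundTrip is a hypothetical name, not part of the project):

package org.example.flow;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        // Serialize one bean into a byte buffer
        FlowBean in = new FlowBean("12611113333", 556L, 8976L);
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        in.write(new DataOutputStream(buffer));

        // Deserialize into a fresh bean and check the fields survived
        FlowBean out = new FlowBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(out.getPhone() + " " + out.getTotalFlow()); // expect: 12611113333 9532
    }
}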

FlowDriver:

package org.example.flow;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;


// Job submission class; seven steps in total
public class FlowDriver {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Get the configuration and create the Job object
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Set the jar by class
        job.setJarByClass(FlowDriver.class);
        // 3. Wire up the Mapper and Reducer
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        // 4. Set the Mapper's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // 5. Set the Reducer's output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // 6. Set the input and output paths (the output directory must not already exist, or the job fails)
        FileInputFormat.setInputPaths(job,new Path("D:\\vm\\wcinput"));
        FileOutputFormat.setOutputPath(job,new Path("output4"));
        // 7. Submit the job and use the result as the process exit code
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);

    }
}
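
The paths above are hardcoded for a local run, and the output directory is relative to the working directory. A common variant (a sketch, not from the original) is to take both paths from the command line in step 6 instead:

        // 6. (variant) Read input and output paths from the command line
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));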

FlowMapper:

package org.example.flow;


import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

// 1. Extend Mapper
// 2. Override the map method
public class FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Take one line of input and split it on spaces:
        //    element 0 is the phone number,
        //    element 1 the upstream traffic,
        //    element 2 the downstream traffic
        String[] split = value.toString().split(" ");
        String phone = split[0];

        Long upFlow = Long.parseLong(split[1]);
        Long downFlow = Long.parseLong(split[2]);
        // 2. Wrap the fields in a FlowBean
        FlowBean flowBean = new FlowBean(phone, upFlow, downFlow);
        // Emit the phone number as the key and the bean as the value
        context.write(new Text(phone), flowBean);

    }
}
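
If the log fields might be separated by tabs or runs of spaces rather than a single space (an assumption about messier data, not something the sample above needs), a more forgiving split is:

        String[] split = value.toString().trim().split("\\s+");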

FlowReducer:

package org.example.flow;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

// Extend Reducer
// Override the reduce method
public class FlowReducer extends Reducer<Text, FlowBean, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        // 1. Iterate over the values, pull out each element, and accumulate the upstream and downstream totals
        Long upFlowSum = 0L;
        Long downFlowSum = 0L;
        for (FlowBean flowBean : values) {
            upFlowSum += flowBean.getUpFlow();
            downFlowSum += flowBean.getDownFlow();
        }
        // 2. Compute the grand total
        long sumFlow = upFlowSum + downFlowSum;
        String flowDesc = String.format("Total upstream: %d, total downstream: %d, total: %d", upFlowSum, downFlowSum, sumFlow);

        context.write(key, new Text(flowDesc));
    }
}
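
One Hadoop-specific caveat: the framework typically reuses a single FlowBean instance while iterating over values, so accumulating the fields immediately (as above) is safe, but storing the bean references for later would not be. If the records ever need to be kept, copy them first, e.g. (assuming java.util.List and java.util.ArrayList are imported):

        // Copy each record; the iterator hands back the same reused FlowBean instance
        List<FlowBean> copies = new ArrayList<>();
        for (FlowBean b : values) {
            copies.add(new FlowBean(b.getPhone(), b.getUpFlow(), b.getDownFlow()));
        }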

Create a log4j.properties file under src/main/resources and add the following:

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
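
As written, rootLogger only attaches stdout, so the logfile appender is declared but never used. If you also want the file output (target/spring.log), add it to the root logger:

log4j.rootLogger=INFO, stdout, logfile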

