Hadoop Lesson 4: WordCount

Published: 2025-03-17

Prerequisites:

1. A running Hadoop cluster

2. IDEA (or another Java IDE)
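
3. A Maven project with the hadoop-client dependency added to its pom.xml: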

<dependency>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-client</artifactId>
  <version>2.9.2</version>
</dependency>
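
The hadoop-client version should match the Hadoop version running on the cluster (2.9.2 in this lesson).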

WordCountMapper.java

package com.peizheng.bigdata;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;


/*

KEYIN:    byte offset of the line within the input file, LongWritable
VALUEIN:  one line of text, Text
KEYOUT:   a single word, Text
VALUEOUT: its count (1 per occurrence), LongWritable

 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    private Text outKey = new Text();
    private LongWritable outValue = new LongWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        // "hadoop hadoop hadoop"
        String line = value.toString();

        // ["hadoop", "hadoop", "hadoop"]
        String[] words = line.split(" ");

        // <"hadoop", 1>
        // <"hadoop", 1>
        // <"hadoop", 1>

        for (String word : words) {
            outKey.set(word);

            // <"hadoop", 1>
            context.write(outKey, outValue);
        }
    }
}
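
Note that outKey and outValue are created once as instance fields and reused on every call to map(), so no new Writable objects are allocated per record. Because every word is emitted with a count of 1, outValue can stay fixed at 1.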

 

WordCountReducer.java

package com.peizheng.bigdata;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/*

KEYIN:    a word, Text
VALUEIN:  a single count (1 per occurrence), LongWritable
KEYOUT:   a word, Text
VALUEOUT: the total count for that word, LongWritable

 */

public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {


    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Reducer<Text, LongWritable, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        // <hadoop,[1,1,1]>   -> <hadoop, 3>
        long sum = 0;

        for (LongWritable value : values) {
            sum += value.get();
        }
        // wrap the total count in a Writable so it can be written out
        LongWritable outValue = new LongWritable(sum);

        // sum = 3
        context.write(key, outValue);

    }
}
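
Before reduce() is called, the framework shuffles and groups the mapper output by key, so for each word the reducer receives that word together with an Iterable of all the 1s emitted for it; summing them gives the word's total count.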

WordCountDriver.java 

package com.peizheng.bigdata;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WordCountDriver {

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

        // 1. Create the Job instance

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Set the jar by locating the driver class
        job.setJarByClass(WordCountDriver.class);

        // 3. Wire up the Mapper and Reducer classes
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);

        // 4. Set the key/value types of the map output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // 5. Set the key/value types of the final (reducer) output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // 6. Set the input and output paths for the data set
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. Submit the job and wait for it to finish
        job.waitForCompletion(true);

    }
}
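
The input and output paths come from the command-line arguments, so the same jar works for any data set. As an optional tweak that is not part of this lesson, the reducer can also be registered as a combiner so counts are pre-aggregated on the map side before the shuffle, and the job result can be propagated as the process exit code. A minimal sketch, assuming it replaces step 7 in the driver above:

        // optional: pre-aggregate counts on the map side (summing is associative,
        // so the reducer can safely double as a combiner)
        job.setCombinerClass(WordCountReducer.class);

        // optional: exit with a non-zero status if the job fails
        boolean success = job.waitForCompletion(true);
        System.exit(success ? 0 : 1);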

On the virtual machines running the cluster, upload the input file and launch the job from the packaged jar:

vim word.txt                          # write a few lines of words into the file
hadoop fs -put word.txt /
yarn jar HadoopDemo-1.0-SNAPSHOT.jar com.peizheng.bigdata.WordCountDriver /word.txt /output
hadoop fs -cat /output/part*
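
Note that the /output directory must not exist before the job runs; FileOutputFormat refuses to write to an existing output path. For the sample line "hadoop hadoop hadoop" used in the comments above, the part file would contain a line like "hadoop 3" (the word and its count, separated by a tab).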