Java MapReduce Hadoop word length frequency not working

I have a question similar to the one on this page (reference links). I need to provide a map and a reduce method that count the frequency of each word length from 1 to n (e.g., for the line "to be or not", length 2 occurs three times and length 3 once). I tried to implement this using the approach from the answer there:

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

  //Mapper which implements the map() function
  public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
  //public static class TokenizerMapper extends Mapper<LongWritable, Text, IntWritable, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text(); 

    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
      StringTokenizer itr = new StringTokenizer(value.toString());
      while (itr.hasMoreTokens()) {
        //check whether the word starts with a or b
        String wordToCheck = itr.nextToken();
        word.set(String.valueOf(wordToCheck.length()));
        context.write(word, one);
        //if (wordToCheck.startsWith("a")||wordToCheck.startsWith("b")){
        //  word.set(wordToCheck);
        //  context.write(word, one);
        //}
        //check for word length
        //if (wordToCheck.length() > 8) {
        // }
      }
    }
  }
  //Reducer which implements the reduce() function
  public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    private IntWritable result = new IntWritable();

    public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
      int sum = 0;
      for (IntWritable val : values) {
        sum += val.get();
      }
      result.set(sum);
      context.write(key, result);
    }
  }
  //Driver class to specify the Mapper and Reducer
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "word count");
    job.setJarByClass(WordCount.class);
    job.setMapperClass(TokenizerMapper.class);
    job.setReducerClass(IntSumReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

I get the following exception:

17/02/25 17:02:34 INFO mapreduce.Job:  map 0% reduce 0%
17/02/25 17:02:36 INFO mapreduce.Job:  map 100% reduce 0%
17/02/25 17:02:36 INFO mapreduce.Job: Task Id : attempt_1488013180963_0001_m_000000_2, Status : FAILED
Error: java.io.IOException: Type mismatch in key from map: expected org.apache.hadoop.io.Text, received org.apache.hadoop.io.LongWritable
    at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.collect(MapTask.java:1069)
    at org.apache.hadoop.mapred.MapTask$NewOutputCollector.write(MapTask.java:712)
    at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
    at org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context.write(WrappedMapper.java:112)
    at org.apache.hadoop.mapreduce.Mapper.map(Mapper.java:124)
    at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:784)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:163)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1656)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)

I developed this class in Eclipse Kepler and ran it as a jar file with Hadoop 2.6.3 from an Ubuntu LXTerminal. What is the problem? I also tried using IntWritable, as suggested in that answer, but it failed in a similar way.


1 Answer

  1. Answer #1

    I'm not 100% sure, but when you use a file as input, the mapper should take a key of type LongWritable (the byte offset of the line within the file) and a Text value (the line of the file as text).
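
    The stack trace points the same way: the frame org.apache.hadoop.mapreduce.Mapper.map(Mapper.java:124) is the base class's map() running, not the one in TokenizerMapper. Because the class is declared as Mapper<Object, Text, Text, IntWritable>, a map(LongWritable key, ...) method does not override map(Object key, ...), so Hadoop falls back to the inherited default, which is essentially an identity function (paraphrasing the Hadoop source):

    //default map() in org.apache.hadoop.mapreduce.Mapper:
    //it forwards the input pair unchanged, so the LongWritable
    //offset ends up where a Text key was expected
    protected void map(KEYIN key, VALUEIN value, Context context)
        throws IOException, InterruptedException {
      context.write((KEYOUT) key, (VALUEOUT) value);
    }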

    So a possible fix is to replace

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

    with

    public static class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
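
    For completeness, a minimal sketch of the corrected mapper (the body is unchanged from the question; only the key type parameter and the map() signature are brought into agreement):

    public static class TokenizerMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

      private final static IntWritable one = new IntWritable(1);
      private Text word = new Text();

      @Override //compiles only if this really overrides the framework's map()
      public void map(LongWritable key, Text value, Context context)
          throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString());
        while (itr.hasMoreTokens()) {
          //emit the token's length (as text) with a count of 1
          word.set(String.valueOf(itr.nextToken().length()));
          context.write(word, one);
        }
      }
    }

    Adding @Override is a cheap safeguard here: if the method did not actually override the framework's map(), the compiler would reject it instead of silently running the identity mapper.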