The job.setNumReduceTasks(0) Method in Hadoop

This article examines what setting job.setNumReduceTasks(0) does in MapReduce: how the presence or absence of a reduce phase changes the way map results are written out, and how the choice of OutputFormat shapes the final output.

job.setNumReduceTasks(0) affects only how the map results are written out

  When job.setNumReduceTasks(0) is set, the job has no reduce phase at all, and the only thing this changes is how the map results are written out.

If there is a reduce phase, the map results are flushed to local disk and serve as the reduce input; the reduce results are then written by the OutputFormat's RecordWriter to the location given by setOutputPath, as the job's final output.
If there is no reduce phase, the map results are written directly by the OutputFormat's RecordWriter to the setOutputPath location, as the job's final output.
The OutputFormat can be an ordinary one such as FileOutputFormat, or an empty one such as NullOutputFormat, which simply discards everything handed to it.
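
As a minimal sketch of the map-only path just described (the class name, the pass-through mapper, and the /in and /out paths are placeholders, not code from the article):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MapOnlyDriver {

    // Trivial pass-through mapper: emits each input line unchanged.
    public static class PassThroughMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            context.write(key, value);
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(MapOnlyDriver.class);
        job.setMapperClass(PassThroughMapper.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        job.setNumReduceTasks(0); // no reduce phase
        // With zero reducers the map output skips shuffle/sort entirely and is
        // handed straight to the OutputFormat's RecordWriter; the result files
        // are named part-m-NNNNN rather than part-r-NNNNN.
        FileInputFormat.setInputPaths(job, new Path("/in"));
        FileOutputFormat.setOutputPath(job, new Path("/out"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}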

  So the presence or absence of reduce, combined with the variety of OutputFormats, yields the following cases (the combinations add nothing new in themselves; they are spelled out only for clarity; a code sketch follows the list):

With reduce:
1. The reduce results do not need to go to a file, e.g. the reducer inserts them directly into HBase; use NullOutputFormat, in which case no setOutputPath call is needed.
2. The reduce results do need to go to a file, e.g. with FileOutputFormat; setOutputPath is required.
Without reduce:
1. The map results do not need to go to a file, e.g. the mapper inserts them directly into HBase; use NullOutputFormat, in which case no setOutputPath call is needed.
2. The map results do need to go to a file, e.g. with FileOutputFormat; setOutputPath is required.
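
For example, the "without reduce, no file output" case might be wired up as follows. This is a minimal sketch, not code from the article: HBaseInsertDriver and HBaseInsertMapper are hypothetical names, and the actual HBase writes are left as a comment.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class HBaseInsertDriver {

    // Hypothetical mapper standing in for one that writes each parsed record
    // into HBase itself (e.g. via an HBase Table opened in setup()); it emits
    // nothing back to the MapReduce framework.
    public static class HBaseInsertMapper extends Mapper<LongWritable, Text, NullWritable, NullWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context) {
            // insert value into HBase here; no context.write(...) call
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(HBaseInsertDriver.class);
        job.setMapperClass(HBaseInsertMapper.class);
        job.setNumReduceTasks(0);                         // map-only
        job.setOutputFormatClass(NullOutputFormat.class); // framework output is discarded
        FileInputFormat.setInputPaths(job, new Path("/in"));
        // Note: no FileOutputFormat.setOutputPath(...) is needed, because
        // NullOutputFormat writes nothing and requires no output directory.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}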

Reader question: in the WordCount driver below, the lines FileInputFormat.setInputPaths(job, new Path("/yjx/harry potter.txt")) and FileOutputFormat.setOutputPath(job, new Path("/yjx/result/" + job.getJobName())) were flagged red with a type-mismatch error. The cause is the imports: the original listing pulled FileInputFormat and FileOutputFormat from the old org.apache.hadoop.mapred package, whose setInputPaths/setOutputPath expect a JobConf rather than the new-API Job. Importing the org.apache.hadoop.mapreduce.lib versions, as below, resolves it.

package com.yjxxt.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
// New-API input/output format classes; the old org.apache.hadoop.mapred
// versions caused the type error described above.
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WordCountJob {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // Load the configuration files
        Configuration conf = new Configuration(true);
        // Run in local mode
        conf.set("mapreduce.framework.name", "local");
        // Create the job
        Job job = Job.getInstance(conf);
        // Set the job's main class
        job.setJarByClass(WordCountJob.class);
        // Set the job name
        job.setJobName("yjx-WordCount-" + System.currentTimeMillis());
        // Set the number of reducers
        job.setNumReduceTasks(2);
        // Set the input path (where the data to process is read from)
        FileInputFormat.setInputPaths(job, new Path("/yjx/harry potter.txt"));
        // Set the output path (where the computed results are written)
        FileOutputFormat.setOutputPath(job, new Path("/yjx/result/" + job.getJobName()));
        // Set the key and value types of the map output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Declare the reducer's output types (omitted in the original listing)
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Set the Mapper and Reducer classes
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // Submit the job to the cluster and wait for it to finish
        job.waitForCompletion(true);
    }
}
package com.dajiangtai.hadoop.tv;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ParseAndFilterLog extends Configured implements Tool {

    public static class ExtractTVMsgLogMapper extends Mapper<LongWritable, Text, Text, Text> {
        private final Text outputKey = new Text();
        private final Text outputValue = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] parsedData = parseAndFilterData(value.toString());
            if (parsedData != null && parsedData.length == 7) {
                String keyPart = parsedData[0] + "@" + parsedData[1];
                String valuePart = String.join("@", parsedData[2], parsedData[3],
                        parsedData[4], parsedData[5], parsedData[6]);
                outputKey.set(keyPart);
                outputValue.set(valuePart);
                context.write(outputKey, outputValue);
            }
        }

        private String[] parseAndFilterData(String data) {
            // The real parsing logic goes here
            return data.split(","); // example: split on commas
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: ParseAndFilterLog <input> <output>");
            return 1;
        }
        Job job = Job.getInstance(conf, "TV Log Parser");
        job.setJarByClass(ParseAndFilterLog.class);
        job.setMapperClass(ExtractTVMsgLogMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setNumReduceTasks(0); // no reducer
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new ParseAndFilterLog(), args);
        System.exit(exitCode);
    }
}

Reader question: can the imports of org.apache.hadoop.fs.Path, org.apache.hadoop.util.GenericOptionsParser, org.apache.hadoop.util.Tool, and org.apache.hadoop.util.ToolRunner be removed? They keep reporting errors. They cannot simply be deleted: Path is used for the input/output paths, and the other three drive the standard Tool/ToolRunner pattern, so removing them breaks compilation. Persistent errors on these imports usually mean the Hadoop client libraries (e.g. hadoop-common and hadoop-mapreduce-client-core) are missing from the build classpath, not that the imports are wrong. If the Tool plumbing itself is unwanted, the driver can be rewritten without it, as sketched below.
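
A sketch of the same driver without Tool, ToolRunner, or GenericOptionsParser; the class name ParseAndFilterLogPlain is hypothetical, the mapper is reused from the listing above, and Path is still required because the input/output APIs take Path arguments:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ParseAndFilterLogPlain {
    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            System.err.println("Usage: ParseAndFilterLogPlain <input> <output>");
            System.exit(2);
        }
        Job job = Job.getInstance(new Configuration(), "TV Log Parser");
        job.setJarByClass(ParseAndFilterLogPlain.class);
        // Reuse the parsing mapper from the listing above
        job.setMapperClass(ParseAndFilterLog.ExtractTVMsgLogMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setNumReduceTasks(0); // map-only, as before
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}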
hadoop jar hadoop-mapreduce-examples-2.7.6.jar wordcount /input/word.txt /output
25/06/24 18:52:31 INFO client.RMProxy: Connecting to ResourceManager at wei/172.26.0.238:8032
org.apache.hadoop.mapred.FileAlreadyExistsException: Output directory hdfs://wei:9000/output already exists
    at org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.checkOutputSpecs(FileOutputFormat.java:146)
    at org.apache.hadoop.mapreduce.JobSubmitter.checkSpecs(JobSubmitter.java:266)
    at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:139)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1290)
    at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1287)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1758)
    at org.apache.hadoop.mapreduce.Job.submit(Job.java:1287)
    at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1308)
    at org.apache.hadoop.examples.WordCount.main(WordCount.java:87)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.hadoop.util.ProgramDriver$ProgramDescription.invoke(ProgramDriver.java:71)
    at org.apache.hadoop.util.ProgramDriver.run(ProgramDriver.java:144)
    at org.apache.hadoop.examples.ExampleDriver.main(ExampleDriver.java:74)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
    at org.apache.hadoop.util.RunJar.main(RunJar.java:136)

The first stack frame explains the failure: FileOutputFormat.checkOutputSpecs rejects any job whose output directory already exists, and hdfs://wei:9000/output is left over from a previous run. Delete it first (hdfs dfs -rm -r /output) or pass a fresh output path.
package com.dajiangtai.hadoop.tv;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ParseAndFilterLog extends Configured implements Tool {

    public static class ExtractTVMsgLogMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // The real processing logic goes here
            // Example: pass the raw value through unchanged
            context.write(new Text("key"), value);
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = this.getConf();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: ParseAndFilterLog <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "ParseAndFilterLog");
        job.setJarByClass(ParseAndFilterLog.class);
        job.setMapperClass(ExtractTVMsgLogMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        // Use "@" instead of the default tab between output key and value
        job.getConfiguration().set("mapreduce.output.textoutputformat.separator", "@");
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new ParseAndFilterLog(), args);
        System.exit(exitCode);
    }
}

Reader question: merge this code with the previous listing and revise it accordingly (see the merged sketch below).
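
A merged version might look as follows: the parsing mapper and the map-only setNumReduceTasks(0) come from the first ParseAndFilterLog listing, and the "@" output separator comes from this one; the comma split remains a placeholder for the real parsing logic.

package com.dajiangtai.hadoop.tv;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ParseAndFilterLog extends Configured implements Tool {

    // Parsing mapper from the first listing: keeps records with exactly 7
    // fields, joining fields 0-1 into the key and fields 2-6 into the value.
    public static class ExtractTVMsgLogMapper extends Mapper<LongWritable, Text, Text, Text> {
        private final Text outputKey = new Text();
        private final Text outputValue = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] parsed = value.toString().split(","); // placeholder parse, as in the original
            if (parsed.length == 7) {
                outputKey.set(parsed[0] + "@" + parsed[1]);
                outputValue.set(String.join("@", parsed[2], parsed[3], parsed[4], parsed[5], parsed[6]));
                context.write(outputKey, outputValue);
            }
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: ParseAndFilterLog <input> <output>");
            return 1;
        }
        Job job = Job.getInstance(conf, "ParseAndFilterLog");
        job.setJarByClass(ParseAndFilterLog.class);
        job.setMapperClass(ExtractTVMsgLogMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        job.setNumReduceTasks(0); // map-only, from the first listing
        // "@" separator between output key and value, from the second listing
        job.getConfiguration().set("mapreduce.output.textoutputformat.separator", "@");
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new ParseAndFilterLog(), args));
    }
}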