Reference: http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/SingleCluster.html
Install and configure the JDK first.
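If Hadoop cannot find your JDK later on, you can also point it at the JDK explicitly in etc/hadoop/hadoop-env.sh (the path below is only an example; use your own installation path):
export JAVA_HOME=/usr/java/latest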
Download Hadoop from: http://apache.fayea.com/hadoop/common/stable/ (you can go up one directory level on that mirror to download a newer release).
Extract it to /usr/tools/; create this directory first if it does not exist.
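For example, assuming the downloaded archive is hadoop-2.7.3.tar.gz in the current directory:
$ mkdir -p /usr/tools
$ tar -xzf hadoop-2.7.3.tar.gz -C /usr/tools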
Change into the Hadoop installation directory:
$ cd /usr/tools/hadoop-2.7.3
Hadoop can run in standalone mode, using the local filesystem for both input and output.
$ mkdir input    # create the local input directory
Switch into the input directory and create a few files:
$ echo hello world, I am jungle. bye world > file1
$ echo hello hadoop. hello jungle. bye hadoop. > file2
$ echo the great software is hadoop. >> file2
Go back to the Hadoop installation directory and run the wordcount example, which counts how many times each word appears:
$ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar wordcount input output
Show the output:
$ cat output/*
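With the three sample lines above, the counts should come out roughly like this (wordcount splits on whitespace only, so trailing punctuation stays attached to each word):
I	1
am	1
bye	2
great	1
hadoop.	3
hello	3
is	1
jungle.	2
software	1
the	1
world	1
world,	1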
Pseudo-distributed mode:
Edit the configuration files:
etc/hadoop/core-site.xml:
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
</configuration>

etc/hadoop/hdfs-site.xml:

<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
Check whether you can ssh to localhost without being asked for a password:
$ ssh localhost
If it prompts for a password, run the following:
$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
$ chmod 0600 ~/.ssh/authorized_keys
Format the filesystem:
$ bin/hdfs namenode -format
Start the NameNode and DataNode daemons:
$ sbin/start-dfs.sh
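You can check that the daemons came up with jps (it should list NameNode, DataNode and SecondaryNameNode); the NameNode web interface is at http://localhost:50070/ by default on Hadoop 2.x:
$ jps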
Create an input directory in the distributed filesystem and copy all files from the local input folder into it:
[root@localhost hadoop-2.7.3]# hdfs dfs -mkdir -p input
[root@localhost hadoop-2.7.3]# hdfs dfs -put /usr/tools/hadoop-2.7.3/input/* input
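To confirm that the files landed in HDFS, list the directory:
$ bin/hdfs dfs -ls input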
Run the MapReduce job to count how many times each word appears:
$ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar wordcount input output
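Note that the job will refuse to run if the output directory already exists in HDFS, so if you re-run it, delete the old output first:
$ bin/hdfs dfs -rm -r output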
View the output (copy it from HDFS to the local filesystem first):
$ bin/hdfs dfs -get output output
$ cat output/*
or view it directly on HDFS:
$ bin/hdfs dfs -cat output/*
Here is the source code of the example in the jar; it counts the number of occurrences of each word:
package org.apache.hadoop.examples;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class WordCount
{
    public static void main(String[] args)
        throws Exception
    {
        Configuration conf = new Configuration();
        // Separate generic Hadoop options from the job arguments (input paths and output path).
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: wordcount <in> [<in>...] <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        // The reducer also serves as a combiner, pre-aggregating counts on the map side.
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Every argument except the last is an input path; the last one is the output path.
        for (int i = 0; i < otherArgs.length - 1; i++) {
            FileInputFormat.addInputPath(job, new Path(otherArgs[i]));
        }
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[otherArgs.length - 1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    // Reducer: sums up the counts emitted for each word.
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable>
    {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException
        {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            this.result.set(sum);
            context.write(key, this.result);
        }
    }

    // Mapper: splits each input line into whitespace-separated tokens and emits (word, 1).
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable>
    {
        private static final IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(Object key, Text value, Context context)
            throws IOException, InterruptedException
        {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                this.word.set(itr.nextToken());
                context.write(this.word, one);
            }
        }
    }
}
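If you would rather compile and run this source yourself instead of using the bundled examples jar, a rough sketch, run from the Hadoop installation directory and assuming a JDK (javac and jar) is installed and the package declaration above is kept:
$ mkdir wordcount_classes
$ javac -classpath "$(bin/hadoop classpath)" -d wordcount_classes WordCount.java
$ jar -cf wc.jar -C wordcount_classes .
$ bin/hadoop jar wc.jar org.apache.hadoop.examples.WordCount input output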