package com.ws;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
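/**
 * A hand-rolled simulation of the map side of MapReduce WordCount on HDFS:
 * each call to run() plays one map task over one byte range (split) of the
 * input file and writes its word/count pairs into one output file per reduce
 * partition, named m{map index}r{partition}.txt.
 */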
public class WordCount {
    public static void main(String[] args) throws IOException {
        // Map task 0 covers bytes [0, 134217728), i.e. the first 128 MB:
        // run("log0.txt", "0", 0, 134217728);
        // Map task 1 covers bytes [134217728, EOF); the end offset only has to
        // exceed the file length.
        run("log0.txt", "1", 134217728, 999999999);
    }
    /**
     * Simulates one map task over a single byte range (split) of the input file.
     *
     * @param filename  name of the input file under /wordcount/ on HDFS
     * @param index     index of this map task (used in the output file names)
     * @param seek      start offset of this split, in bytes
     * @param endLength end offset of this split, in bytes
     * @throws IOException
     */
    public static void run(String filename, String index, long seek, long endLength) throws IOException {
        System.setProperty("HADOOP_USER_NAME", "root");
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://dream1:9000");
        FileSystem fs = FileSystem.get(conf);
        FSDataInputStream in = fs.open(new Path("/wordcount/" + filename));
        // One output file per reduce partition: m{map index}r{partition}.txt.
        FSDataOutputStream out0 = fs.create(new Path("/wordcount/m" + index + "r0.txt"));
        FSDataOutputStream out1 = fs.create(new Path("/wordcount/m" + index + "r1.txt"));
        FSDataOutputStream out2 = fs.create(new Path("/wordcount/m" + index + "r2.txt"));
        in.seek(seek);
        BufferedReader br = new BufferedReader(new InputStreamReader(in));
        String line;
        // Absolute byte position in the file, starting at this split's offset.
        long length = seek;
        // If we start at a nonzero offset, the first (possibly partial) line is not
        // ours; the previous map task processes it. Its bytes still count toward
        // our position.
        if (seek != 0) {
            String skipped = br.readLine();
            if (skipped != null) {
                length += skipped.getBytes().length + 1;
            }
        }
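        // Worked example (hypothetical numbers): say "hello world\n" occupies bytes
        // 134217720..134217731, straddling the 128 MB boundary at 134217728. Task 0
        // (endLength = 134217728) reads through that whole line before stopping;
        // task 1 (seek = 134217728) discards the partial line above, so the line
        // is counted exactly once.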
        while ((line = br.readLine()) != null) {
            // +1 because readLine() strips the trailing \n; in effect we iterate
            // over the file split on \n, so add the newline byte back when counting.
            length += line.getBytes().length + 1;
            for (String word : line.split(" ")) {
                // Route each word to one of the three reduce partitions by hash,
                // like Hadoop's HashPartitioner: mask off the sign bit so negative
                // hash codes still map into 0..2.
                int partition = (word.hashCode() & Integer.MAX_VALUE) % 3;
                if (partition == 2) {
                    out2.write((word + "\001" + "1\n").getBytes());
                } else if (partition == 1) {
                    out1.write((word + "\001" + "1\n").getBytes());
                } else {
                    out0.write((word + "\001" + "1\n").getBytes());
                }
            }
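            // Example of the routing above: "hello".hashCode() is 99162322 and
            // 99162322 % 3 == 1, so every "hello" from every map task lands in out1.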
            // Why check this way? The split boundary raises two problems: it can
            // cut a line in half, and cleanly carving out the data before and after
            // the boundary is awkward. Convention: the task that reaches its end
            // offset reads one extra line, and the task that starts at a nonzero
            // offset skips its first line, so every line is processed exactly once.
            // Why > rather than >=? When length == endLength we finished a line
            // exactly at the boundary but still owe the extra line, so we only
            // stop once length goes past endLength.
            if (length > endLength) {
                break;
            }
        }
        // Release the HDFS streams (closing br also closes the underlying input).
        br.close();
        out0.close();
        out1.close();
        out2.close();
    }
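    /**
     * A minimal sketch of a matching reduce step, assuming the
     * m{map}r{partition}.txt layout written by run() above; the method name,
     * result path, and parameters are illustrative, not prescribed by the map
     * code.
     *
     * @param partition which reduce partition (0..2) to aggregate
     * @param mapTasks  how many map tasks wrote output files
     */
    public static void reduce(int partition, int mapTasks) throws IOException {
        System.setProperty("HADOOP_USER_NAME", "root");
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://dream1:9000");
        FileSystem fs = FileSystem.get(conf);
        // Sum the "1" counts per word across every map task's file for this partition.
        java.util.Map<String, Integer> counts = new java.util.TreeMap<>();
        for (int m = 0; m < mapTasks; m++) {
            Path part = new Path("/wordcount/m" + m + "r" + partition + ".txt");
            try (BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(part)))) {
                String line;
                while ((line = br.readLine()) != null) {
                    String[] kv = line.split("\001");
                    counts.merge(kv[0], Integer.parseInt(kv[1]), Integer::sum);
                }
            }
        }
        // Hypothetical result path; one output file per reduce partition.
        try (FSDataOutputStream out = fs.create(new Path("/wordcount/result" + partition + ".txt"))) {
            for (java.util.Map.Entry<String, Integer> e : counts.entrySet()) {
                out.write((e.getKey() + "\t" + e.getValue() + "\n").getBytes());
            }
        }
    }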
}