This experiment builds on the HDFS pseudo-distributed mode configured earlier; the fully distributed mode will be covered next time.
public static String hdfsUrl = "hdfs://192.168.1.106:8020";
@Test
public void testHDFSMkdir() throws Exception {
    // create an HDFS folder
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    Path path = new Path("/test");
    fs.mkdirs(path);
}
@Test
public void testCreateFile() throws Exception {
    // create a file and write to it
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    Path path = new Path("/test/a.txt");
    FSDataOutputStream out = fs.create(path);
    out.write("hello hadoop".getBytes());
    out.close();
}
@Test
public void testRenameFile() throws Exception {
    // rename a file
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    Path path = new Path("/test/a.txt");
    Path newPath = new Path("/test/b.txt");
    System.out.println(fs.rename(path, newPath));
}
@Test
public void testUploadLocalFile1() throws Exception {
    // upload a local file with copyFromLocalFile
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    Path src = new Path("/home/hadoop/hadoop-1.2.1/bin/rcc");
    Path dst = new Path("/test");
    fs.copyFromLocalFile(src, dst);
}
@Test
public void testUploadLocalFile2() throws Exception {
    // upload a local file by streaming it through IOUtils
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    InputStream in = new BufferedInputStream(
            new FileInputStream(new File("/home/hadoop/hadoop-1.2.1/bin/rcc")));
    FSDataOutputStream out = fs.create(new Path("/test/rcc1"));
    IOUtils.copyBytes(in, out, 4096, true); // 'true' closes both streams when the copy finishes
}
@Test
public void testListFiles() throws Exception {
    // list the files under a folder
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    Path dst = new Path("/test");
    FileStatus[] files = fs.listStatus(dst);
    for (FileStatus file : files) {
        System.out.println(file.getPath().toString());
    }
}
@Test
public void testGetBlockInfo() throws Exception {
    // list the block locations of a file
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    Path dst = new Path("/test/rcc");
    FileStatus fileStatus = fs.getFileStatus(dst);
    BlockLocation[] blkloc = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
    // print the hosts that hold each block
    for (BlockLocation loc : blkloc) {
        for (int i = 0; i < loc.getHosts().length; i++) {
            System.out.println(loc.getHosts()[i]);
        }
    }
}
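As a complement to the two upload tests, here is a minimal sketch of the reverse direction, copying a file from HDFS back to the local filesystem with copyToLocalFile. It fits into the same class and uses the same imports; the local destination path is only an illustration, not something from the original setup.

@Test
public void testDownloadFile() throws Exception {
    // copy an HDFS file to the local filesystem (sketch; the local path is hypothetical)
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    Path src = new Path("/test/rcc");
    Path dst = new Path("/home/hadoop/rcc-copy");
    fs.copyToLocalFile(src, dst);
}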

Create a Java project: File -> New -> Java Project, and name it TestHDFS.
The experiment is done with unit tests, so add the JUnit library: in the project navigator, right-click and choose Build Path -> Add Libraries -> JUnit. With that in place:
Import the Hadoop-related external jars via Build Path -> Add External Archives. The jars include:
hadoop/lib/*.jar and hadoop/hadoop-core-1.2.1.jar
Create a Java class, TestHDFS.java, that extends junit.framework.TestCase, and start writing the code:
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import junit.framework.TestCase;
public class TestHDFS extends TestCase {
    // ... the test methods listed above go here ...
}
The FileSystem API has many more methods; explore them as you go.
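As a taste of what else is available, here is a hedged sketch of two more test methods that can be added to the same TestHDFS class with the same imports: one reads /test/b.txt (created by the rename test above) back and prints it, the other deletes it. fs.open, fs.exists and fs.delete are standard FileSystem calls.

@Test
public void testReadFile() throws Exception {
    // read an HDFS file and dump its contents to stdout
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    InputStream in = fs.open(new Path("/test/b.txt"));
    IOUtils.copyBytes(in, System.out, 4096, true); // 'true' closes the stream afterwards
}

@Test
public void testDeleteFile() throws Exception {
    // delete an HDFS file; the 'false' flag means non-recursive
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUrl), conf);
    Path path = new Path("/test/b.txt");
    if (fs.exists(path)) {
        System.out.println(fs.delete(path, false));
    }
}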
Unit testing procedure:
In the left-hand Eclipse pane, expand the Java class to show its methods, right-click the one you want to test, and choose Run As -> JUnit Test, as shown in the figure.
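If you prefer running the tests outside Eclipse, a minimal sketch using the JUnit 3 text runner also works, assuming the JUnit and Hadoop jars are on the classpath; the RunTests class name here is just an illustration:

import junit.textui.TestRunner;

public class RunTests {
    public static void main(String[] args) {
        // runs all test* methods of TestHDFS and prints a text report
        TestRunner.run(TestHDFS.class);
    }
}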
That's it for now.
API notes: