Hadoop 2.2 HDFS operation examples
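
The classes below use the Hadoop 2.2 FileSystem API for common HDFS tasks: deleting a path, reading a file with and without seek(), globbing and listing directories, creating directories, filtering listings, uploading a local file with progress reporting, and reading HDFS content through java.net.URL. Each class is a standalone program in the hdfs package.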

package hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Deletes the HDFS path given as the first argument.
public class Delete {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		FileSystem fs = FileSystem.get(conf);
		// The second argument requests recursive deletion, so directories
		// are removed together with their contents.
		fs.delete(new Path(args[0]), true);
		fs.close();
	}

}
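
Packaged into a jar (hdfs-examples.jar below is a placeholder name, as is the path), the class can be run with:

hadoop jar hdfs-examples.jar hdfs.Delete /user/hadoop/tmp

Since the second argument of fs.delete() is true, the call removes directories recursively, like hadoop fs -rm -r.
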
package hdfs;

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

// Prints a file twice, using seek() to jump back to an offset near the start.
public class DoubleCat {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		FileSystem fs = FileSystem.get(URI.create(args[0]), conf);
		FSDataInputStream in = null;
		try {
			in = fs.open(new Path(args[0]));
			IOUtils.copyBytes(in, System.out, 1024, false);
			// Rewind to byte offset 3 and copy the rest of the file again.
			in.seek(3);
			IOUtils.copyBytes(in, System.out, 1024, false);
		} finally {
			IOUtils.closeStream(in);
		}
	}

}
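
DoubleCat takes a full file URI (for example hdfs://namenode:9000/user/hadoop/in.txt; host, port and path are placeholders) because args[0] is passed both to FileSystem.get() and to fs.open(). The output is the entire file followed by the same file minus its first three bytes, showing that FSDataInputStream, unlike a plain InputStream, supports random access.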

package hdfs;

import java.io.InputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

// Streams a file to standard output through the FileSystem API.
public class FileSystemCat {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		FileSystem fileSystem = FileSystem.get(URI.create(args[0]), conf);
		InputStream in = null;
		try {
			in = fileSystem.open(new Path(args[0]));
			IOUtils.copyBytes(in, System.out, 1024, false);
		} finally {
			IOUtils.closeStream(in);
		}
	}

}
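
This is the canonical read pattern: open the path, stream it to standard output in 1024-byte buffers, and close the stream in a finally block. The final false argument tells copyBytes() not to close the streams itself, leaving cleanup to the caller.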

package hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

// Expands a glob pattern and prints every matching path.
public class GlobStatus {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		FileSystem fs = FileSystem.get(conf);
		FileStatus[] fStatus = fs.globStatus(new Path(args[0]));
		// Convert the FileStatus array into plain paths for printing.
		Path[] paths = FileUtil.stat2Paths(fStatus);
		for (Path path : paths) {
			System.out.println(path);
		}
	}

}
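
globStatus() expands shell-style wildcards, so passing a pattern such as /user/hadoop/2014* (a placeholder) prints every matching entry. FileUtil.stat2Paths() is a small convenience that pulls the Path out of each FileStatus.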

package hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

// Lists the contents of the given directory.
public class ListStatus {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		FileSystem fs = FileSystem.get(conf);
		FileStatus[] fileStatuses = fs.listStatus(new Path(args[0]));
		Path[] paths = FileUtil.stat2Paths(fileStatuses);
		for (Path path : paths) {
			System.out.println(path);
		}
	}

}
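
Unlike GlobStatus, listStatus() takes a concrete file or directory and returns the status of that file or of the directory's immediate children, making this a minimal equivalent of hadoop fs -ls.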

package hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Creates a directory, including any missing parent directories.
public class Mkdir {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		FileSystem fs = FileSystem.get(conf);
		fs.mkdirs(new Path(args[0]));
		fs.close();
	}

}
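
mkdirs() behaves like mkdir -p on a local file system: it creates any missing parents along the way and returns true on success (the return value is ignored here).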

package hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

// Lists only those entries of a directory whose names start with "o".
public class PathFilterExample {

	private static class StartWithPathFilter implements PathFilter {

		@Override
		public boolean accept(Path path) {
			// Keep a path only if its final component starts with "o".
			return path.getName().startsWith("o");
		}

	}

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		FileSystem fs = FileSystem.get(conf);
		FileStatus[] fStatus = fs.listStatus(new Path(args[0]), new StartWithPathFilter());
		Path[] paths = FileUtil.stat2Paths(fStatus);
		for (Path path : paths) {
			System.out.println(path);
		}
	}

}
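
The filter is consulted for every entry of the listed directory, and only names starting with "o" (an output/ directory, for instance) survive. A PathFilter can equally be passed to globStatus() to restrict a wildcard match further.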

package hdfs;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;

// Copies a local file (args[0]) to HDFS (args[1]), printing progress markers.
public class PutData {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		FileSystem fs = FileSystem.get(URI.create(args[1]), conf);
		OutputStream out = fs.create(new Path(args[1]), new Progressable() {
			@Override
			public void progress() {
				// Called back periodically as data is written to the cluster.
				System.out.println("*");
			}
		});
		InputStream in = new BufferedInputStream(new FileInputStream(args[0]));
		try {
			IOUtils.copyBytes(in, out, 1024, false);
		} finally {
			IOUtils.closeStream(in);
			IOUtils.closeStream(out);
		}
	}

}
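
This is a programmatic hadoop fs -put: args[0] names a local file and args[1] the destination URI, e.g. hadoop jar hdfs-examples.jar hdfs.PutData local.txt hdfs://namenode:9000/user/hadoop/local.txt (jar name, host, port and paths are placeholders). Hadoop invokes progress() periodically while data is written, so the printed markers give crude feedback during large uploads.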

package hdfs;

import java.io.InputStream;
import java.net.URL;

import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.io.IOUtils;

// Reads HDFS content through java.net.URL instead of the FileSystem API.
public class UrlCat {

	static {
		// Teach java.net.URL about the hdfs:// scheme.
		URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
	}

	public static void main(String[] args) throws Exception {
		InputStream in = null;
		try {
			in = new URL(args[0]).openStream();
			IOUtils.copyBytes(in, System.out, 1024, false);
		} finally {
			IOUtils.closeStream(in);
		}
	}

}
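
URL.setURLStreamHandlerFactory() may be called at most once per JVM, which is why the call sits in a static initializer; if other code in the same JVM (a web container, say) has already installed a factory, this technique is unavailable and the FileSystem-based FileSystemCat above should be used instead.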

