Installation
1. Upload the HBase installation package (HBase 1.2.6 is used in this walkthrough).
2. Extract it (tar -zxvf hbase… -C /usr/local).
3. Configure the HBase cluster by modifying 3 files (the ZooKeeper cluster must already be set up).
Note: copy Hadoop's hdfs-site.xml and core-site.xml into hbase/conf.
3.1 Edit hbase-env.sh
export JAVA_HOME=/usr/java/jdk1.7.0_55
# Tell HBase to use the external ZooKeeper ensemble instead of managing its own
export HBASE_MANAGES_ZK=false
vim hbase-site.xml
<configuration>
    <property>
        <name>hbase.rootdir</name>
        <value>hdfs://hadoop01:9000/hbase</value>
    </property>
    <property>
        <name>hbase.cluster.distributed</name>
        <value>true</value>
    </property>
    <property>
        <name>hbase.zookeeper.quorum</name>
        <value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
    </property>
</configuration>
vim regionservers (one RegionServer host per line)
hadoop01
hadoop02
hadoop03
3.2 Copy HBase to the other nodes
scp -r /usr/local/hbase-1.2.6/ hadoop02:/usr/local/
scp -r /usr/local/hbase-1.2.6/ hadoop03:/usr/local/
4. With the configured HBase copied to every node, make sure the system clocks of all nodes are synchronized.
5. Start all the services
Start ZooKeeper (on each ZooKeeper node):
./zkServer.sh start
Start the Hadoop HDFS cluster:
start-dfs.sh
Start HBase; on the master node run:
start-hbase.sh
6. Open the HBase web UI in a browser (in HBase 1.x the HMaster UI listens on port 16010):
192.168.1.201:16010
7. For cluster reliability, start additional (backup) HMaster processes on other nodes:
hbase-daemon.sh start master
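Besides the web UI, you can also confirm from a Java client that the region servers and backup masters have registered. The sketch below is only a quick check, assuming the hbase-client dependency matching the 1.2.x cluster is on the classpath (the class name CheckCluster is just for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CheckCluster {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // same ZooKeeper quorum as configured in hbase-site.xml above
        conf.set("hbase.zookeeper.quorum", "hadoop01:2181,hadoop02:2181,hadoop03:2181");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            ClusterStatus status = admin.getClusterStatus();
            System.out.println("active master : " + status.getMaster());
            System.out.println("backup masters: " + status.getBackupMasters());
            System.out.println("live region servers: " + status.getServersSize());
        }
    }
}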
Link: https://pan.baidu.com/s/14Q6z6bdDa-WgMu5TbOUgzA  Password: dzxd
Data Operations
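The test class below writes to a table named t_user, so the table has to exist before the tests run. Here is a minimal sketch that creates it through the Admin API; the column family name "info" is an assumption, replace it with whichever family the puts actually target (the table can just as well be created from the HBase shell):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateUserTable {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "hadoop01:2181,hadoop02:2181,hadoop03:2181");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName name = TableName.valueOf("t_user");
            if (!admin.tableExists(name)) {
                HTableDescriptor desc = new HTableDescriptor(name);
                // "info" is an assumed column family name
                desc.addFamily(new HColumnDescriptor("info"));
                admin.createTable(desc);
            }
        }
    }
}

With the table in place, the test class below walks through the basic operations: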
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestData {
/*
 * CRUD operations on the data and how to use filters
 */
/**
 * Obtain the connection
 */
Configuration conf = null;
Connection connect = null;
Table table = null;
@Before
public void init() throws IOException {
conf = HBaseConfiguration.create();
// the client only needs the ZooKeeper quorum; it discovers the masters and region servers from there
conf.set("hbase.zookeeper.quorum", "hadoop01:2181,hadoop02:2181,hadoop03:2181");
connect = ConnectionFactory.createConnection(conf);
}
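// The listing is truncated before any cleanup is shown; a minimal teardown sketch
// (an assumption: the table and connection opened above are released after each test)
// could look like this:
@After
public void close() throws IOException {
if (table != null) {
table.close();
}
if (connect != null) {
connect.close();
}
}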
/**
* Insert data
* @throws Exception
*/
@Test
public void testadd() throws Exception {
table = connect.getTable(TableName.valueOf("t_user"));
// set the row key
Put put = new Put("001".getBytes());