hbase shell

hbase shell 命令

 

 

mvn clean; mvn package -DskipTests=true -Dhadoop.profile=2.0

 

 

https://code.google.com/p/powermock/issues/detail?id=504

There is a work-around for this issue: run the junit tests with a '-noverify' jvm argument
 

 

 

scan 'MEMBER_INFO', {STARTROW => "\x00\x00\x00\x00\x00\x9F\xBF\xF4\x00\x00\x00\x00\x10\xB2\xF2\xBE", LIMIT => 1000}

STOPROW 

 

scan 't1', {RAW => true, VERSIONS => 10}

 

scan 't1', {RAW => true}

 

scan 't1', {RAW => true, LIMIT => 10}

 

scan 'xml', {RAW=>true}

scan 'xml', {RAW=>true, LIMIT=>2}

disable 'xml'

enable 'xml'

 

alter 'bts_ic', {NAME => 'f',  BLOOMFILTER => 'ROW', VERSIONS => '3000', COMPRESSION => 'LZO', BLOCKSIZE => '8192', IN_MEMORY => 'true'}


alter 'xml', METHOD=>'table_att','coprocessor'=>'|org.apache.hadoop.hbase.coprocessor.TransformeRegionCompaction|1001|'

 

alter 'TestTable', METHOD =>'table_att', 'coprocessor'=>'hdfs://localhost:9000/user/tianzhao/table-trace-coprocessor-0.0.1.jar|com.trace.coprocessor.TableTraceCoprocessor|1|trace.flume.hostname=10.101.72.48,trace.sampler.fraction=1.0'

 

alter 'tsdb', {NAME => 't', DATA_BLOCK_ENCODING => 'FAST_DIFF'}

 

alter 'xml', {NAME => 'a', BLOCKCACHE => 'false'} 

 

alter 'tianzhao', METHOD=>'table_att', MAX_FILESIZE => '102400000000'

alter 'table_1024B', METHOD=>'table_att', DURABILITY => 'ASYNC_WAL'

alter 'table_1024B', METHOD=>'table_att', DURABILITY => 'SYNC_WAL'

 

alter 'xml', METHOD=>'table_att_unset', NAME=>'coprocessor$1'
alter 'xml', METHOD=>'table_att_unset', NAME=>'coprocessor$2'
major_compact 'xml'

 

alter 't1', {NAME => 'f1',IN_MEMORY => 'true'}
alter 't1', {NAME => 'f1', METHOD => 'delete'}

 

alter 'ic_tianzhao', METHOD=>'table_att', MEMSTORE_FLUSHSIZE => '268435456' 

 

// TTL of 3 days (3 * 86400 = 259200 seconds)

 

alter 'test',{NAME => 'info', TTL => '259200'}

To make cells never expire, set TTL to Integer.MAX_VALUE (2147483647 seconds), which HBase treats as 'FOREVER':

  create 't1',{NAME =>'f1', TTL =>2147483647}

 

 

create 'feature', {MAX_FILESIZE => '9223372036854775807'}, {NAME => 'f', BLOCKSIZE => '65535'} 

 

mvn clean test -Dtest=TestFullNotifyTask -DfailIfNoTests=false  

 

测试hbase.hstore.time.to.purge.deletes

put 't1','r5', 'f1:a','v5'

flush 't1'

scan 't1', {RAW => true, VERSIONS => 10}

delete 't1', 'r5', 'f1:a'

flush 't1'

scan 't1', {RAW => true, VERSIONS => 10}

major_compact 't1'

scan 't1', {RAW => true, VERSIONS => 10}

alter 't1',CONFIGURATION => {'hbase.hstore.time.to.purge.deletes' => '300000'} 

然后重新执行一遍看下效果。

 

 

http://wiki.apache.org/hadoop/Hbase/Shell

 

把下面的建表语句写到一个文件中create_table.sh

create 'bts2_ic2', {NAME => 'a', BLOOMFILTER => 'ROW', COMPRESSION => 'LZO', VERSIONS => '1', BLOCKSIZE => '8192'}

 

alter 'bts2_ic2', METHOD =>'table_att', 'coprocessor'=>'hdfs://dump002:9000/coprocessor/hbase-coprocessor-0.3.0.jar|com.dump.hbase.coprocessor.PriceTrendEndPoint|1', DEFERRED_LOG_FLUSH => 'true', MAX_FILESIZE => '102400000000', CONFIG => { 'SPLIT_POLICY' => 'org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy'}

 

exit 

然后运行 $HBASE_HOME/bin/hbase shell create_table.sh即可,或者运行cat create_table.sh | $HBASE_HOME/bin/hbase shell 。

 

 

'SYSTEM.TABLE', {METHOD => 'table_att', coprocessor$1 => '|com.salesforce.phoenix.coprocessor.ScanRegionObserver|1|', coprocessor$2 => '|com.salesforce.phoenix.coprocessor.UngroupedAggregateRegionObserver|1|', coprocessor$3 => '|com.salesforce.phoenix.coprocessor.GroupedAggregateRegionObserver|1|', coprocessor$4 => '|com.salesforce.phoenix.join.HashJoiningRegionObserver|1|', coprocessor$5 => '|com.salesforce.phoenix.coprocessor.MetaDataEndpointImpl|1|', coprocessor$6 => '|com.salesforce.phoenix.coprocessor.MetaDataRegionObserver|2|', CONFIG => {'SPLIT_POLICY' => 'com.salesforce.phoenix.schema.MetaDataSplitPolicy', 'UpgradeTo20' => 'true'}}, {NAME => '_0', DATA_BLOCK_ENCODING => 'FAST_DIFF', VERSIONS => '1000', KEEP_DELETED_CELLS => 'true'}

 

 

 http://learnhbase.wordpress.com/2013/03/02/hbase-shell-commands/

 

http://itindex.net/detail/46101-hbase-shell-%E5%91%BD%E4%BB%A4

 

http://www.tuicool.com/articles/uARbi2

 

create 't1', {NAME => 'f1', VERSIONS => 5}

put 't1', "row1", 'f1:1', "aaa"

scan 't1'

 

 Hbase 设置irbrc保存hbase的操作命令历史

http://blog.youkuaiyun.com/wisgood/article/details/23380907

 

Phoneix:

 

!tables

 

CREATE TABLE IF NOT EXISTS "t1" (
     row VARCHAR NOT NULL,
     "f1"."1" VARCHAR
     CONSTRAINT PK PRIMARY KEY (row)
);

select * from "t1";

drop table "t1"; // t1表里面的数据被删除掉了,但是table是存在的。

 

 ./sqlline.sh node35:2224 ../examples/stock_symbol.sql

./psql.sh node35:2224 ../examples/web_stat.sql ../examples/web_stat.csv ../examples/web_stat_queries.sql

./performance.sh node35:2224 1000000

 

performance.sh里面

# Create Table DDL

createtable="CREATE TABLE IF NOT EXISTS $table (HOST CHAR(2) NOT NULL,DOMAIN VARCHAR NOT NULL,

FEATURE VARCHAR NOT NULL,DATE DATE NOT NULL,USAGE.CORE BIGINT,USAGE.DB BIGINT,STATS.ACTIVE_VISITOR 

INTEGER CONSTRAINT PK PRIMARY KEY (HOST, DOMAIN, FEATURE, DATE)) 

SPLIT ON ('CSGoogle','CSSalesforce','EUApple','EUGoogle','EUSalesforce','NAApple','NAGoogle','NASalesforce');"

 


0: jdbc:phoenix:node35:2224> select count(*) from PERFORMANCE_1000000;
+----------+
| COUNT(1) |
+----------+
| 1000000  |
+----------+

0: jdbc:phoenix:node35:2224> select * from PERFORMANCE_1000000 limit 2;
+------+------------+------------+---------------------+----------+----------+----------------+
| HOST |   DOMAIN   |  FEATURE   |        DATE         |   CORE   |    DB    | ACTIVE_VISITOR |
+------+------------+------------+---------------------+----------+----------+----------------+
| CS   | Apple.com  | Dashboard  | 2013-10-22          | 425      | 1906     | 4744           |
| CS   | Apple.com  | Dashboard  | 2013-10-22          | 471      | 875      | 9533           |
+------+------------+------------+---------------------+----------+----------+----------------+

0: jdbc:phoenix:node35:2224> explain select count(*) from PERFORMANCE_1000000;
+------------+
|    PLAN    |
+------------+
| CLIENT PARALLEL 27-WAY FULL SCAN OVER PERFORMANCE_1000000 |
|     SERVER AGGREGATE INTO SINGLE ROW |
+------------+

0: jdbc:phoenix:node35:2224> explain select * from PERFORMANCE_1000000 limit 2;
+------------+
|    PLAN    |
+------------+
| CLIENT PARALLEL 27-WAY FULL SCAN OVER PERFORMANCE_1000000 |
|     SERVER FILTER BY PageFilter 2 |
| CLIENT 2 ROW LIMIT |
+------------+

 

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;


public class UsePhoenix {

  /**
   * Connects to a Phoenix cluster over JDBC and prints the row count of
   * the PERFORMANCE_1000000 table.
   *
   * Fix over the original: the Connection, Statement and ResultSet are now
   * released in a finally block, so they are not leaked when executeQuery()
   * or rs.next() throws — the original only closed the Connection on the
   * success path. The result of rs.next() is also checked before reading.
   */
  public static void main(String[] args) {

    Connection connection = null;
    Statement statement = null;
    ResultSet rs = null;
    try {
      // Register the Phoenix JDBC driver with DriverManager.
      Class.forName("com.salesforce.phoenix.jdbc.PhoenixDriver");
      // ZooKeeper quorum address of the Phoenix/HBase cluster.
      String DBConnectionString = "jdbc:phoenix:10.232.98.35:2224";
      connection = DriverManager.getConnection(DBConnectionString);

      statement = connection.createStatement();
      rs = statement
          .executeQuery("select count( *) from PERFORMANCE_1000000");
      // COUNT(*) always yields one row, but guard anyway instead of
      // reading from an unpositioned cursor.
      if (rs.next()) {
        System.out.println("Count " + rs.getLong(1));
      }
    } catch (Exception e) {
      e.printStackTrace();
    } finally {
      // Best-effort cleanup in reverse order of acquisition; a failure to
      // close one resource must not prevent closing the others.
      if (rs != null) {
        try {
          rs.close();
        } catch (Exception ignored) {
        }
      }
      if (statement != null) {
        try {
          statement.close();
        } catch (Exception ignored) {
        }
      }
      if (connection != null) {
        try {
          connection.close();
        } catch (Exception ignored) {
        }
      }
    }

  }

}

 

./psql.sh node35:2224 count.sql

./sqlline.sh node35:2224 count.sql

 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值