I. Installing Hadoop
HBase actually stores its data on HDFS, so the Hadoop service needs to be installed first. The cluster used here:
| Node | IP | Hostname |
|---|---|---|
| master | 192.168.0.100 | hadoop |
| worker | 192.168.0.101 | hadoop2 |
| worker | 192.168.0.102 | hadoop3 |
# Set the hostname (each of the three machines sets its own)
vim /etc/hostname
hadoop
# Download and extract into the service directory
wget http://mirrors.hust.edu.cn/apache/hadoop/common/current/hadoop-3.3.0.tar.gz
tar zxvf hadoop-3.3.0.tar.gz
mv hadoop-3.3.0 /data/services/
# Configure the JDK and daemon users in hadoop-env.sh
vim /data/services/hadoop-3.3.0/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/local/jdk-15.0.2/
# When running as root, Hadoop 3 requires a *_USER variable for every daemon it starts
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
# Configure the YARN environment in yarn-env.sh
vim /data/services/hadoop-3.3.0/etc/hadoop/yarn-env.sh
export JAVA_HOME=/usr/local/jdk-15.0.2/
# Configure core-site.xml
vim /data/services/hadoop-3.3.0/etc/hadoop/core-site.xml
<configuration>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/data/hadoop/hdfs/data</value>
  </property>
  <property>
    <!-- fs.default.name is the deprecated spelling of fs.defaultFS; both are accepted -->
    <name>fs.default.name</name>
    <value>hdfs://hadoop:9000</value>
    <!-- NameNode host and port. Use the hostname here, not localhost, or Phoenix will fail to connect later -->
  </property>
</configuration>
# Configure hdfs-site.xml
vim /data/services/hadoop-3.3.0/etc/hadoop/hdfs-site.xml
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/data/hadoop/hdfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <!-- this path must match hadoop.tmp.dir in core-site.xml -->
    <value>/data/hadoop/hdfs/data</value>
  </property>
  <property>
    <!-- HDFS web UI address: http://<hostname>:50070 -->
    <name>dfs.http.address</name>
    <value>hadoop:50070</value>
  </property>
</configuration>
# Configure yarn-site.xml
vim /data/services/hadoop-3.3.0/etc/hadoop/yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>hadoop:18040</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>hadoop:18030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>hadoop:18025</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>hadoop:18141</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>hadoop:18088</value>
  </property>
  <!-- setting these allocations too low causes container memory errors -->
  <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>2000</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>3000</value>
  </property>
</configuration>
# Edit the workers file under etc/hadoop
vim /data/services/hadoop-3.3.0/etc/hadoop/workers
hadoop2
hadoop3
# Configure the MapReduce framework file
vim /data/services/hadoop-3.3.0/etc/hadoop/mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
# Copy hadoop to the other two machines
scp -r /data/services/hadoop-3.3.0/ root@192.168.0.101:/data/services/
scp -r /data/services/hadoop-3.3.0/ root@192.168.0.102:/data/services/
# Create the data directories on the other two machines
mkdir -vp /data/hadoop/data
mkdir -vp /data/hadoop/hdfs/data
mkdir -vp /data/hadoop/hdfs/name
# Edit /etc/hosts (all three machines)
vim /etc/hosts
192.168.0.100 hadoop
192.168.0.101 hadoop2
192.168.0.102 hadoop3
# Configure environment variables (all three machines)
vi /etc/profile
# Java environment (note: JDK 9+ no longer ships a jre/ directory, so JRE_HOME and CLASSPATH below are vestigial)
export JAVA_HOME=/usr/local/jdk-15.0.2
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
# Hadoop environment
export HADOOP_HOME=/data/services/hadoop-3.3.0
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
source /etc/profile
# Set up passwordless SSH login (all three machines)
ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:McCFQrALgUppzd/cAdLHMMc0upL51r60cLjc5FT/sio root@node2
The key's randomart image is:
+---[RSA 2048]----+
|o =o.oo*B+ |
|.= +. +o+=. |
|= . ..o.= . |
|.. . .oo.+ |
| . + .S . |
| o o . . |
| = * . |
| o @E. .. |
| o *o...o. |
+----[SHA256]-----+
# Create the authorized_keys file (all three machines)
touch /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
# Send the public key to the master (run on the non-master nodes)
ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop
# Push the merged authorized_keys to the other nodes (root is used here, so the local machine needs no copy; a dedicated user is recommended)
scp -r ~/.ssh/authorized_keys root@hadoop2:~/.ssh/
scp -r ~/.ssh/authorized_keys root@hadoop3:~/.ssh/
# Format the file system (only the namenode is formatted; datanodes need no formatting)
hdfs namenode -format
# Start the hadoop cluster (run only on the master)
/data/services/hadoop-3.3.0/sbin/start-all.sh
Once everything is installed, the state of the cluster can be checked at:
http://192.168.0.100:50070/dfshealth.html#tab-overview
http://192.168.0.100:18088/cluster
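Beyond the web UIs, write health can also be checked programmatically with the HDFS client API. A minimal Java sketch, assuming the fs.default.name configured above (the class name and test path are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;
import java.nio.charset.StandardCharsets;

public class HdfsSmokeTest {
    public static void main(String[] args) throws Exception {
        // Point the client at the NameNode address from core-site.xml
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://hadoop:9000"), conf)) {
            Path file = new Path("/smoke-test.txt"); // hypothetical test path
            // Writing a block exercises the datanodes, not just the namenode,
            // so this surfaces the "minReplication" error described below
            try (FSDataOutputStream out = fs.create(file, true)) {
                out.write("hello hdfs".getBytes(StandardCharsets.UTF_8));
            }
            System.out.println("written: " + fs.exists(file));
        }
    }
}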
Common error: could only be written to 0 of the 1 minReplication nodes. There are 0 datanode(s) running and 0 node(s) are excluded in this operation.
Cause: usually the namenode was re-formatted, which gives it a new cluster ID that the existing datanodes refuse to join.
Fix: stop the cluster, delete the old data on every node, and format again:
/data/services/hadoop-3.3.0/sbin/stop-all.sh
rm -rf /data/hadoop/hdfs/name/*
rm -rf /data/hadoop/hdfs/data/*
hdfs namenode -format
/data/services/hadoop-3.3.0/sbin/start-all.sh
II. Installing HBase
1. HBase relies on ZooKeeper for coordination, so install ZooKeeper first
# Download and configure ZooKeeper (needed on all three machines)
cd /data/download/
wget -c http://mirrors.cnnic.cn/apache/zookeeper/zookeeper-3.4.9/zookeeper-3.4.9.tar.gz
cd /data/services/
tar zxvf /data/download/zookeeper-3.4.9.tar.gz
mv zookeeper-3.4.9/ zookeeper
cp zookeeper/conf/zoo_sample.cfg zookeeper/conf/zoo.cfg
vim ./zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=5
syncLimit=2
dataDir=/data/zookeeper/data
dataLogDir=/data/zookeeper/logs
clientPort=2181
server.1=192.168.0.100:2888:3888
server.2=192.168.0.101:2888:3888
server.3=192.168.0.102:2888:3888
# Create the data directory and myid file on each node
[root@hadoop ~]# mkdir -p /data/zookeeper/data/
[root@hadoop ~]# echo 1 > /data/zookeeper/data/myid
[root@hadoop2 ~]# mkdir -p /data/zookeeper/data/
[root@hadoop2 ~]# echo 2 > /data/zookeeper/data/myid
[root@hadoop3 ~]# mkdir -p /data/zookeeper/data/
[root@hadoop3 ~]# echo 3 > /data/zookeeper/data/myid
Start: /data/services/zookeeper/bin/zkServer.sh start
Stop: /data/services/zookeeper/bin/zkServer.sh stop
2. Installing HBase
# Download HBase
wget https://dlcdn.apache.org/hbase/2.4.16/hbase-2.4.16-bin.tar.gz
tar zxvf ./hbase-2.4.16-bin.tar.gz
mv ./hbase-2.4.16 /data/services/
# Configure
cd /data/services/hbase-2.4.16/conf
vim hbase-env.sh
export JAVA_HOME=/usr/local/jdk-15.0.2/
# use the external ZooKeeper installed above instead of the bundled one
export HBASE_MANAGES_ZK=false
vim hbase-site.xml
<configuration>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.rootdir</name>
    <!-- must match fs.default.name in core-site.xml -->
    <value>hdfs://hadoop:9000/hbase</value>
  </property>
  <property>
    <name>hbase.regionserver.hostname.disable.master.reversedns</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.wal.provider</name>
    <value>filesystem</value> <!-- multiwal also works -->
  </property>
  <property>
    <name>phoenix.default.column.encoded.bytes.attrib</name>
    <value>0</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>hadoop,hadoop2,hadoop3</value>
    <description>Addresses of the ZooKeeper quorum, without port numbers</description>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/data/data/hbase/zookeeperDir</value>
    <description>Where ZooKeeper metadata is stored; if unset it defaults to /tmp and is lost on restart</description>
  </property>
  <property>
    <name>hbase.master.info.port</name>
    <value>60010</value>
  </property>
</configuration>
# Command-line shell
/data/services/hbase-2.4.16/bin/hbase shell
Start: /data/services/hbase-2.4.16/bin/start-hbase.sh
Stop: /data/services/hbase-2.4.16/bin/stop-hbase.sh
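Once HBase is up, connectivity can be sanity-checked from Java before Phoenix enters the picture. A minimal sketch using the standard HBase client API, assuming the ZooKeeper quorum configured above (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class HBaseSmokeTest {
    public static void main(String[] args) throws Exception {
        // The quorum normally comes from hbase-site.xml on the classpath;
        // it is set explicitly here for clarity
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "hadoop,hadoop2,hadoop3");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Listing namespaces forces a round trip to the HBase master
            for (String ns : admin.listNamespaces()) {
                System.out.println("namespace: " + ns);
            }
        }
    }
}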
III. Installing Phoenix
HBase itself only supports simple single-table queries, so Phoenix is used here as the SQL query layer; see the official documentation for details.
# Download
cd /data/download
wget https://dlcdn.apache.org/phoenix/phoenix-5.1.3/phoenix-hbase-2.4-5.1.3-bin.tar.gz --no-check-certificate
tar zxvf phoenix-hbase-2.4-5.1.3-bin.tar.gz
# Configure
mv phoenix-hbase-2.4-5.1.3-bin /data/services/
cd /data/services/phoenix-hbase-2.4-5.1.3-bin
# copy the server jar into HBase's lib directory on every node
cp ./phoenix-server-hbase-2.4.jar /data/services/hbase-2.4.16/lib/
# Restart HBase
/data/services/hbase-2.4.16/bin/stop-hbase.sh
/data/services/hbase-2.4.16/bin/start-hbase.sh
# Shell test
cd /data/services/phoenix-hbase-2.4-5.1.3-bin/bin
./sqlline.py 192.168.0.100:2181
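A plain JDBC round trip is a quick way to confirm that the server jar was picked up. A minimal sketch, assuming the addresses above (the demo table is illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class PhoenixSmokeTest {
    public static void main(String[] args) throws Exception {
        // The Phoenix JDBC URL points at the ZooKeeper quorum, not at HBase directly
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:192.168.0.100:2181");
             Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS demo (id BIGINT PRIMARY KEY, name VARCHAR)");
            stmt.executeUpdate("UPSERT INTO demo VALUES (1, 'alice')");
            conn.commit(); // Phoenix connections default to autoCommit=false
            try (ResultSet rs = stmt.executeQuery("SELECT id, name FROM demo")) {
                while (rs.next()) {
                    System.out.println(rs.getLong(1) + " " + rs.getString(2));
                }
            }
        }
    }
}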
IV. Integrating Phoenix and MyBatis-Plus in Spring Boot to operate HBase
1. Add the dependencies
<dependency>
    <groupId>cn.hutool</groupId>
    <artifactId>hutool-all</artifactId>
</dependency>
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-boot-starter</artifactId>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.phoenix/phoenix-core -->
<dependency>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix-core</artifactId>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>log4j-over-slf4j</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.phoenix/phoenix-hbase-compat-2.4.1 -->
<dependency>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix-hbase-compat-2.4.1</artifactId>
    <scope>runtime</scope>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>log4j-over-slf4j</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
<!-- declare this after the phoenix artifacts, or there will be dependency conflicts -->
<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-shaded-client</artifactId>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>log4j-over-slf4j</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
2. Configure the data source
spring:
  datasource:
    url: jdbc:phoenix:192.168.0.100:2181,192.168.0.101:2181,192.168.0.102:2181
    driver-class-name: org.apache.phoenix.jdbc.PhoenixDriver
3. Define the Mapper
// Base mapper that adds Phoenix's UPSERT on top of the standard MyBatis-Plus methods
public interface PhoenixBaseMapper<T> extends BaseMapper<T> {
    int upsert(T entity);
}

@Mapper
public interface TestMapper extends PhoenixBaseMapper<ContactModel> {
}
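The ContactModel entity referenced by TestMapper is not shown here; a minimal hypothetical sketch, assuming a lowercase Phoenix table named contact (exactly the case the quoting interceptor below exists for):

import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;

// Hypothetical entity: Phoenix upper-cases unquoted identifiers, so a
// lowercase table name like this must be quoted in the generated SQL
@TableName("contact")
public class ContactModel {

    @TableId
    private Long id;

    private String name;

    public Long getId() { return id; }
    public void setId(Long id) { this.id = id; }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
}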
4. Inject the Upsert method
public class PhoenixSqlInjector extends DefaultSqlInjector {
    @Override
    public List<AbstractMethod> getMethodList(Class<?> mapperClass, TableInfo tableInfo) {
        List<AbstractMethod> methodList = super.getMethodList(mapperClass, tableInfo);
        // register the extra Upsert method alongside the built-in CRUD methods
        methodList.add(new Upsert());
        return methodList;
    }
}
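The Upsert method registered above is not shown in the original. A minimal sketch of what it might look like, assuming MyBatis-Plus 3.4.x internals (SqlScriptUtils and addInsertMappedStatement come from the framework; the template simply swaps INSERT for Phoenix's UPSERT):

import com.baomidou.mybatisplus.core.injector.AbstractMethod;
import com.baomidou.mybatisplus.core.metadata.TableInfo;
import com.baomidou.mybatisplus.core.toolkit.sql.SqlScriptUtils;
import org.apache.ibatis.executor.keygen.NoKeyGenerator;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.mapping.SqlSource;

public class Upsert extends AbstractMethod {

    private static final String SQL_TEMPLATE = "<script>\nUPSERT INTO %s %s VALUES %s\n</script>";
    private static final String METHOD_NAME = "upsert"; // must match PhoenixBaseMapper#upsert

    @Override
    public MappedStatement injectMappedStatement(Class<?> mapperClass, Class<?> modelClass, TableInfo tableInfo) {
        // Build "(col1,col2,...)" and the matching "(#{prop1},#{prop2},...)" from the table metadata
        String columnScript = SqlScriptUtils.convertTrim(tableInfo.getAllInsertSqlColumnMaybeIf(null),
                LEFT_BRACKET, RIGHT_BRACKET, null, COMMA);
        String valuesScript = SqlScriptUtils.convertTrim(tableInfo.getAllInsertSqlPropertyMaybeIf(null),
                LEFT_BRACKET, RIGHT_BRACKET, null, COMMA);
        String sql = String.format(SQL_TEMPLATE, tableInfo.getTableName(), columnScript, valuesScript);
        SqlSource sqlSource = languageDriver.createSqlSource(configuration, sql, modelClass);
        // Phoenix's UPSERT is insert-or-update by primary key, so no key generator is needed
        return this.addInsertMappedStatement(mapperClass, modelClass, METHOD_NAME, sqlSource,
                new NoKeyGenerator(), null, null);
    }
}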
@Configuration
public class MyBatisPlusConfig {

    @Bean
    public MybatisPlusInterceptor paginationInterceptor() {
        MybatisPlusInterceptor innerInterceptor = new MybatisPlusInterceptor();
        innerInterceptor.addInnerInterceptor(new PaginationInnerInterceptor(DbType.PHOENIX));
        // innerInterceptor.addInnerInterceptor(new PhoenixMybatisPlusPlugin());
        return innerInterceptor;
    }

    @Bean
    public PhoenixSqlInjector phoenixSqlInjector() {
        return new PhoenixSqlInjector();
    }
}
# Phoenix automatically upper-cases table and column names. To keep lower-case identifiers, every query must double-quote them (e.g. SELECT "name" FROM "contact" rather than SELECT name FROM contact); the interceptor below applies the quoting uniformly.
package com.souyee.email.common.interceptor;
import cn.hutool.core.util.StrUtil;
import com.baomidou.mybatisplus.core.toolkit.PluginUtils;
import com.baomidou.mybatisplus.core.toolkit.StringUtils;
import com.baomidou.mybatisplus.extension.parser.JsqlParserSupport;
import com.baomidou.mybatisplus.extension.plugins.inner.InnerInterceptor;
import net.sf.jsqlparser.expression.*;
import net.sf.jsqlparser.schema.Column;
import net.sf.jsqlparser.schema.Table;
import net.sf.jsqlparser.statement.Statement;
import net.sf.jsqlparser.statement.delete.Delete;
import net.sf.jsqlparser.statement.insert.Insert;
import net.sf.jsqlparser.statement.select.*;
import net.sf.jsqlparser.statement.update.Update;
import net.sf.jsqlparser.statement.upsert.Upsert;
import org.apache.ibatis.executor.Executor;
import org.apache.ibatis.executor.statement.StatementHandler;
import org.apache.ibatis.mapping.BoundSql;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.mapping.SqlCommandType;
import org.apache.ibatis.session.ResultHandler;
import org.apache.ibatis.session.RowBounds;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.stream.Collectors;
/**
 * @author blueo
 * @desc Phoenix's syntax differs from MySQL: unquoted table and column names are
 *       automatically upper-cased. If the schema is not all upper-case, this plugin
 *       adds the double quotes automatically.
 * @date 2023/3/17 18:00
 */
public class PhoenixMybatisPlusPlugin extends JsqlParserSupport implements InnerInterceptor {

    /**
     * Handling for queries
     */
    @Override
    public void beforeQuery(Executor executor, MappedStatement ms, Object parameter, RowBounds rowBounds, ResultHandler resultHandler, BoundSql boundSql) throws SQLException {
        PluginUtils.MPBoundSql mpBs = PluginUtils.mpBoundSql(boundSql);
        // rewrite the SQL with JSqlParser, then execute
        mpBs.sql(parserSingle(mpBs.sql(), null));
    }

    /**
     * Handling for insert/update/delete
     */
    @Override
    public void beforePrepare(StatementHandler sh, Connection connection, Integer transactionTimeout) {
        PluginUtils.MPStatementHandler mpSh = PluginUtils.mpStatementHandler(sh);
        MappedStatement ms = mpSh.mappedStatement();
        SqlCommandType sct = ms.getSqlCommandType();
        // for insert/update/delete, rewrite the SQL with JSqlParser, then execute
        if (sct == SqlCommandType.INSERT || sct == SqlCommandType.UPDATE || sct == SqlCommandType.DELETE) {
            PluginUtils.MPBoundSql mpBs = mpSh.mPBoundSql();
            mpBs.sql(parserMulti(mpBs.sql(), null));
        }
    }
    @Override
    protected String processParser(Statement statement, int index, String sql, Object obj) {
        if (this.logger.isDebugEnabled()) {
            this.logger.debug("SQL to parse, SQL: " + sql);
        }
        if (statement instanceof Insert) {
            this.processInsert((Insert) statement, index, sql, obj);
        } else if (statement instanceof Select) {
            this.processSelect((Select) statement, index, sql, obj);
        } else if (statement instanceof Update) {
            this.processUpdate((Update) statement, index, sql, obj);
        } else if (statement instanceof Delete) {
            this.processDelete((Delete) statement, index, sql, obj);
        } else if (statement instanceof Upsert) {
            this.processUpsert((Upsert) statement, index, sql, obj);
        }
        sql = statement.toString();
        if (this.logger.isDebugEnabled()) {
            this.logger.debug("parse the finished SQL: " + sql);
        }
        return sql;
    }
    @Override
    protected void processInsert(Insert insert, int index, String sql, Object obj) {
        throw new UnsupportedOperationException();
    }

    @Override
    protected void processDelete(Delete delete, int index, String sql, Object obj) {
        throw new UnsupportedOperationException();
    }

    @Override
    protected void processUpdate(Update update, int index, String sql, Object obj) {
        throw new UnsupportedOperationException();
    }

    protected void processUpsert(Upsert upsert, int index, String sql, Object obj) {
        // quote the table name
        upsert.setTable(disposeTable(upsert.getTable()));
        // quote the columns
        List<Column> columnList = upsert.getColumns();
        columnList = columnList.stream()
                .map(e -> reFormatSelectColumn(e, null))
                .collect(Collectors.toList());
        upsert.setColumns(columnList);
    }
    @Override
    protected void processSelect(Select select, int index, String sql, Object obj) {
        // handle SELECT here: wrap identifiers in double quotes
        SelectBody selectBody = select.getSelectBody();
        if (selectBody instanceof PlainSelect) reformatPlainSelect((PlainSelect) selectBody);
    }

    /**
     * Quote the selected columns
     */
    private List<SelectItem> disposeSelectColumn(List<SelectItem> selectItems) {
        return selectItems.stream().map(this::resetSelectItem).collect(Collectors.toList());
    }

    /**
     * Quote the table name (and schema name, if any)
     * @param table
     * @return
     */
    private Table disposeTable(Table table) {
        if (StringUtils.isEmpty(table.getSchemaName())) {
            return new Table(appendPrefixAndSuffix(table.getName())).withAlias(table.getAlias());
        }
        return new Table(appendPrefixAndSuffix(table.getSchemaName()), appendPrefixAndSuffix(table.getName())).withAlias(table.getAlias());
    }
    private SelectItem resetSelectItem(SelectItem selectItem) {
        // not an expression item: return it unchanged
        if (!(selectItem instanceof SelectExpressionItem)) return selectItem;
        SelectExpressionItem item = (SelectExpressionItem) selectItem;
        // a plain column
        if (item.getExpression() instanceof Column) {
            Column columnExp = (Column) item.getExpression();
            return new SelectExpressionItem(reFormatSelectColumn(columnExp, item.getAlias()));
        }
        // a function call
        if (item.getExpression() instanceof Function) {
            Function function = (Function) item.getExpression();
            return new SelectExpressionItem(reFormatFunction(function));
        }
        return item;
    }

    /**
     * Reformat a plain SELECT; the statement is rewritten in place
     * @param plainSelect the select statement
     */
    public void reformatPlainSelect(PlainSelect plainSelect) {
        // quote the selected columns
        List<SelectItem> selectItems = plainSelect.getSelectItems();
        plainSelect.setSelectItems(disposeSelectColumn(selectItems));
        // quote the table name
        if (plainSelect.getFromItem() instanceof Table) {
            plainSelect.setFromItem(disposeTable((Table) plainSelect.getFromItem()));
        }
        // quote the WHERE clause
        plainSelect.setWhere(disposeSelectWhere(plainSelect.getWhere()));
    }
    /**
     * Reformat a column
     * @param columnExp the column
     * @param alias the column alias
     * @return the quoted column
     */
    private Column reFormatSelectColumn(Column columnExp, Alias alias) {
        if (columnExp == null) return columnExp;
        // the table name and column-family name arrive together
        String tableAndCFName = columnExp.getTable() == null ? "" : columnExp.getTable().toString();
        // the column name
        String columnName = columnExp.getColumnName();
        // split on '.' to separate the table name from the column-family name
        String[] tableAndCFInfo = tableAndCFName.split("\\.");
        // possible shapes: column / family.column / table.family.column / table.column
        String tableName = tableAndCFInfo[0];
        String cf = tableAndCFInfo[tableAndCFInfo.length - 1];
        // if the two parts are equal, only three shapes remain: column / table.column / family.column
        if (StrUtil.equals(tableName, cf) && StrUtil.isNotBlank(tableName)) {
            // decide whether the prefix is a table or a column family; the convention here is
            // that column families are all upper-case and table names are not
            if (StrUtil.equals(cf.toUpperCase(), cf)) {
                tableName = ""; // all upper-case: it is a column family
            } else cf = ""; // otherwise it is a table name
        }
        StringBuilder finalName = new StringBuilder();
        // prepend the table name if present
        if (StrUtil.isNotBlank(tableName)) finalName.append(appendPrefixAndSuffix(tableName)).append(".");
        // prepend the column-family name if present
        if (StrUtil.isNotBlank(cf)) finalName.append(appendPrefixAndSuffix(cf)).append(".");
        // append the column name
        finalName.append(appendPrefixAndSuffix(columnName));
        // append the alias: as xxx
        if (alias != null) finalName.append(" ").append(alias.getName());
        // wrap the rewritten name in a new column and return it
        return new Column(finalName.toString());
    }
    /**
     * Reformat a function call in the select list
     * @param function the function
     * @return the function with quoted column arguments
     */
    private Function reFormatFunction(Function function) {
        List<Expression> expressions = function.getParameters().getExpressions();
        // quote any arguments that are columns
        expressions = expressions.stream().map(exp -> {
            if (exp instanceof Column) return reFormatSelectColumn((Column) exp, null);
            return exp;
        }).collect(Collectors.toList());
        // write the rewritten arguments back
        function.getParameters().setExpressions(expressions);
        return function;
    }

    /**
     * Reformat a subquery
     * @param subSelect the subquery
     * @return the reformatted subquery
     */
    private SubSelect reFormatSubSelect(SubSelect subSelect) {
        if (subSelect.getSelectBody() instanceof PlainSelect) {
            reformatPlainSelect((PlainSelect) subSelect.getSelectBody());
        }
        return subSelect;
    }
    public Expression disposeSelectWhere(Expression expression) {
        if (expression instanceof Parenthesis) {
            disposeSelectWhere(((Parenthesis) expression).getExpression());
        }
        if (!(expression instanceof BinaryExpression)) return expression;
        BinaryExpression binaryExpression = (BinaryExpression) expression;
        // the left side is itself a compound condition: recurse
        if (binaryExpression.getLeftExpression() instanceof BinaryExpression) {
            disposeSelectWhere(binaryExpression.getLeftExpression());
        }
        // the right side is itself a compound condition: recurse
        if (binaryExpression.getRightExpression() instanceof BinaryExpression) {
            disposeSelectWhere(binaryExpression.getRightExpression());
        }
        // the left side is a column: quote it
        if (binaryExpression.getLeftExpression() instanceof Column) {
            Column newColumn = reFormatSelectColumn((Column) binaryExpression.getLeftExpression(), null);
            binaryExpression.setLeftExpression(newColumn);
        }
        // the left side is a subquery: reformat its select body
        if (binaryExpression.getLeftExpression() instanceof SubSelect) {
            SubSelect subSelect = (SubSelect) binaryExpression.getLeftExpression();
            reFormatSubSelect(subSelect);
        }
        // the right side is a column: quote it
        if (binaryExpression.getRightExpression() instanceof Column) {
            Column newColumn = reFormatSelectColumn((Column) binaryExpression.getRightExpression(), null);
            binaryExpression.setRightExpression(newColumn);
        }
        // the right side is a subquery: reformat its select body
        if (binaryExpression.getRightExpression() instanceof SubSelect) {
            SubSelect subSelect = (SubSelect) binaryExpression.getRightExpression();
            reFormatSubSelect(subSelect);
        }
        return binaryExpression;
    }
    private String appendPrefixAndSuffix(String str) {
        final String PREFIX = "\"";
        final String SUFFIX = "\"";
        // already quoted: return as-is
        if (str.contains(PREFIX)) return str;
        // wrap in double quotes
        return PREFIX + str + SUFFIX;
    }
}
5. Configure HBase
Copy the server's hbase-site.xml into the project's resources folder.
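With everything wired up, the injected upsert and paged queries work like any other MyBatis-Plus call. A hypothetical usage sketch (the service and method names are illustrative):

import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;

@Service
public class ContactService {

    @Resource
    private TestMapper testMapper;

    // Phoenix's UPSERT covers both insert and update, keyed on the primary key
    public void save(ContactModel contact) {
        testMapper.upsert(contact);
    }

    // Paging goes through the PaginationInnerInterceptor registered for DbType.PHOENIX
    public Page<ContactModel> page(long current, long size) {
        return testMapper.selectPage(new Page<>(current, size), null);
    }
}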