Common Database Scripts

1. Batch database drop script

#!/bin/bash
CONN_INFO=rds_list
while read line
do

  HOST=`echo $line|awk -F: '{print $1}'`
  USER=`echo $line|awk -F: '{print $2}'`
  PASS=`echo $line|awk -F: '{print $3}'`


  DB_LIST=$(mysql -h$HOST -u$USER -p$PASS -s -e "show databases;"  |grep  -Ev "__recycle_bin__|information_schema|mysql|performance_schema|sys")

  echo $DB_LIST
  for DB in $DB_LIST; do
    mysql -h$HOST -u$USER -p$PASS -s  -e "drop database $DB;"
    if [ $? -ne 0 ];then
        echo -e "实例:$HOST\n删除数据库:$DB 失败!" >> drop_err.log
    else
        echo -e "实例:$HOST\n删除数据库:$DB 成功!" >> drop_ok.log
    fi
  done
done < $CONN_INFO

Contents of the rds_list file (one instance per line, in host:user:password format):

rm-xxx:xxxx:xxxx
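
Since the script drops every non-system database on each instance, it is worth doing a read-only pass first and reviewing the result. A minimal dry-run sketch against the same rds_list format:

#!/bin/bash
# Dry run: only print the databases that would be dropped, without dropping anything.
while IFS=: read -r HOST USER PASS; do
  echo "== $HOST =="
  mysql -h$HOST -u$USER -p$PASS -s -e "show databases;" \
    | grep -Ev "__recycle_bin__|information_schema|mysql|performance_schema|sys"
done < rds_list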

2. Batch database user locking

#!/bin/bash
[ ! -d rds_lock ] && mkdir rds_lock
while read line
do
        Host=`echo $line|awk -F: '{print $1}'`
        User=`echo $line|awk -F: '{print $2}'`
        Pw=`echo $line|awk -F: '{print $3}'`
        mysql -h$Host -u$User -p${Pw} -e "select concat('ALTER USER \'', user,'\'','@','\'',host,'\'  ACCOUNT LOCK;') as user_lock FROM mysql.user where user not in ('aliyun_root', 'replicator', 'mysql.session', 'mysql.sys', 'rds_load', 'root', 'aurora', 'aurora_proxy','qianyi');" > rds_lock/user_lock_${Host}.sql
        str="mysql  -h$Host -u$User -p${Pw} "
        echo $str
        sed -i '1d'  rds_lock/user_lock_${Host}.sql
        echo "$str <  rds_lock/user_lock_${Host}.sql" |bash
done < rds_ip_list
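
If the lock ever has to be rolled back, the same pattern can generate the matching unlock statements. A sketch (not part of the original script), reading the same rds_ip_list file:

#!/bin/bash
# Generate "ALTER USER ... ACCOUNT UNLOCK;" for the same set of users and execute it immediately.
while read line
do
        Host=`echo $line|awk -F: '{print $1}'`
        User=`echo $line|awk -F: '{print $2}'`
        Pw=`echo $line|awk -F: '{print $3}'`
        mysql -h$Host -u$User -p${Pw} -N -e "select concat('ALTER USER ''',user,'''@''',host,''' ACCOUNT UNLOCK;') from mysql.user where user not in ('aliyun_root','replicator','mysql.session','mysql.sys','rds_load','root','aurora','aurora_proxy','qianyi');" | mysql -h$Host -u$User -p${Pw}
done < rds_ip_list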

2.1 Batch session killing on database instances

#!/bin/bash
start_time=`date +%s`  #定义脚本运行的开始时间

tmp_fifofile="/tmp/$$.fifo"
mkfifo $tmp_fifofile   # 新建一个FIFO类型的文件
exec 6<>$tmp_fifofile  # 将FD6指向FIFO类型
rm -f $tmp_fifofile  # the FIFO file can be removed now; FD 6 keeps it open

thread_num=5  # 定义最大线程数

#根据线程总数量设置令牌个数
#事实上就是在fd6中放置了$thread_num个回车符
for ((i=0;i<${thread_num};i++));do
    echo
done >&6

while read line
do
        Host=`echo $line|awk -F: '{print $2}'`
        User=`echo $line|awk -F: '{print $3}'`
        Pw=`echo $line|awk -F: '{print $4}'`

        mysql -h$Host -u$User -p${Pw} -N -e "select concat(id) from information_schema.processlist where user not in ('event_scheduler','replicator','system user','aliyun_root', 'replicator', 'mysql.session', 'mysql.sys', 'rds_load', 'root', 'aurora', 'aurora_proxy','qianyi','rds_sync');" > id

        for i in `cat id`
        do
                read -u6
                {
                        mysql -h$Host -u$User -p${Pw} -e "kill $i;"
                        echo >&6 # 当进程结束以后,再向FD6中加上一个回车符,即补上了read -u6减去的那个
                }&
        done

        mysql -h$Host -u$User -p${Pw} -s -e "select * from information_schema.processlist where user not in  ('event_scheduler','replicator','system user','aliyun_root', 'replicator', 'mysql.session', 'mysql.sys', 'rds_load', 'root', 'aurora', 'aurora_proxy','qianyi','rds_sync');"|awk  'BEGIN{printf "id\t\t用户\t\t地址/端口\t\t状态\n"}{printf "%-16s%-16s%-16s%13s\n",$1, $2, $3, $5}'
done < rds_ip_list

wait # 要有wait,等待所有线程结束

stop_time=`date +%s` # 定义脚本运行的结束时间
echo "TIME:`expr $stop_time - $start_time`" # 输出脚本运行时间

exec 6>&- # 关闭FD6
echo "over" # 表示脚本运行结束

        Note: lock the database users before killing their sessions, otherwise a killed session can simply reconnect to the database.

3. Database object validation

#!/bin/bash
file=ip

#-----------------------------------------------------
function v1(){
echo "---------------------------------VIEW-------------------------------------------"
mysql -h$1 -u$2 -p"$3" -P $4 -B -e"
select TABLE_SCHEMA as "数据库名称",TABLE_NAME as "视图名称"
FROM  INFORMATION_SCHEMA.TABLES
WHERE table_type ='view'
and TABLE_SCHEMA not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__')
group by TABLE_SCHEMA,TABLE_NAME;"
}
function func1(){
echo "---------------------------------FUNCTION-------------------------------------------"
mysql -h$1 -u$2 -p"$3" -P $4 -B -e"
select db as "数据库名称",name as "函数名称"
from mysql.proc
where db not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__','test')
and type = 'FUNCTION';"
}
function pro1(){
echo "-------------------------------- -PROCEDURE-------------------------------------------"
mysql -h$1 -u$2 -p"$3" -P $4 -B -e"
select db as "数据库名称",name as "存过名称"
from mysql.proc
where db not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__','test')
and type = 'PROCEDURE';"
}

function env1(){
echo "---------------------------------EVENT-------------------------------------------"
mysql -h$1 -u$2 -p"$3" -P $4 -B -e"
select  db as "数据库名称",name as "事件名称"
from  mysql.event
where db not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__','test'); "
}


function tr1(){
echo "---------------------------------TRIGGER-------------------------------------------"
mysql -h$1 -u$2 -p"$3"  -P $4 -e"
SELECT TRIGGER_SCHEMA as "数据库名称",TRIGGER_NAME as "触发器名称"
FROM information_schema.triggers
where TRIGGER_SCHEMA not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__','test') ;"
}

#-----------------------------------------目标端-------------------------------------------------
function v(){
echo "---------------------------------VIEW-------------------------------------------"
mysql -h$1 -u$2 -p"$3" -P $4 -B -e"
select TABLE_SCHEMA as "数据库名称",TABLE_NAME as "视图名称"
FROM  INFORMATION_SCHEMA.TABLES
WHERE table_type ='view'
and TABLE_SCHEMA not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__')
group by TABLE_SCHEMA,TABLE_NAME;"
}

function func(){
echo "---------------------------------FUNCTION-------------------------------------------"
mysql -h$1 -u$2 -p"$3" -P $4 -B -e"
select db as "数据库名称",name as "函数名称"
from mysql.proc
where db not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__','test')
and type = 'FUNCTION';"
}

function pro(){
echo "-------------------------------- -PROCEDURE-------------------------------------------"
mysql -h$1 -u$2 -p"$3" -P $4 -B -e"
select db as "数据库名称",name as "存过名称"
from mysql.proc
where db not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__','test')
and type = 'PROCEDURE';"
}

function env(){
echo "---------------------------------EVENT-------------------------------------------"
mysql -h$1 -u$2 -p"$3" -P $4 -B -e"
select  db as "数据库名称",name as "事件名称"
from  mysql.event
where db not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__','test'); "
}


function tr(){
echo "---------------------------------TRIGGER-------------------------------------------"
mysql -h$1 -u$2 -p"$3"  -P $4 -e"
SELECT TRIGGER_SCHEMA as "数据库名称",TRIGGER_NAME as "触发器名称"
FROM information_schema.triggers
where TRIGGER_SCHEMA not in ('sys','mysql','information_schema','performance_schema','restore','__recycle_bin__','test') ;"
}

function diff(){
        grep -vwf $1 $2
}
file2=src
file3=dst
file4=diff
echo $file
[ ! -d $file2 -a ! -d $file3 -a ! -d $file4 ] && mkdir -p $file{2..4}
#mkdir -p $file{2..4}
while read line
do
        H=`echo $line|awk -F'[:/]' '{print $1}'`
        src_ip=`echo $line|awk -F'[:/]' '{print $2}'`
        src_user=`echo $line|awk -F'[:/]' '{print $3}'`
        src_pw=`echo $line|awk -F'[:/]' '{print $4}'`
        src_port=`echo $line|awk -F'[:/]' '{print $5}'`
#-----------------------------------------
        dst_ip=`echo $line|awk -F'[:/]' '{print $6}'`
        dst_user=`echo $line|awk -F'[:/]' '{print $7}'`
        dst_pw=`echo $line|awk -F'[:/]' '{print $8}'`
        dst_port=`echo $line|awk -F'[:/]' '{print $9}'`
        src_file=${file2}
        dst_file=${file3}
        for i in v1 func1 pro1 env1 tr1
        do
                $i $src_ip  $src_user $src_pw $src_port > ${src_file}/${H}-${i}
        done
#-----------------------------------------------
        for i in v func pro env tr
        do
                $i $dst_ip $dst_user $dst_pw $dst_port > ${dst_file}/${H}-${i}
        done
#-----------------------------------------------
        for i in v func pro env tr
        do
                j=${i}1
                diff  ${dst_file}/${H}-${i}  ${src_file}/${H}-${j}   > $file4/${H}-${i}
        done

done < $file
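
The ip file read by the loop above is split on ':' and '/', so each line carries a label followed by source and target connection details (nine fields in total). A sample line, with placeholder values only, that matches the awk field positions:

app1:10.0.0.1:src_user:src_pass:3306/10.0.0.2:dst_user:dst_pass:3306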

4. Batch database backup script

#!/bin/bash
CONN_INFO=rds_list
DATE=$(date +%F_%H-%M-%S)
while read line
do
  #CLUSTER=`echo $line|awk '{print $4}'|awk -F "#" '{print $2}'`
  #TENANT_TYPE=`echo $line|awk '{print $1}'`
  #TENANT_NAME=`echo $line|awk '{print $4}'|awk -F "@" '{print $2}'|awk -F "#" '{print $1}'`
  HOST=`echo $line|awk '{print $2}'|awk -F "-h" '{print $2}'`
  USER=`echo $line|awk '{print $3}'|awk -F "-u" '{print $2}'`
  PASS=`echo $line|awk '{print $4}'|awk -F "-p" '{print $2}'`


  BACKUP_DIR="/data/db_backup/$HOST"
  mkdir -p $BACKUP_DIR
  DB_LIST=$(mysql -h$HOST -u$USER -p$PASS -s -e "show databases;"  |grep  -Ev "__recycle_bin__|Database|information_schema|mysql|performance_schema|sys")

  echo $DB_LIST
  for DB in $DB_LIST; do
    BACKUP_NAME=$BACKUP_DIR/${DB}_${DATE}.sql
    mysqldump -h$HOST -u$USER -p$PASS --single-transaction --set-gtid-purged=OFF  --databases $DB > $BACKUP_NAME ;
    if [ $? -ne 0 ];then
        echo "$BACKUP_NAME 备份失败!" >> back_err.log
    fi
  done
done < $CONN_INFO
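
Because mysqldump is invoked with --databases, every dump file already contains its CREATE DATABASE and USE statements, so restoring a single database is a plain import (host, credentials and file name below are placeholders):

mysql -h$HOST -u$USER -p$PASS < /data/db_backup/$HOST/${DB}_2024-01-01_00-00-00.sql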

5. ODPS batch CREATE TABLE statement comparison

#!/bin/bash
pro_list=project.txt
dst=odpscmd_dst
uat=odpscmd_uat1
file_dst="table/${dst}"
file_uat="table/${uat}"
file_diff="table/diff"
table_dst="$file_dst/show_dst"
table_uat="$file_uat/show_uat"
[ -d table ] && rm -rf table ;echo "初始化"
#----------------建表-------------------------
[ ! -d  $file_dst -a ! -d $file_uat -a ! -d $file_diff -a ! -d $table_dst -a ! -d $table_uat ] && mkdir -p $file_dst $file_uat $file_diff $table_dst $table_uat
show(){

sh /root/$dst/bin/odpscmd --project=${1} -e " show create table $2;" >> $file_dst/dst_$1;sed -i  's/ALIORC .*/ALIORC;/g' $file_dst/dst_$1 # 2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " show create table $2;" >> $file_uat/uat_$1;sed -i  's/ALIORC .*/ALIORC;/g' $file_uat/uat_$1 # 2&> /dev/null

}

show_table(){
table1="table/1"
[ ! -d $table1 ] && mkdir -p $table1
sh /root/$dst/bin/odpscmd --project=${1} -e " show tables;" > $table1/dst_$1 && cat $table1/dst_$1| awk -F: '{print $3}' > $table_dst/dst_$1 #2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " show tables;" > $table1/uat_$1 && cat $table1/uat_$1| awk -F: '{print $3}' > $table_uat/uat_$1 #2&> /dev/null
[[ `cat $table_dst/dst_$1|wc -l` == ` cat $table_uat/uat_$1|wc -l` ]] && echo "${1}项目空间表数量一致!!" || echo -e "${1}项目空间表数量不一致!\n源端数量:`cat $table_dst/dst_$1|wc -l`\n目标端数量:` cat $table_uat/uat_$1|wc -l`" #exit 2
}


diff(){
table2="table/2"
[ ! -d $table2 ] && mkdir -p $table2
        # concatenate both sides, then keep the lines that appear only once, i.e. statements present on the dst side but missing on the uat side
        cat  $file_uat/uat_$1  $file_dst/dst_$1  > $table2/uniq_$1_all
        cat $file_uat/uat_$1 $table2/uniq_$1_all |sort|uniq -u > $file_diff/diff_$1
}

while read line
do
        a=`echo $line`
        show_table $a
        table_list="$table_dst/dst_$a"
        for i in `cat $table_list`
        do
                show $a $i
        done

        diff $a
done < $pro_list

6. ODPS batch comparison of mismatched columns

#!/bin/bash
pro_list=project.txt
dst=odpscmd_dst
uat=odpscmd_uat1
file_dst="table_desc/${dst}"
file_uat="table_desc/${uat}"
file_diff="table_desc/diff"
table_dst="$file_dst/show_dst"
table_uat="$file_uat/show_uat"
table_dst1="$file_dst/desc_det"
table_uat1="$file_uat/desc_uat"
[ -d table_desc ] && rm -rf table_desc ;echo "初始化"
#----------------建表-------------------------
[ ! -d  $file_dst -a ! -d $file_uat -a ! -d $file_diff -a ! -d $table_dst -a ! -d $table_uat -a ! -d $table_dst1 -a ! -d $table_uat1 ] && mkdir -p $file_dst $file_uat $file_diff $table_dst $table_uat $table_dst1 $table_uat1
show(){

sh /root/$dst/bin/odpscmd --project=${1} -e " desc $2;" > $file_dst/dst_${1}_tmp && awk '/^\|/&&NR>12' $file_dst/dst_${1}_tmp|sed 's@|@@g'|xargs echo|sed 's/(.*)/& '$2'/g' >> $table_dst1/dst_$1   # 2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " desc $2;" > $file_uat/uat_${1}_tmp && awk '/^\|/&&NR>12' $file_uat/uat_${1}_tmp|sed 's@|@@g'|xargs echo|sed 's/(.*)/& '$2'/g' >> $table_uat1/uat_$1   # 2&> /dev/null

}

show_table(){
table1="table_desc/1"
[ ! -d $table1 ] && mkdir -p $table1
sh /root/$dst/bin/odpscmd --project=${1} -e " show tables;" > $table1/dst_$1 && cat $table1/dst_$1| awk -F: '{print $3}' > $table_dst/dst_$1 #2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " show tables;" > $table1/uat_$1 && cat $table1/uat_$1| awk -F: '{print $3}' > $table_uat/uat_$1 #2&> /dev/null
[[ `cat $table_dst/dst_$1|wc -l` == ` cat $table_uat/uat_$1|wc -l` ]] && echo "${1}项目空间表数量一致!!" || echo -e "${1}项目空间表数量不一致!\n源端数量:`cat $table_dst/dst_$1|wc -l`\n目标端数量:` cat $table_uat/uat_$1|wc -l`" #exit 2
}


diff(){
table2="table_desc/2"
[ ! -d $table2 ] && mkdir -p $table2
        # concatenate both sides, then keep the lines that appear only once, i.e. entries present on the dst side but missing on the uat side
        cat  $table_uat1/uat_$1 $table_dst1/dst_$1  > $table2/uniq_$1_all
        cat $table_uat1/uat_$1 $table2/uniq_$1_all |sort|uniq -u > $file_diff/diff_$1
}

while read line
do
        a=`echo $line`
        show_table $a
        table_list="$table_dst/dst_$a"
        for i in `cat $table_list`
        do
                show $a $i
        done

        diff $a
done < $pro_list

7. ODPS multi-threaded comparison of column type differences

This works when the amount of data is small; with a large data volume and high concurrency, concurrent writes to the same output file can interleave and corrupt it.

#!/bin/bash
pro_list=project.txt
dst=odpscmd_dst
uat=odpscmd_uat1
file_dst="table_desc/${dst}"
file_uat="table_desc/${uat}"
file_diff="table_desc/diff"
table_dst="$file_dst/show_dst"
table_uat="$file_uat/show_uat"
table_dst1="$file_dst/desc_det"
table_uat1="$file_uat/desc_uat"
[ -d table_desc ] && rm -rf table_desc ;echo "初始化"
#----------------建表-------------------------
[ ! -d  $file_dst -a ! -d $file_uat -a ! -d $file_diff -a ! -d $table_dst -a ! -d $table_uat -a ! -d $table_dst1 -a ! -d $table_uat1 ] && mkdir -p $file_dst $file_uat $file_diff $table_dst $table_uat $table_dst1 $table_uat1
show(){

sh /root/$dst/bin/odpscmd --project=${1} -e " desc $2;" > $file_dst/dst_${1}_tmp && awk '/^\|/&&NR>12' $file_dst/dst_${1}_tmp|sed 's@|@@g'|sed '1i\"<---"'$2'"--->"'|xargs echo >> $table_dst1/dst_$1    # 2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " desc $2;" > $file_uat/uat_${1}_tmp && awk '/^\|/&&NR>12' $file_uat/uat_${1}_tmp|sed 's@|@@g'|sed '1i\"<---"'$2'"--->"'|xargs echo >> $table_uat1/uat_$1    # 2&> /dev/null

}

show_table(){
table1="table_desc/1"
[ ! -d $table1 ] && mkdir -p $table1
sh /root/$dst/bin/odpscmd --project=${1} -e " show tables;" > $table1/dst_$1 && cat $table1/dst_$1| awk -F: '{print $NF}' > $table_dst/dst_$1 #2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " show tables;" > $table1/uat_$1 && cat $table1/uat_$1| awk -F: '{print $NF}' > $table_uat/uat_$1 #2&> /dev/null
[[ `cat $table_dst/dst_$1|wc -l` == ` cat $table_uat/uat_$1|wc -l` ]] && echo "${1}项目空间表数量一致!!" || echo -e "${1}项目空间表数量不一致!\n源端数量:`cat $table_dst/dst_$1|wc -l`\n目标端数量:` cat $table_uat/uat_$1|wc -l`" #exit 2
}


diff(){
table2="table_desc/2"
[ ! -d $table2 ] && mkdir -p $table2
        # concatenate both sides, then keep the lines that appear only once, i.e. entries present on the dst side but missing on the uat side
        cat  $table_uat1/uat_$1 $table_dst1/dst_$1  > $table2/uniq_$1_all
        cat $table_uat1/uat_$1 $table2/uniq_$1_all |sort|uniq -u > $file_diff/diff_$1
}

start_time=`date +%s`  #定义脚本运行的开始时间

tmp_fifofile="/tmp/$$.fifo"
mkfifo $tmp_fifofile   # 新建一个FIFO类型的文件
exec 6<>$tmp_fifofile  # 将FD6指向FIFO类型
rm -f $tmp_fifofile  # the FIFO file can be removed now; FD 6 keeps it open

thread_num=20  # 定义最大线程数

#根据线程总数量设置令牌个数
#事实上就是在fd6中放置了$thread_num个回车符
for ((i=0;i<${thread_num};i++));do
    echo
done >&6

while read line
do
        a=`echo $line`
        show_table $a
        table_list="$table_dst/dst_$a"
        for i in `cat $table_list`
        do
                 read -u6
                 {
                    show $a $i
                     echo >&6 # 当进程结束以后,再向FD6中加上一个回车符,即补上了read -u6减去的那个
                 } &
        done

#       diff $a
done < $pro_list
    # 一个read -u6命令执行一次,就从FD6中减去一个回车符,然后向下执行
    # 当FD6中没有回车符时 ,就停止,从而实现线程数量控制

wait # 要有wait,等待所有线程结束

stop_time=`date +%s` # 定义脚本运行的结束时间
echo "TIME:`expr $stop_time - $start_time`" # 输出脚本运行时间

exec 6>&- # 关闭FD6
echo "over" # 表示脚本运行结束

Notes:

When several processes may operate on the same data, each one has to be sure that no other process is touching it at the same time, otherwise the data can be corrupted. The usual approach is a lock file: a process creates a file to announce that it is running, and any process that finds the file knows another process is already working on the same data. The shell command flock implements exactly this kind of mutual exclusion through file locks and is commonly used to serialize access between processes.

    flock: manage locks from shell scripts.

    -s, --shared: obtain a shared (read) lock;

    -u, --unlock: release the lock manually; usually not required, because the lock is dropped automatically when the file descriptor is closed;

    -w, --wait, --timeout seconds: set a blocking timeout; once the given number of seconds has passed, give up waiting and return 1;

    -x, -e, --exclusive: obtain an exclusive (write) lock; this is the default;

    -n, --nb, --nonblock: non-blocking mode; if the lock cannot be acquired, return 1 immediately instead of waiting;

    -c, --command command: run the given command through the shell while holding the lock
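
A minimal usage sketch (the lock file name is arbitrary): several background jobs serialize their appends to one file by taking an exclusive lock on a shared descriptor before writing.

#!/bin/bash
LOCK=/tmp/demo.lock
for n in 1 2 3 4 5; do
    {
        exec 9>$LOCK             # every job opens the same lock file on FD 9
        flock -x 9               # block until the exclusive lock is free
        echo "job $n line 1" >> shared.log
        echo "job $n line 2" >> shared.log   # both lines land together, never interleaved with another job
        flock -u 9
    } &
done
wait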

8. Avoiding corrupted output from concurrent multi-process writes

Method 1:

#!/bin/bash
pro_list=project.txt
dst=odpscmd_dst
uat=odpscmd_uat1
file_dst="table_desc/${dst}"
file_uat="table_desc/${uat}"
file_diff="table_desc/diff"
table_dst="$file_dst/show_dst"
table_uat="$file_uat/show_uat"
table_dst1="$file_dst/desc_det"
table_uat1="$file_uat/desc_uat"
[ -d table_desc ] && rm -rf table_desc ;echo "初始化"
#----------------建表-------------------------
[ ! -d  $file_dst -a ! -d $file_uat -a ! -d $file_diff -a ! -d $table_dst -a ! -d $table_uat -a ! -d $table_dst1 -a ! -d $table_uat1 ] && mkdir -p $file_dst $file_uat $file_diff $table_dst $table_uat $table_dst1 $table_uat1
show(){

sh /root/$dst/bin/odpscmd --project=${1} -e " desc $2;" > $file_dst/dst_${1}_tmp && awk '/^\|/&&NR>12' $file_dst/dst_${1}_tmp|sed 's@|@@g'|sed '1i\"<---"'$2'"--->"'|xargs echo >> $table_dst1/dst_$1    # 2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " desc $2;" > $file_uat/uat_${1}_tmp && awk '/^\|/&&NR>12' $file_uat/uat_${1}_tmp|sed 's@|@@g'|sed '1i\"<---"'$2'"--->"'|xargs echo >> $table_uat1/uat_$1    # 2&> /dev/null

}

show_table(){
table1="table_desc/1"
[ ! -d $table1 ] && mkdir -p $table1
sh /root/$dst/bin/odpscmd --project=${1} -e " show tables;" > $table1/dst_$1 && cat $table1/dst_$1| awk -F: '{print $NF}' > $table_dst/dst_$1 #2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " show tables;" > $table1/uat_$1 && cat $table1/uat_$1| awk -F: '{print $NF}' > $table_uat/uat_$1 #2&> /dev/null
[[ `cat $table_dst/dst_$1|wc -l` == ` cat $table_uat/uat_$1|wc -l` ]] && echo "${1}项目空间表数量一致!!" || echo -e "${1}项目空间表数量不一致!\n源端数量:`cat $table_dst/dst_$1|wc -l`\n目标端数量:` cat $table_uat/uat_$1|wc -l`" #exit 2
}


diff(){
table2="table_desc/2"
[ ! -d $table2 ] && mkdir -p $table2
        # concatenate both sides, then keep the lines that appear only once, i.e. entries present on the dst side but missing on the uat side
        cat  $table_uat1/uat_$1 $table_dst1/dst_$1  > $table2/uniq_$1_all
        cat $table_uat1/uat_$1 $table2/uniq_$1_all |sort|uniq -u > $file_diff/diff_$1
}

start_time=`date +%s`  #定义脚本运行的开始时间

tmp_fifofile="/tmp/$$.fifo"
mkfifo $tmp_fifofile   # 新建一个FIFO类型的文件
exec 6<>$tmp_fifofile  # 将FD6指向FIFO类型
rm -f $tmp_fifofile  # the FIFO file can be removed now; FD 6 keeps it open

thread_num=20  # 定义最大线程数

#根据线程总数量设置令牌个数
#事实上就是在fd6中放置了$thread_num个回车符
for ((i=0;i<${thread_num};i++));do
    echo
done >&6

while read line
do
        a=`echo $line`
        show_table $a
        table_list="$table_dst/dst_$a"
        for i in `cat $table_list`
        do
                 read -u6
                 {
                    shell_lock=1.lock  
                    exec 200>$shell_lock  
                    flock -x 200            # block until the exclusive lock is held, so writes to the shared output files do not interleave
                    show $a $i
                    echo >&6 # 当进程结束以后,再向FD6中加上一个回车符,即补上了read -u6减去的那个
                 } &
        done

#       diff $a
done < $pro_list
    # 一个read -u6命令执行一次,就从FD6中减去一个回车符,然后向下执行
    # 当FD6中没有回车符时 ,就停止,从而实现线程数量控制

wait # 要有wait,等待所有线程结束

stop_time=`date +%s` # 定义脚本运行的结束时间
echo "TIME:`expr $stop_time - $start_time`" # 输出脚本运行时间

exec 200>&- # 关闭FD200
exec 6>&- # 关闭FD6
echo "over" # 表示脚本运行结束

Method 2:

#!/bin/bash
pro_list=project.txt
dst=odpscmd_dst
uat=odpscmd_pro
file_dst="table_desc/${dst}"
file_uat="table_desc/${uat}"
file_diff="table_desc/diff"
table_dst="$file_dst/show_dst"
table_uat="$file_uat/show_uat"
table_dst1="$file_dst/desc_dst"
table_uat1="$file_uat/desc_uat"
[  -d table_desc ] && rm -rf table_desc ;echo "初始化"
#----------------建表-------------------------
[ ! -d  $file_dst -a ! -d $file_uat -a ! -d $file_diff -a ! -d $table_dst -a ! -d $table_uat -a ! -d $table_dst1 -a ! -d $table_uat1 ] && mkdir -p $file_dst $file_uat $file_diff $table_dst $table_uat $table_dst1 $table_uat1
 
show(){
[ ! -d $file_dst/${1} -a ! -d $file_uat/${1} -a ! -d $file_diff/${1} -a ! -d $table_dst1/${1} -a ! -d $table_uat1/${1} ] && mkdir -p $file_dst/${1}  $file_uat/${1} $file_diff/${1} $table_dst1/${1}  $table_uat1/${1}
 
sh /root/$dst/bin/odpscmd --project=${1} -e " desc $2;" >$file_dst/${1}/${2}
awk '/^\|/&&NR>12' $file_dst/${1}/${2}|sed 's@|@@g'|xargs  >> $table_dst1/${1}/${2}
#---------------------------------------------
sh /root/$uat/bin/odpscmd --project=${1} -e " desc $2;" >$file_uat/${1}/${2}
awk '/^\|/&&NR>12' $file_uat/${1}/${2}|sed 's@|@@g'|xargs  >  $table_uat1/${1}/${2}
}
 
show_table(){
table1="table_desc/1"
[ ! -d $table1 ] && mkdir -p $table1
sh /root/$dst/bin/odpscmd --project=${1} -e " show tables;" > $table1/dst_$1 && cat $table1/dst_$1| awk -F: '{print $NF}' > $table_dst/dst_$1 #2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " show tables;" > $table1/uat_$1 && cat $table1/uat_$1| awk -F: '{print $NF}' > $table_uat/uat_$1 #2&> /dev/null
[[ `cat $table_dst/dst_$1|wc -l` == ` cat $table_uat/uat_$1|wc -l` ]] && echo "${1}项目空间表数量一致!!" || echo -e "${1}项目空间表数量不一致!\n目标端数量:`cat $table_dst/dst_$1|wc -l`\n源端数量:` cat $table_uat/uat_$1|wc -l`" #exit 2
}
 
diff1(){
mkdir -p ${file_diff}/${1}
diff -rqs ${table_uat1}/${1}  ${table_dst1}/${1} > ${file_diff}/${1}/result
#echo "源端不存在分区及表:" && awk 'NR==FNR{a[$0]++} NR>FNR&&!a[$0]' $table_dst1/${1} $table_uat1/${1} > $file_diff/${1}/result_awk_d_s
#echo "目标端不存在分区及表:" && awk 'NR==FNR{a[$0]++} NR>FNR&&!a[$0]' $table_uat1/${1} $table_dst1/${1} > $file_diff/${1}/result_awk_d_d

} 
start_time=`date +%s`  #定义脚本运行的开始时间
 
tmp_fifofile="/tmp/$$.fifo"
mkfifo $tmp_fifofile   # 新建一个FIFO类型的文件
exec 6<>$tmp_fifofile  # 将FD6指向FIFO类型
rm -f $tmp_fifofile  # the FIFO file can be removed now; FD 6 keeps it open
 
thread_num=30  # 定义最大线程数
 
 
#事实上就是在fd6中放置了$thread_num个回车符
for ((i=0;i<${thread_num};i++));do
   echo
done >&6
 
while read line
do
        a=`echo $line`
        show_table $a
       # table_list="$table_dst/dst_$a"
        table_list="$table_uat/uat_$a"
 
        for i in `cat $table_list`
        do
                read -u6
                {
                        show $a $i
                        echo >&6
                 } &
        done
	wait    # let this project's background desc jobs finish before comparing
	diff1 $a
done < $pro_list
 
wait # 要有wait,等待所有线程结束
 
stop_time=`date +%s` # 定义脚本运行的结束时间
 
echo "TIME:`expr $stop_time - $start_time`" # 输出脚本运行时间
 
exec 6>&- # 关闭FD6
 
echo "over" # 表示脚本运行结束

Method 3:

#!/bin/bash
pro_list=project.txt
dst=odpscmd_dst
uat=odpscmd
file_dst="table_desc_pro/${dst}"
file_uat="table_desc_pro/${uat}"
file_diff="table_desc_pro/diff"
table_dst="$file_dst/show_dst"
table_uat="$file_uat/show_uat"
table_dst1="$file_dst/desc_det"
table_uat1="$file_uat/desc_uat"
[  -d table_desc_pro ] && rm -rf table_desc_pro ;echo "初始化"
#----------------建表-------------------------
[ ! -d  $file_dst -a ! -d $file_uat -a ! -d $file_diff -a ! -d $table_dst -a ! -d $table_uat -a ! -d $table_dst1 -a ! -d $table_uat1 ] && mkdir -p $file_dst $file_uat $file_diff $table_dst $table_uat $table_dst1 $table_uat1
show(){

#sh /home/qianyi/$dst/bin/odpscmd --project=${1} -e " desc $2;" > $file_dst/dst_${1}_tmp && awk '/^\|/&&NR>12' $file_dst/dst_${1}_tmp|sed 's@|@@g'|sed '1i\"<---"'$2'"--->"'|xargs echo >> $table_dst1/dst_$1    # 2&> /dev/null
#sh /home/qianyi/$uat/bin/odpscmd --project=${1} -e " desc $2;" > $file_uat/uat_${1}_tmp && awk '/^\|/&&NR>12' $file_uat/uat_${1}_tmp|sed 's@|@@g'|sed '1i\"<---"'$2'"--->"'|xargs echo >> $table_uat1/uat_$1    # 2&> /dev/null
sh /home/qianyi/$uat/bin/odpscmd --project=${1} -e " desc $2;"|awk '/^\|/&&NR>12'|sed 's@|@@g'|sed '1i\"<---"'$2'"--->"'|xargs echo >> $table_uat1/uat_$1    # 2&> /dev/null

}

show_table(){
table1="table_desc_pro/1"
[ ! -d $table1 ] && mkdir -p $table1
sh /home/qianyi/$dst/bin/odpscmd --project=${1} -e " show tables;" > $table1/dst_$1 && cat $table1/dst_$1| awk -F: '{print $NF}' > $table_dst/dst_$1 #2&> /dev/null
sh /home/qianyi/$uat/bin/odpscmd --project=${1} -e " show tables;" > $table1/uat_$1 && cat $table1/uat_$1| awk -F: '{print $NF}' > $table_uat/uat_$1 #2&> /dev/null
[[ `cat $table_dst/dst_$1|wc -l` == ` cat $table_uat/uat_$1|wc -l` ]] && echo "${1}项目空间表数量一致!!" || echo -e "${1}项目空间表数量不一致!\n目标端数量:`cat $table_dst/dst_$1|wc -l`\n源端数量:` cat $table_uat/uat_$1|wc -l`" #exit 2
}


diff(){
table2="table_desc_pro/2"
[ ! -d $table2 ] && mkdir -p $table2
        # concatenate both sides, then keep the lines that appear only once, i.e. entries present on the dst side but missing on the uat side
        cat  $table_uat1/uat_$1 $table_dst1/dst_$1  > $table2/uniq_$1_all
        cat $table_uat1/uat_$1 $table2/uniq_$1_all |sort|uniq -u > $file_diff/diff_$1
}

start_time=`date +%s`  #定义脚本运行的开始时间

tmp_fifofile="/tmp/$$.fifo"
mkfifo $tmp_fifofile   # 新建一个FIFO类型的文件
exec 6<>$tmp_fifofile  # 将FD6指向FIFO类型
rm -f $tmp_fifofile  # the FIFO file can be removed now; FD 6 keeps it open

thread_num=20  # 定义最大线程数

#根据线程总数量设置令牌个数
#事实上就是在fd6中放置了$thread_num个回车符
for ((i=0;i<${thread_num};i++));do
    echo
done >&6

while read line
do
        a=`echo $line`
        show_table $a
        table_list="$table_uat/uat_$a"
        for i in `cat $table_list`
        do
                 read -u6
                 {
                    show $a $i
                     echo >&6 # 当进程结束以后,再向FD6中加上一个回车符,即补上了read -u6减去的那个
                 } &
        done

 #      diff $a
done < $pro_list
    # 一个read -u6命令执行一次,就从FD6中减去一个回车符,然后向下执行
    # 当FD6中没有回车符时 ,就停止,从而实现线程数量控制

wait # 要有wait,等待所有线程结束

stop_time=`date +%s` # 定义脚本运行的结束时间
echo "TIME:`expr $stop_time - $start_time`" # 输出脚本运行时间

exec 6>&- # 关闭FD6
echo "over" # 表示脚本运行结束

Notes:

        1. Method 1 locks the shared output file before every write to keep concurrent appends from interleaving. It is a poor fit when two or more writers whose commands take very different amounts of time write to the same file, because the lock serializes the writes and most of the benefit of running them concurrently is lost;

        2. Method 2 (one output file per process, merged after all writers have finished) works for every kind of concurrent multi-process write.
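
The idea behind method 2 can be reduced to a few lines; the work function and file names below are stand-ins for the real odpscmd calls. Every job writes to its own private file and the files are merged only after wait, so no locking is needed at all:

#!/bin/bash
outdir=out.$$ && mkdir -p $outdir
work(){ for n in 1 2 3; do sleep 0.$((RANDOM % 9)); echo "$1 line $n"; done; }   # stand-in for the real work
for job in a b c d; do
    work $job > $outdir/$job &         # one private output file per job
done
wait                                   # every writer has exited
cat $outdir/* > merged.txt             # only now is it safe to merge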

9. OB user locking

#!/bin/bash
CONN_INFO=dest_conn_info_conf
CLUSTER=''
TENANT_TYPE=''
TENANT_NAME=''
HOST=''
USER=''
PASS=''
while read line
do
CLUSTER=`echo $line|awk '{print $4}'|awk -F "#" '{print $2}'`
TENANT_TYPE=`echo $line|awk '{print $1}'`
TENANT_NAME=`echo $line|awk '{print $4}'|awk -F "@" '{print $2}'|awk -F "#" '{print $1}'`
HOST=`echo $line|awk '{print $2}'|awk -F "-h" '{print $2}'`
USER=`echo $line|awk '{print $4}'|awk -F "-u" '{print $2}'|awk -F "@" '{print $1}'`
PASS=`echo $line|awk '{print $5}'|awk -F "-p" '{print $2}'`

if [ $TENANT_TYPE = 'mysql' ];then
#mysql租户
  for user in $(obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -e"select user from mysql.user where user not in ('ORAAUDITOR','__oceanbase_inner_drc_user','root');" | grep -v 'user');do
        echo  "obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}#${CLUSTER} -e\"ALTER USER $user ACCOUNT LOCK;\""
  done
else

#oracle租户
echo "----------------------------${TENANT_NAME}--ob_dest_tenant_database----------------------------"
  for o_user in $(obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -e "SELECT USERNAME FROM DBA_USERS WHERE USERNAME NOT IN('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER');" | grep -v 'USERNAME');do
        echo "obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}#${CLUSTER} -e\"ALTER USER $o_user ACCOUNT LOCK;\""
  done

fi

done < $CONN_INFO

10. OB object migration

#!/bin/bash
CONN_INFO=dest_conn_info_conf
CLUSTER=''
TENANT_TYPE=''
TENANT_NAME=''
HOST=''
USER=''
PASS=''
while read line
do
CLUSTER=`echo $line|awk '{print $4}'|awk -F "#" '{print $2}'`
TENANT_TYPE=`echo $line|awk '{print $1}'`
TENANT_NAME=`echo $line|awk '{print $4}'|awk -F "@" '{print $2}'|awk -F "#" '{print $1}'`
HOST=`echo $line|awk '{print $2}'|awk -F "-h" '{print $2}'`
USER=`echo $line|awk '{print $4}'|awk -F "-u" '{print $2}'|awk -F "@" '{print $1}'`
PASS=`echo $line|awk '{print $5}'|awk -F "-p" '{print $2}'`

if [ $TENANT_TYPE = 'mysql' ];then
############################################mysql租户#############################################
####################get dest_database########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_database"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,tenant_id,tenant_name,'mysql' tenant_type,database_id,database_name,now() remark from oceanbase.gv\$database where tenant_name="\'${TENANT_NAME}\'" and database_name not in ('oceanbase','information_schema','mysql','__recyclebin','__public','test') order by tenant_name,database_name;
"|tr '\t' ','>./ob_buffer/ob_dest_tenant_database.csv

mysql -uroot -hxxxxx -pxxxx -e "delete from ob_migration.ob_dest_tenant_database where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_tenant_database.csv' INTO TABLE ob_migration.ob_dest_tenant_database FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_database meta_info done!"

####################get dest_table ########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_table"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,a.tenant_id,a.tenant_name,'mysql' tenant_type,a.database_id,a.database_name,null table_id,b.table_name,b.table_rows row_count,now() remark
from oceanbase.gv\$database a
left join (select distinct table_schema,table_name,table_rows from information_schema.tables where table_schema not in ('information_schema','mysql','performance_schema','test','d_utf8mb4')) b
on a.database_name =b.table_schema
where a.tenant_name="\'${TENANT_NAME}\'"
and a.database_name not in ('oceanbase','information_schema','mysql','__recyclebin','__public','test')
and b.table_name is not null
order by a.tenant_name,a.database_name;
"|tr '\t' ','>./ob_buffer/ob_dest_table.csv

mysql -uroot -hrm-dxxxxx -pxxxxx -e "delete from ob_migration.ob_dest_table where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_table.csv' INTO TABLE ob_migration.ob_dest_table FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_table meta_info done!"

####################get dest_view ########################
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,a.tenant_id,a.tenant_name,'mysql' tenant_type,a.database_id,a.database_name,null view_id,b.table_name view_name,now() remark
from oceanbase.gv\$database a
left join (select distinct table_schema,table_name from information_schema.views where table_schema not in ('information_schema','mysql','performance_schema','test','d_utf8mb4')) b
on a.database_name =b.table_schema
where a.tenant_name="\'${TENANT_NAME}\'"
and a.database_name not in ('oceanbase','information_schema','mysql','__recyclebin','__public','test')
and b.table_name is not null
order by a.tenant_name,a.database_name;
"|tr '\t' ','>./ob_buffer/ob_dest_view.csv

mysql -uroot -hrm-xxxxxxxxxx -pxxxxxx -e "delete from ob_migration.ob_dest_view where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_view.csv' INTO TABLE ob_migration.ob_dest_view FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_view meta_info done!"

####################get dest_procedure########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_procedure"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,a.tenant_id,a.tenant_name,'mysql' tenant_type,a.database_id,a.database_name,null procedure_id,b.name procedure_name,now() remark
from oceanbase.gv\$database a
left join (select distinct db,name from mysql.proc where type = 'procedure' and db not in ('information_schema','mysql','performance_schema','test','d_utf8mb4')) b
on a.database_name =b.db
where a.tenant_name="\'${TENANT_NAME}\'"
and a.database_name not in ('oceanbase','information_schema','mysql','__recyclebin','__public','test')
and b.name is not null
order by a.tenant_name,a.database_name;
"|tr '\t' ','>./ob_buffer/ob_dest_procedure.csv

mysql -uroot -hrm-xxxxxxx -pxxxx -e "delete from ob_migration.ob_dest_procedure where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_procedure.csv' INTO TABLE ob_migration.ob_dest_procedure FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_procedure meta_info done!"

####################get dest_function########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_function"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,a.tenant_id,a.tenant_name,'mysql' tenant_type,a.database_id,a.database_name,null function_id,b.name function_name,now() remark
from oceanbase.gv\$database a
left join (select distinct db,name from mysql.proc where type = 'function' and db not in ('information_schema','mysql','performance_schema','test','d_utf8mb4')) b
on a.database_name =b.db
where a.tenant_name="\'${TENANT_NAME}\'"
and a.database_name not in ('oceanbase','information_schema','mysql','__recyclebin','__public','test')
and b.name is not null
order by a.tenant_name,a.database_name;
"|tr '\t' ','>./ob_buffer/ob_dest_function.csv

mysql -uroot -hrm-xxxxxx -pxxxx -e "delete from ob_migration.ob_dest_function where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_function.csv' INTO TABLE ob_migration.ob_dest_function FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_function meta_info done!"

####################get dest_trigger########################

obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,a.tenant_id,a.tenant_name,'mysql' tenant_type,a.database_id,a.database_name,null trigger_id,b.trigger_name,now() remark
from oceanbase.gv\$database a
left join (select distinct trigger_schema,trigger_name from information_schema.triggers where trigger_schema not in ('information_schema','mysql','performance_schema','test','d_utf8mb4','objr') ) b
on a.database_name =b.trigger_schema
where a.tenant_name="\'${TENANT_NAME}\'"
and a.database_name not in ('oceanbase','information_schema','mysql','__recyclebin','__public','test')
and b.trigger_name is not null
order by a.tenant_name,a.database_name;
"|tr '\t' ','>./ob_buffer/ob_dest_trigger.csv

mysql -uroot -hrm-xxxxx -pxxxx -e "delete from ob_migration.ob_dest_trigger where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_trigger.csv' INTO TABLE ob_migration.ob_dest_trigger FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_trigger meta_info done!"

####################get dest_constraint########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_constraint"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,a.tenant_id,a.tenant_name,'mysql' tenant_type,a.database_id,a.database_name,constraint_type,null constraint_id,b.constraint_name,b.table_name,now() remark
from oceanbase.gv\$database a
left join (select distinct case when constraint_type='PRIMARY KEY' then 'P' when constraint_type='UNIQUE' then 'U' else 'C' end constraint_type,constraint_schema,constraint_name,table_name from information_schema.table_constraints where table_schema not in ('information_schema','mysql','performance_schema','test') and constraint_name not like '%_OBPK_INCRMT%') b
on a.database_name =b.constraint_schema
where a.tenant_name="\'${TENANT_NAME}\'"
and a.database_name not in ('oceanbase','information_schema','mysql','__recyclebin','__public','test')
and b.constraint_name is not null
order by a.tenant_name,a.database_name;
"|tr '\t' ','>./ob_buffer/ob_dest_constraint.csv

mysql -uroot -hrm-xxxxxx -pxxxx -e "delete from ob_migration.ob_dest_constraint where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_constraint.csv' INTO TABLE ob_migration.ob_dest_constraint FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_constraint meta_info done!"

else

#oracle租户
####################get dest_database########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_database"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark from dba_users where username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER') order by username;
"|tr '\t' ','>./ob_buffer/ob_dest_tenant_database.csv

mysql -uroot -hrm-xxx -pxxx -e "delete from ob_migration.ob_dest_tenant_database where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_tenant_database.csv' INTO TABLE ob_migration.ob_dest_tenant_database FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_database meta_info done!"

####################get dest_table ########################
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,null table_id,c.table_name,c.num_rows row_count,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark
from dba_users a
left join dba_objects b
on a.username = b.owner
and b.owner not in ('SYS')
and b.object_type='TABLE'
left join dba_tables c
 on b.owner = c.owner
and b.OBJECT_NAME = c.TABLE_NAME
where a.username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER')
and c.TABLE_NAME is not null
order by a.username;
"|tr '\t' ','>./ob_buffer/ob_dest_table.csv


mysql -uroot -hrm-xxx -pxxx -e "delete from ob_migration.ob_dest_table where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_table.csv' INTO TABLE ob_migration.ob_dest_table FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_table meta_info done!"

####################get dest_view ########################
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,null view_id,b.object_name view_name,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark
from dba_users a
left join dba_objects b
on a.username = b.owner
and b.owner not in ('SYS')
and b.object_type='VIEW'
where a.username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER')
and b.object_type is not null
order by a.username;
"|tr '\t' ','>./ob_buffer/ob_dest_view.csv

mysql -uroot -hrm-xxxxx -pxxxx -e "delete from ob_migration.ob_dest_view where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_view.csv' INTO TABLE ob_migration.ob_dest_view FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_view meta_info done!"

####################get dest_procedure########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_procedure"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,null procedure_id,b.object_name procedure_name,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark
from dba_users a
left join dba_objects b
on a.username = b.owner
and b.owner not in ('SYS')
and b.object_type='PROCEDURE'
where a.username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER')
and b.object_type is not null
order by a.username;
"|tr '\t' ','>./ob_buffer/ob_dest_procedure.csv

mysql -uroot -hrm-xxx -pxxx -e "delete from ob_migration.ob_dest_procedure where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_procedure.csv' INTO TABLE ob_migration.ob_dest_procedure FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_procedure meta_info done!"

####################get dest_function########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_function"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,null function_id,b.object_name function_name,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark
from dba_users a
left join dba_objects b
on a.username = b.owner
and b.owner not in ('SYS')
and b.object_type='FUNCTION'
where a.username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER')
and b.object_type is not null
order by a.username;
"|tr '\t' ','>./ob_buffer/ob_dest_function.csv

mysql -uroot -hrm-xxx -pxxx -e "delete from ob_migration.ob_dest_function where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_function.csv' INTO TABLE ob_migration.ob_dest_function FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_function meta_info done!"


####################get dest_package########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_package"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,null package_id,b.object_name package_name,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark
from dba_users a
left join dba_objects b
on a.username = b.owner
and b.owner not in ('SYS')
and b.object_type='PACKAGE'
and b.OBJECT_NAME not like 'OBODC%'
where a.username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER')
and b.object_type is not null
order by a.username;
"|tr '\t' ','>./ob_buffer/ob_dest_package.csv

mysql -uroot -hrm-xxx -pxxx -e "delete from ob_migration.ob_dest_package where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_package.csv' INTO TABLE ob_migration.ob_dest_package FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_package meta_info done!"

####################get dest_packagebody########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_package"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,null packagebody_id,b.object_name packagebody_name,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark
from dba_users a
left join dba_objects b
on a.username = b.owner
and b.owner not in ('SYS')
and b.object_type='PACKAGE BODY'
and b.OBJECT_NAME not like 'OBODC%'
where a.username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER')
and b.object_type is not null
order by a.username;
"|tr '\t' ','>./ob_buffer/ob_dest_packagebody.csv

mysql -uroot -hrm-xxx -pxxx -e "delete from ob_migration.ob_dest_packagebody where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_packagebody.csv' INTO TABLE ob_migration.ob_dest_packagebody FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_packagebody meta_info done!"

####################get dest_trigger########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_trigger"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,null trigger_id,b.trigger_name trigger_name,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark
from dba_users a
left join (select distinct owner,trigger_name from dba_triggers) b
on a.username = b.owner
and b.owner not in ('SYS')
where a.username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER')
and b.trigger_name is not null
order by a.username;
"|tr '\t' ','>./ob_buffer/ob_dest_trigger.csv

mysql -uroot -hrm-xxx -pxxxx -e "delete from ob_migration.ob_dest_trigger where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_trigger.csv' INTO TABLE ob_migration.ob_dest_trigger FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_trigger meta_info done!"

####################get dest_constraint########################
echo "`date '+%Y-%m-%d %H:%M:%S'` get ${TENANT_NAME} dest_constraint"
obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select null id,"\'${CLUSTER}\'" cluster_name,null tenant_id,"\'${TENANT_NAME}\'" tenant_name,'oracle' tenant_type,null database_id,username database_name,b.constraint_type,null constraint_id,b.constraint_name constraint_name,b.table_name,to_char(sysdate,'yyyy-mm-dd hh24:mi:ss') remark
from dba_users a
left join (select distinct owner,constraint_type,constraint_name,table_name from dba_constraints where owner not in ('SYS')  and CONSTRAINT_TYPE in ('P','R','U','C') and constraint_name not like '%_OBPK_INCRMT%') b
on a.username = b.owner
where a.username not in ('SYS','LBACSYS','ORAAUDITOR','PUBLIC','__OCEANBASE_INNER_DRC_USER')
and b.constraint_name is not null
order by a.username;
"|tr '\t' ','>./ob_buffer/ob_dest_constraint.csv

mysql -uroot -hrm-xxx -pxxx -e "delete from ob_migration.ob_dest_constraint where tenant_name="\'${TENANT_NAME}\'";LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_constraint.csv' INTO TABLE ob_migration.ob_dest_constraint FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"
echo "`date '+%Y-%m-%d %H:%M:%S'` load ${TENANT_NAME} dest_constraint meta_info done!"
fi

done < $CONN_INFO
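
One operational note: the LOAD DATA LOCAL INFILE steps above only succeed when local data loading is allowed on both the metadata MySQL server (local_infile=ON) and the client. If a load is rejected for that reason, the client can be started with the option enabled explicitly (connection details are placeholders, as in the script):

mysql --local-infile=1 -uroot -hrm-xxxxx -pxxxx -e "LOAD DATA LOCAL INFILE './ob_buffer/ob_dest_table.csv' INTO TABLE ob_migration.ob_dest_table FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';"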

OB user connectivity check

#!/bin/bash

#检查数据库连通性
#mysql租户串样例
#mysql -hobproxy.xxxxxx -P3306 -uroot@xxxxx#xxxx:3 -pxxxxxxx
#oracle租户串样例
#obclient -hobproxy.xxxx -P3306 -uSYS@xxxxx#xxxxx:1 -pxxxxxx

CONN_INFO=target
while read line

do
  CLUSTER=`echo $line|awk '{print $4}'|awk -F "#" '{print $2}'`
  TENANT_TYPE=`echo $line|awk '{print $1}'`
  TENANT_NAME=`echo $line|awk '{print $4}'|awk -F "@" '{print $2}'|awk -F "#" '{print $1}'`
  HOST=`echo $line|awk '{print $2}'|awk -F "-h" '{print $2}'`
  USER=`echo $line|awk '{print $4}'|awk -F "-u" '{print $2}'|awk -F "@" '{print $1}'`
  PASS=`echo $line|awk '{print $5}'|awk -F "-p" '{print $2}'`

# 判断数据库联通性


  if [ $TENANT_TYPE = 'mysql' ];then
    echo -e "租户: ${TENANT_NAME}##### 用户:${USER}------------\033[31m$1\033[0m"
    obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -e "select 1;"
    if [ $? -ne 0 ];then
     echo "${HOST} ${TENANT_NAME}#${USER} check conn Failed" >> err_lock.log
    fi
  else
    echo -e "租户: ${TENANT_NAME}##### 用户:${USER}------------\033[31m$1\033[0m"
    obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -e "select 1 from dual;"
    if [ $? -ne 0 ];then
     echo "${HOST} ${TENANT_NAME}#${USER} check conn Failed" >> err_lock.log
    fi
  fi
done < $CONN_INFO

OB database, user and privilege export and creation

#!/bin/bash
#CONN_INFO=src_conn_info
CONN_INFO=abt_cnn_info
while read line
do
CLUSTER=`echo $line|awk '{print $4}'|awk -F "#" '{print $2}'`
TENANT_TYPE=`echo $line|awk '{print $1}'`
TENANT_NAME=`echo $line|awk '{print $4}'|awk -F "@" '{print $2}'|awk -F "#" '{print $1}'`
HOST=`echo $line|awk '{print $2}'|awk -F "-h" '{print $2}'`
USER=`echo $line|awk '{print $4}'|awk -F "-u" '{print $2}'|awk -F "@" '{print $1}'`
PASS=`echo $line|awk '{print $5}'|awk -F "-p" '{print $2}'`
#XY_HOST=`grep -w ${TENANT_NAME} db_mapping.txt|head -1|awk '{print $7}'|awk -F '-h' '{print $2}'`
#XY_TENANT=`grep -w ${TENANT_NAME} db_mapping.txt|head -1|awk '{print $3}'`
#XY_CLUSTER=`grep -w ${TENANT_NAME} db_mapping.txt|head -1|awk '{print $9}'|awk -F "#" '{print $2}'`
echo ""
echo "-----------------------------------${TENANT_NAME}-CREATE DATABASE--------------------------------------------"|tee -a create.log
# 输出源端db信息
mysql -h${HOST}  -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e"
select concat ('create database ',TABLE_SCHEMA,';') as 'CREATE DATABASE'
FROM  INFORMATION_SCHEMA.TABLES 
WHERE table_type ='BASE TABLE'
and TABLE_SCHEMA not in ('oceanbase','mysql','information_schema','test')
group by TABLE_SCHEMA;"

#目标端执行建库
for DB in `mysql -h${HOST}  -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "select TABLE_SCHEMA FROM  INFORMATION_SCHEMA.TABLES 
WHERE table_type ='BASE TABLE'
and TABLE_SCHEMA not in ('oceanbase','mysql','information_schema','test')
group by TABLE_SCHEMA;"`;do
XY_HOST=`grep -w ${DB} db_mapping.txt|head -1|awk '{print $7}'|awk -F '-h' '{print $2}'`
XY_TENANT=`grep -w ${DB} db_mapping.txt|head -1|awk '{print $3}'`
XY_CLUSTER=`grep -w ${DB} db_mapping.txt|head -1|awk '{print $9}'|awk -F "#" '{print $2}'`
echo "mysql -h${XY_HOST} -P3306 -uroot@${XY_TENANT}#${XY_CLUSTER} -pOceanBase_123# -e \"create database $DB\"" >> create.log
done

echo "----------------------------------------${TENANT_NAME}-CREATE USER--------------------------------------------"|tee -a create.log
# 输出源端USER
mysql -h${HOST}  -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "
select concat('create user ',user,\" identified by '\",user,\"_Prd1122%';\")  from mysql.user where user not in ('ORAAUDITOR','__oceanbase_inner_drc_user','root','oms_drc');"

# 目标端创建用户
for APP_USER in `mysql -h${HOST}  -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "select user from mysql.user where user not in ('ORAAUDITOR','__oceanbase_inner_drc_user','root','oms_drc');"`;do
APP=`echo "$APP_USER"|awk -F'_' '{print $1}'`
XY_HOST=`grep -w $APP db_mapping.txt|awk '{print $7}'|awk -F '-h' '{print $2}'`
XY_TENANT=`grep -w $APP db_mapping.txt|awk '{print $3}'`
XY_CLUSTER=`grep -w $APP db_mapping.txt|awk '{print $9}'|awk -F "#" '{print $2}'`
echo "mysql -h${XY_HOST} -P3306 -uroot@${XY_TENANT}#${XY_CLUSTER} -pOceanBase_123# -e \"create user ${APP_USER} identified by '${APP_USER}_Prd1122%'\"" >>create.log
done
echo "mysql -h${XY_HOST} -P3306 -uroot@${XY_TENANT}#${XY_CLUSTER} -pOceanBase_123# -e \"create user safe_qry identified by 'Safe_qry1122%';\"" >> create.log
echo ""

echo "-------------------------------------${TENANT_NAME}-GRANT USER------------------------------------------------"|tee -a create.log
# 输出源端授权
for APP_USER in `mysql -h${HOST}  -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "select user from mysql.user where user not in ('ORAAUDITOR','__oceanbase_inner_drc_user','root','oms_drc');"`;do
GRANT_SQL=$(mysql -h${HOST}  -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "show grants for $APP_USER"|tr '\n' ';')
echo "$GRANT_SQL"
APP=`echo "$APP_USER"|awk -F'_' '{print $1}'`
XY_HOST=`grep -w $APP db_mapping.txt|awk '{print $7}'|awk -F '-h' '{print $2}'`
XY_TENANT=`grep -w $APP db_mapping.txt|awk '{print $3}'`
XY_CLUSTER=`grep -w $APP db_mapping.txt|awk '{print $9}'|awk -F "#" '{print $2}'`

# 目标端授权
echo "mysql -h${XY_HOST} -P3306 -uroot@${XY_TENANT}#${XY_CLUSTER} -pOceanBase_123# -e \"$GRANT_SQL\"" >>create.log
done
echo "mysql -h${XY_HOST} -P3306 -uroot@${XY_TENANT}#${XY_CLUSTER} -pOceanBase_123# -e \"GRANT SELECT ON *.* TO 'safe_qry';\"" >> create.log
echo ""

# 输出连接串
echo "----------------------------------------${TENANT_NAME}-CONN_INFO_(DBVPC)-----------------------------------------------"
for APP_USER in `mysql -h${HOST}  -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "select user from mysql.user where user not in ('ORAAUDITOR','__oceanbase_inner_drc_user','root','oms_drc');"`;do
APP=`echo "$APP_USER"|awk -F'_' '{print $1}'`
XY_DBVPC_HOST=`grep -w $APP db_mapping.txt|awk '{print $12}'|awk -F '-h' '{print $2}'`
XY_TENANT_NAME=`grep -w $APP db_mapping.txt|awk '{print $3}'`
XY_CLUSTER=`grep -w $APP db_mapping.txt|awk '{print $9}'|awk -F "#" '{print $2}'`
echo "mysql -h${XY_DBVPC_HOST} -P3306 -u${APP_USER}@${XY_TENANT_NAME}#${XY_CLUSTER} -p${APP_USER}_Prd1122% -A ob_${APP}_abt"
done
echo ""

echo "----------------------------------------${TENANT_NAME}-CONN_INFO(ANYTUNNEL)-----------------------------------------------"
for APP_USER in `mysql -h${HOST}  -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -B -N -e "select user from mysql.user where user not in ('ORAAUDITOR','__oceanbase_inner_drc_user','root','oms_drc');"`;do
APP=`echo "$APP_USER"|awk -F'_' '{print $1}'`
XY_ANYTUNNEL_HOST=`grep -w $APP db_mapping.txt|awk '{print $7}'|awk -F '-h' '{print $2}'`
XY_TENANT_NAME=`grep -w $APP db_mapping.txt|awk '{print $3}'`
XY_CLUSTER=`grep -w $APP db_mapping.txt|awk '{print $9}'|awk -F "#" '{print $2}'`
echo "mysql -h${XY_ANYTUNNEL_HOST} -P3306 -u${APP_USER}@${XY_TENANT_NAME}#${XY_CLUSTER} -p${APP_USER}_Prd1122% -A ob_${APP}_abt"
done
echo ""

done < $CONN_INFO

# 替换ALL PRIVILEGES
sed -i 's/ALL PRIVILEGES/select,insert,update,delete,create,drop,alter,index,show view,create view/g' jiaofu.txt
sed -i 's/ALL PRIVILEGES/select,insert,update,delete,create,drop,alter,index,show view,create view/g' create.log

# 删除rmosatvi
sed -i  '/rmosatvi/d' jiaofu.txt
sed -i  '/rmosatvi/d' create.log

# 删除mpbcq
sed -i  '/mpbcq/d' jiaofu.txt
sed -i  '/mpbcq/d' create.log
                                 

OB user status check (locked or unlocked)

#!/bin/bash
CONN_INFO=$1
while read line

do
  CLUSTER=`echo $line|awk '{print $4}'|awk -F "#" '{print $2}'`
  TENANT_TYPE=`echo $line|awk '{print $1}'`
  TENANT_NAME=`echo $line|awk '{print $4}'|awk -F "@" '{print $2}'|awk -F "#" '{print $1}'`
  HOST=`echo $line|awk '{print $2}'|awk -F "-h" '{print $2}'`
  USER=`echo $line|awk '{print $4}'|awk -F "-u" '{print $2}'|awk -F "@" '{print $1}'`
  PASS=`echo $line|awk '{print $5}'|awk -F "-p" '{print $2}'`

# 检查用户账号是符合预期


  if [ $TENANT_TYPE = 'mysql' ];then
  #mysql租户
    echo "---------mysql租户-------------------${TENANT_NAME}账号状态确认----------------------------"
    obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -e"select user_name as '用户名',(case is_locked when 0 then '用户正常' when 1 then '用户锁定'  end) as '用户状态' from OCEANBASE.__ALL_USER where user_name not in ('ORAAUDITOR','__oceanbase_inner_drc_user','root');"
  else

  #oracle租户
  echo "${TENANT_NAME}******Sorry, Oracle tenants are not supported by this check."
  #echo "---------oracle租户-------------------${TENANT_NAME}账号状态确认----------------------------"
  #  obclient -h${HOST} -P3306 -p${PASS} -u${USER}@${TENANT_NAME}\#${CLUSTER} -e "select user_name as '用户名',(case is_locked when 0 then '用户正常' when 1 then '用户锁定'  end) as '用户状态' from __all_virtual_user where tenant_id<>500;" 

  fi

done < $CONN_INFO

OB user session kill

#!/bin/bash
# Read connection strings
conn_str=$1
while read line 
do
    cluster=$(echo $line|awk '{print $4}'|awk -F "#" '{print $2}')
    db_type=$(echo $line|awk '{print $1}')
    tenant_name=$(echo $line|awk '{print $4}'|awk -F "@" '{print $2}'|awk -F "#" '{print $1}')
    host=$(echo $line|awk '{print $2}'|awk -F "-h" '{print $2}')
    user=$(echo $line|awk '{print $4}'|awk -F "-u" '{print $2}'|awk -F "@" '{print $1}')
    pass=$(echo $line|awk '{print $5}'|awk -F "-p" '{print $2}')
    # Determine database type
    #db_type=$(echo $line | awk -F ':' '{print $3}')
# Extract host, user, and password from connection strings

# Kill sessions
if [ $db_type == "obclient" ]; then
    sessions=$(obclient -h${host} -P3306 -p${pass} -u${user}@${tenant_name}\#${cluster} <<'EOF'   # quote EOF so v$session below is not expanded by the shell
    set heading off
    set feedback off
    select 'alter system kill session ''' || sid || ',' || serial# || ''';'
    from v$session
    where username like '%_APP%' or username like '%_DPL%' or username like '%_QRY%';
    exit;
EOF
    )
    for session in $sessions
    do
        sqlplus -S $user/$pass@$host <<EOF
        $session
        exit;
EOF
    done
elif [ $db_type == "mysql" ]; then
    sessions=$(obclient -h$host -Pxxxxx -u$user@${tenant_name} -p$pass <<EOF
    select id from oceanbase.__all_virtual_processlist where user like '%_app%' or user like '%_qry%' or user like '%_dpl%';
    exit;
EOF
)
  echo $sessions;
  for session in $sessions; do
         obclient -h$host -Pxxx -u$user@${tenant_name} -p$pass  -e "kill $session";
    [ $? -ne 0 ]&&echo "${tenant_name}#${user} $session Failed" >> err_lock.log;
  done
 fi
done < $conn_str

 

11. Batch query of MySQL user privileges

select  concat('show grants for '"\'",user,"\'",'@',"\'",host,"\'"';') from mysql.user where user not in ('aurora','aurora_proxy','root','aliyun_root','replicator','mysql.session','mysql.sys');
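
A small wrapper (connection details are placeholders) that feeds the generated statements straight back to the server and collects every account's grants into one file:

#!/bin/bash
HOST=rm-xxxx; USER=root; PASS=xxxx        # placeholders
mysql -h$HOST -u$USER -p$PASS -N -e "select concat('show grants for ''',user,'''@''',host,''';') from mysql.user where user not in ('aurora','aurora_proxy','root','aliyun_root','replicator','mysql.session','mysql.sys');" \
  | mysql -h$HOST -u$USER -p$PASS -N > grants_${HOST}.txt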

12. Batch deletion of ODPS tables

#!/bin/bash
odps_file="/root/odpscmd_dst/bin"
file=check.list
start_time=`date +%s`  #定义脚本运行的开始时间
 
tmp_fifofile="/tmp/$$.fifo"
mkfifo $tmp_fifofile   # 新建一个FIFO类型的文件
exec 6<>$tmp_fifofile  # 将FD6指向FIFO类型
rm -f $tmp_fifofile  # the FIFO file can be removed now; FD 6 keeps it open
 
thread_num=5  # 定义最大线程数
 
#事实上就是在fd6中放置了$thread_num个回车符
for ((i=0;i<${thread_num};i++));do
    echo
done >&6
 
    # 一个read -u6命令执行一次,就从FD6中减去一个回车符,然后向下执行
    # 当FD6中没有回车符时,就停止,从而实现线程数量控制
 
for i in `cat $file`
do
	$odps_file/odpscmd --project=${i} -e "show tables;"|awk -F: '{print $NF}' |sed "s@.*@drop table ${i}.&\;@g"> table.list   # rebuild the drop list for each project instead of appending across projects
	while read line
	do
		a=`echo $line`
    		read -u6
    		{
        	$odps_file/odpscmd  --project=pai_uat1 -e "$a"
        	echo >&6 # 当进程结束以后,再向FD6中加上一个回车符,即补上了read -u6减去的那个
    		} &
	done < table.list	
done

wait # 要有wait,等待所有线程结束
 
stop_time=`date +%s` # 定义脚本运行的结束时间
echo "TIME:`expr $stop_time - $start_time`" # 输出脚本运行时间
 
exec 6>&- # 关闭FD6
echo "over" # 表示脚本运行结束

13. ODPS batch detection of partitions that are inconsistent after migration

#!/bin/bash
pro_list=project.txt
dst=odpscmd_dst
uat=odpscmd_uat1
file_dst="table_desc_par/${dst}"
file_uat="table_desc_par/${uat}"
file_diff_dst="table_desc_par/diff_dst"
file_diff_uat="table_desc_par/diff_uat"
table_dst="$file_dst/show_dst"
table_uat="$file_uat/show_uat"
table_dst1="$file_dst/desc_dst"
table_uat1="$file_uat/desc_uat"
[  -d table_desc_par ] && rm -rf  table_desc_par || mkdir table_desc_par;echo "初始化"
#----------------建表-------------------------
mkdir -p $file_dst $file_uat  $table_dst $table_uat $table_dst1 $table_uat1
 
show(){
 
sh /root/$dst/bin/odpscmd --project=${1} -e " show partitions $2;"|sed "s@.*@${1} ${2} &@g" >>$file_dst/${1}
#---------------------------------------------
sh /root/$uat/bin/odpscmd --project=${1} -e " show partitions $2;"|sed "s@.*@${1} ${2} &@g" >>$file_uat/${1}
}

diff(){ 
         # entries present on the uat side but missing on the dst side
         awk 'NR==FNR{a[$0]++} NR>FNR&&!a[$0]' $file_dst/${1} $file_uat/${1} > ${file_diff_dst}_${1}
         # entries present on the dst side but missing on the uat side
         awk 'NR==FNR{a[$0]++} NR>FNR&&!a[$0]' $file_uat/${1} $file_dst/${1} > ${file_diff_uat}_${1}
 }

show_table(){
table1="table_desc_par/1"
[ ! -d $table1 ] && mkdir -p $table1
sh /root/$dst/bin/odpscmd --project=${1} -e " show tables;" | awk -F: '{print $NF}' > $table_dst/dst_$1 #2&> /dev/null
sh /root/$uat/bin/odpscmd --project=${1} -e " show tables;" | awk -F: '{print $NF}' > $table_uat/uat_$1 #2&> /dev/null
[[ `cat $table_dst/dst_$1|wc -l` == ` cat $table_uat/uat_$1|wc -l` ]] && echo "${1}项目空间表数量一致!!" || echo -e "${1}项目空间表数量不一致!\n目标端数量:`cat $table_dst/dst_$1|wc -l`\n源端数量:` cat $table_uat/uat_$1|wc -l`" #exit 2
}
 
 
start_time=`date +%s`  #定义脚本运行的开始时间
 
tmp_fifofile="/tmp/$$.fifo"
mkfifo $tmp_fifofile   # 新建一个FIFO类型的文件
exec 6<>$tmp_fifofile  # 将FD6指向FIFO类型
rm -f $tmp_fifofile  # the FIFO file can be removed now; FD 6 keeps it open
 
thread_num=30  # 定义最大线程数
 
 
#事实上就是在fd6中放置了$thread_num个回车符
for ((i=0;i<${thread_num};i++));do
   echo
done >&6
 
while read line
do
        a=`echo $line`
        show_table $a
        table_list="$table_dst/dst_$a"
 
        for i in `cat $table_list`
        do
                read -u6
                {
                        show $a $i
                        echo >&6
                 } &
        done
	wait    # let this project's background "show partitions" jobs finish before comparing
	diff $a
done < $pro_list
 
wait # 要有wait,等待所有线程结束
 
stop_time=`date +%s` # 定义脚本运行的结束时间
 
echo "TIME:`expr $stop_time - $start_time`" # 输出脚本运行时间
 
exec 6>&- # 关闭FD6
 
echo "over" # 表示脚本运行结束
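
The comparison above relies on the classic awk two-file anti-join: awk 'NR==FNR{a[$0]++} NR>FNR&&!a[$0]' A B remembers every line of the first file A while NR==FNR, then prints only those lines of the second file B that were never seen in A. A tiny self-contained example:

printf 'p1 t1 pt=a\np1 t1 pt=b\n' > dst.txt
printf 'p1 t1 pt=a\n' > uat.txt
awk 'NR==FNR{a[$0]++} NR>FNR&&!a[$0]' uat.txt dst.txt   # prints "p1 t1 pt=b": present in dst.txt, missing from uat.txt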

14. ODPS transfer worker check script

#!/bin/bash
start_path=/root/start_odps3.sh
host=`hostname`
log_path=/mnt/transfer_*.log
start_size=`stat -c  "%n %s" ${log_path} > start.txt` 
sleep 600
stop_size=`stat -c  "%n %s" ${log_path} > stop.txt`
########################【执行前初始化】##################################
function ini
{
	echo "初始化/mnt目录文件" && find /mnt -type f  -mtime +1 |xargs rm -f
	transfer_pro=`ps -ef|grep -v grep |grep -oP '(?<=port\=)\d+(?= )' > pro`
	log_coun=`ls /mnt/|egrep -o 890[0-9]{1} > log`
	mv_log_file=`grep -vwf pro log`
	for i in `echo $mv_log_file`
	do
		ls /mnt|grep ''$i''|xargs -i rm  /mnt/{}
	done

	mv_tra_process=`grep -vwf log pro`
	for i in `echo $mv_tra_process`
	do
		ps -ef |grep -v grep|grep "port=$i"|awk '{print $2}'|xargs -i kill {}
	done
}

ini

function worker
{
worker_num=`ps -ef |grep 'max-compute-transfer'|grep -v grep |wc -l`
if [[ $worker_num -lt 6  ]];then
        $start_path
	if [[ $? == 0 ]];then
		echo " 进程启动成功"
        	sleep 30
        	echo "已经起了<---------${worker_num}个---------------->进程"
	fi
else
        echo "已经起了<---------${worker_num}个---------------->进程"
        exit 122
fi
}
function k
{
	transfer_pid=`grep -wf start.txt stop.txt|egrep -o '89[0-9]{2}'`
	transfer_log=`grep -wf start.txt stop.txt`
	kill_transfer=`ps -ef |grep -v grep|grep "port=$transfer_pid"|awk '{print $2}'`
	for i in `echo $kill_transfer`
	do
		
		rm -f $transfer_log
		echo "`date '+%Y-%m-%d %H:%M:%S'`---transfer日志文件---${i}---停止增长!!" >>error.txt
        	echo "[transfer-worker_${i}]进程异常即将重启进程---`date '+%Y-%m-%d %H:%M:%S'`"  >> error.txt
		echo $i |xargs kill &&  echo "$i--进程被杀死!!" >> error.txt
	done
	#	worker
}

error_transfer=`grep -wf start.txt stop.txt`
if [[ -z $error_transfer ]];then
        echo "程序运行正常---`date`" >> ok.txt
else
	k
fi
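
The check keeps no state other than start.txt/stop.txt, so it can simply be scheduled from cron. The interval and the script path below are assumptions, not something the original specifies; the interval just has to be longer than the built-in sleep 600:

*/15 * * * * /bin/bash /root/check_transfer.sh >> /root/check_transfer_cron.log 2>&1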

 

 
