# netstat -in
Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
en6 1500 link#2 e4.1f.13.50.f5.8e 0 0 89404 89404 0
en6 1500 192.168.10 192.168.10.1 0 0 89404 89404 0
en8 1500 link#3 5c.f3.fc.f.2e.b2 3936078728 0 3484183160 2 0
en8 1500 172.16.7 172.16.7.60 3936078728 0 3484183160 2 0
en8 1500 10.10.20 10.10.20.50 3936078728 0 3484183160 2 0
en8 1500 172.16.7 172.16.7.62 3936078728 0 3484183160 2 0
en8 1500 172.16.7 172.16.7.64 3936078728 0 3484183160 2 0
en14 1500 link#4 5c.f3.fc.f.2c.ac 2487918075 0 3651663589 2 0
en14 1500 11.11.11 11.11.11.50 2487918075 0 3651663589 2 0
en14 1500 169.254 169.254.34.84 2487918075 0 3651663589 2 0
en16 1500 link#5 5c.f3.fc.f.2e.b3 167005 0 221 32 0
en16 1500 10.128.59 10.128.59.12 167005 0 221 32 0
lo0 16896 link#1 470377750 0 470436254 0 0
lo0 16896 127 127.0.0.1 470377750 0 470436254 0 0
lo0 16896 ::1%1 470377750 0 470436254 0 0
The relevant /etc/hosts entries on the current system:
172.16.7.60 mmdb1
172.16.7.61 mmdb2
172.16.7.62 mmdb1-vip
172.16.7.63 mmdb2-vip
172.16.7.64 mmdb-scan
11.11.11.50 mmdb1-priv
11.11.11.51 mmdb2-priv
10.128.59.12 mmdb1
The target addressing plan:
Hostname   Type       IP address     NIC
mmdb1      Public IP  10.128.59.12   en16
mmdb1      VIP        10.128.59.15   en16
mmdb2      Public IP  10.128.59.13   en16
mmdb2      VIP        10.128.59.16   en16
mmdb1      SCAN IP    10.128.59.14   en16
The target /etc/hosts entries after the change:
10.128.59.12 mmdb1
10.128.59.13 mmdb2
10.128.59.15 mmdb1-vip
10.128.59.16 mmdb2-vip
11.11.11.50 mmdb1-priv
11.11.11.51 mmdb2-priv
10.128.59.14 mmdb-scan
Preparation before starting
1. Stop the listeners and the database services
srvctl stop listener -n mmdb1
srvctl stop listener -n mmdb2
srvctl stop service -d mmdb -s mmdb -n mmdb1
srvctl stop service -d mmdb -s mmdb -n mmdb2
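Before moving on, it may be worth confirming that the services and listeners are actually down; a quick check (mmdb is the database and service name used above):
srvctl status service -d mmdb
srvctl status listener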
2. Check the public IP
Check the configuration of the public network interfaces on both nodes:
# hostname
mmdb1
# ./oifcfg getif
en8 172.16.7.0 global public
en14 11.11.11.0 global cluster_interconnect
# hostname
mmdb2
# ./oifcfg getif
en8 172.16.7.0 global public
en14 11.11.11.0 global cluster_interconnect
2.1 Modify the Clusterware public IP
(Note: the command examples from here on were captured on a test cluster with node names rac1/rac2 and NICs eth0/eth1/eth2; on the production nodes described above, substitute mmdb1/mmdb2 and en8/en16.)
Delete the interface entry that carries the public IP:
[root@rac1 bin]# ./oifcfg delif -global en8
Reconfigure the public IP on the en16 interface:
[root@rac1 bin]# ./oifcfg setif -global en16/10.128.59.0:public
Check again:
[root@rac2 bin]# ./oifcfg getif
eth1 192.168.56.0 global cluster_interconnect
eth0 192.168.53.0 global public
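If it is unclear which subnets are visible on which NICs, oifcfg can enumerate them; a quick check, run from the Grid home bin directory like the commands above (-p prints the subnet type, -n adds the netmask):
[root@rac1 bin]# ./oifcfg iflist -p -n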
2.2 Stop the resource stack (HAS) on all nodes
[root@rac1 bin]# ./crsctl stop has
[root@rac2 bin]# ./crsctl stop has
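Before touching the NIC configuration, confirm on each node that the stack is really down; crsctl should report that it cannot contact Oracle High Availability Services:
[root@rac1 bin]# ./crsctl check crs
[root@rac2 bin]# ./crsctl check crs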
2.3 Change the IP address on the physical NIC (the old eth0 NIC configuration was not deleted here)
[root@rac1 bin]# vi /etc/sysconfig/network-scripts/ifcfg-eth2
[root@rac2 bin]# vi /etc/sysconfig/network-scripts/ifcfg-eth2
Restart the network service:
[root@rac1 bin]# service network restart
[root@rac2 bin]# service network restart
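The ifcfg-eth2 and service network commands above are Linux-style, from the test cluster. On the AIX production nodes seen in the netstat output, the same change could be made with chdev; a minimal sketch, assuming the public NIC is en16 as planned:
# chdev -l en16 -a netaddr=10.128.59.12 -a netmask=255.255.255.0 -a state=up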
2.4 Update the hosts file on all nodes
10.128.59.12 mmdb1
10.128.59.13 mmdb2
10.128.59.15 mmdb1-vip
10.128.59.16 mmdb2-vip
11.11.11.50 mmdb1-priv
11.11.11.51 mmdb2-priv
10.128.59.14 mmdb-scan
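A quick sanity check that every name now resolves to the new addresses on both nodes (a sketch using the standard host utility):
# for h in mmdb1 mmdb2 mmdb1-vip mmdb2-vip mmdb1-priv mmdb2-priv mmdb-scan; do host $h; done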
2.5 Start the resource stack on all nodes
[root@rac1 bin]# ./crsctl start has
[root@rac2 bin]# ./crsctl start has
Check the resource status:
[root@rac2 bin]# ./crsctl status resource -t
3. Modify the VIP
3.1 Stop the VIP resources and the local listeners
[root@rac2 bin]# ./crsctl stop resource ora.rac1.vip -f  -- the VIP and the listener are associated
[root@rac2 bin]# ./crsctl stop resource ora.rac2.vip -f
# hostname
mmdb1
# ./srvctl config network
Network exists: 1/172.16.7.0/255.255.255.0/en8, type static
3.2 As the root user, modify the VIP (this only needs to be run on one node):
[root@rac1 bin]# ./srvctl modify network -k 1 -S 10.128.59.0/255.255.255.0/en16 -v
[root@node2 bin]# ./srvctl config network
Network exists: 1/192.168.3.0/255.255.255.0/eth2, type static
3.3 Start the VIP resources
[root@rac2 bin]# ./crsctl start resource ora.rac1.vip
CRS-2672: Attempting to start 'ora.rac1.vip' on 'rac1'
CRS-2676: Start of 'ora.rac1.vip' on 'rac1' succeeded
[root@rac2 bin]# ./crsctl start resource ora.rac2.vip
CRS-2672: Attempting to start 'ora.rac2.vip' on 'rac2'
CRS-2676: Start of 'ora.rac2.vip' on 'rac2' succeeded
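The state of each VIP resource can be confirmed before the listeners are started:
[root@rac2 bin]# ./crsctl status resource ora.rac1.vip
[root@rac2 bin]# ./crsctl status resource ora.rac2.vip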
3.4 Start the local listeners and check their status (run on one node only):
[root@rac2 bin]# ./srvctl start listener
[root@rac2 bin]# ./srvctl status listener
Listener LISTENER is enabled
Listener LISTENER is running on node(s): rac1,rac2
4. Configure the SCAN and SCAN listener
4.1 Check the current SCAN and SCAN listener configuration
(During testing this already showed the changed value, but ifconfig revealed that the SCAN IP and VIP were not bound to the eth2 NIC; they were still on the original eth0 NIC.)
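One way to see which NIC an address is actually plumbed on is to grep the ifconfig output for the subnet; a sketch using the test cluster's 192.168.3 subnet (-B 1 shows the interface or alias line above each match):
[root@node2 bin]# ifconfig -a | grep -B 1 '192.168.3.'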
[root@node2 bin]# ./srvctl config scan
SCAN name: cluster-scan, Network: 1/192.168.3.0/255.255.255.0/eth2
SCAN VIP name: scan1, IP: /cluster-scan/192.168.3.200  -- note the address is still .200 here; it has not been updated yet
[root@node2 bin]# ./srvctl config scan_listener
SCAN Listener LISTENER_SCAN1 exists. Port: TCP:1521
4.2 Stop the SCAN and SCAN listener
[root@rac2 bin]# ./srvctl stop scan_listener
[root@rac2 bin]# ./srvctl stop scan
4.3 Remove the SCAN and SCAN listener
[root@rac2 bin]# ./srvctl remove scan_listener
Remove scan listener? (y/[n]) y
[root@rac2 bin]# ./srvctl remove scan
Remove the scan? (y/[n]) y
4.4 Re-add the SCAN
[root@rac2 bin]# ./srvctl add scan -n cluster-scan -k 1 -S 192.168.3.0/255.255.255.0/eth2
4.5 Re-add the SCAN listener:
[root@rac2 bin]# ./srvctl add scan_listener
4.6 Start the SCAN and SCAN listener:
[root@rac2 bin]# ./srvctl start scan
[root@rac2 bin]# ./srvctl start scan_listener
4.7 Check again
[root@node1 bin]# ./srvctl config scan
SCAN name: cluster-scan, Network: 1/192.168.3.0/255.255.255.0/eth2
SCAN VIP name: scan1, IP: /cluster-scan/192.168.3.222
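srvctl config shows the stored configuration; to confirm the SCAN resources are actually running, the status commands can be checked as well:
[root@node1 bin]# ./srvctl status scan
[root@node1 bin]# ./srvctl status scan_listener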
5. Modify the local_listener parameter (after the changes above succeed, this parameter is updated automatically)
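This can be verified from SQL*Plus on each instance; the values should now reference the new 10.128.59.x addresses:
SQL> show parameter local_listener
SQL> show parameter remote_listener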
6. Check RAC status
./crsctl status resource -t
Check the SCAN:
./srvctl config scan
./srvctl config scan_listener
./srvctl status listener
grid]$ lsnrctl status
grid]$ lsnrctl status listener_scan2
SQL> select instance_name, status from gv$instance;
Source: ITPUB blog, http://blog.itpub.net/13750068/viewspace-1137929/