#Getting help
$ crsctl -help
$ srvctl <command> -h
#Start Up and Stop Clusterware
#Start the Clusterware stack on all nodes
$ crsctl start cluster -all
#Start the Clusterware stack on a single node
$ crsctl start cluster -n k2r720n1
#Start the Oracle High Availability Services daemon (OHASD) and the Clusterware service stack together on the local server only
$ crsctl start crs
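The stack is stopped with the matching stop commands; a minimal sketch (add -f to force a stop when resources will not come down cleanly):
$ crsctl stop cluster -all
$ crsctl stop cluster -n k2r720n1
$ crsctl stop crs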
#Managing Oracle Clusterware
Check the cluster status (the -all option checks all nodes): $ crsctl check cluster -all
Check the CRS status with the following command: $ crsctl check crs
#Check the OHASD status:
$ crsctl check has
#Check the current status of all the resources:
$ crsctl status resource -t
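Two related checks, as a sketch assuming 11gR2 syntax: crsctl can also check a single named node and list the lower-level startup resources on the local node:
$ crsctl check cluster -n knewracn1
$ crsctl status resource -t -init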
#Managing OCR and the Voting Disk
#Check the integrity of the OCR
$ ocrcheck
#Dump the OCR contents to a file for inspection
$ ocrdump
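The Oracle Local Registry (OLR) on each node can be checked the same way (a sketch):
$ ocrcheck -local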
#Show the OCR backup information
$ ocrconfig -showbackup
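Automatic OCR backups are taken by the cluster; an on-demand backup can also be requested (a sketch, run as root):
$ ocrconfig -manualbackup
$ ocrconfig -showbackup manual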
#The steps to restore the OCR from a backup file are as follows (see the sketch after the list):
1. Identify the backup by using the ocrconfig -showbackup command.
2. Stop the Clusterware on all the cluster nodes.
3. Perform the restore with the restore command: ocrconfig -restore file_name
4. Restart CRS and run an OCR integrity check by using cluvfy comp ocr.
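A minimal sketch of the restore sequence, assuming the commands are run as root and file_name is the backup identified in step 1:
$ crsctl stop crs -- run on every cluster node
$ ocrconfig -restore file_name
$ crsctl start crs -- run on every cluster node
$ cluvfy comp ocr -n all -verbose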
#Use the following command to check the voting disk location
$ crsctl query css votedisk
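If the voting disk is stored in ASM, it can be relocated to another disk group with a single command (a sketch; +DATA is a hypothetical disk group name):
$ crsctl replace votedisk +DATA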
#Managing CRS Resources
The srvctl utility can be used to manage the resources that Clusterware manages.
Check the SCAN configuration of the cluster: $ srvctl config scan
Check the node VIP status: $ srvctl status vip -n knewracn1
Check the node apps: $ srvctl status nodeapps -n knewracn1
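srvctl also manages the SCAN listeners and databases; a brief sketch, where orcl is a hypothetical database name:
$ srvctl status scan_listener
$ srvctl config database -d orcl
$ srvctl status database -d orcl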
#Adding a new node to the existing cluster
Adding a node involves three phases:
1. Cloning the Grid Infrastructure home (Clusterware/ASM)
2. Cluster configuration
3. Cloning the RDBMS home
#From the first node of the cluster, execute the following commands
$ cluvfy stage -pre nodeadd -n rac3 -fixup -verbose
$ $GRID_HOME/oui/bin/addNode.sh -silent "CLUSTER_NEW_NODES={rac3}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac3_vip}"
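When addNode.sh completes, it prompts for the root scripts to be run as root on the new node before continuing (a sketch; the central inventory path is site-specific):
$ <oraInventory>/orainstRoot.sh -- as root on rac3
$ $GRID_HOME/root.sh -- as root on rac3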
$ cluvfy stage -post nodeadd -n rac3
$ crsctl check cluster -all -- verify the cluster health from all nodes
$ olsnodes -n -- list all existing nodes in the cluster
#From the first node, extend the RDBMS home to the new node:
$ $ORACLE_HOME/oui/bin/addNode.sh "CLUSTER_NEW_NODES={rac3}"
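Once the RDBMS home is extended, a database instance can be added on the new node with DBCA or srvctl; a sketch for an admin-managed database, where orcl and orcl3 are hypothetical names:
$ srvctl add instance -d orcl -i orcl3 -n rac3
$ srvctl start instance -d orcl -i orcl3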
#Removing a Node
#Identify whether the node is pinned: $ olsnodes -n -s -t
#Unpin the node if it is pinned:
$ crsctl unpin css -n rac3
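If a database instance runs on the node being removed, remove it first with DBCA or srvctl; a sketch for an admin-managed database, where orcl and orcl3 are hypothetical names:
$ srvctl stop instance -d orcl -i orcl3
$ srvctl remove instance -d orcl -i orcl3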
#On the node being removed, deinstall the Grid Infrastructure home locally:
$ $GRID_HOME/deinstall/deinstall -local
#From a remaining node, as root, delete the node from the cluster:
$ crsctl delete node -n rac3
#Update the inventory node list for the Grid home and the RDBMS home on the remaining nodes:
$ $GRID_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$GRID_HOME "CLUSTER_NODES={rac1,rac2}" CRS=TRUE -silent
$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=$ORACLE_HOME "CLUSTER_NODES={rac1,rac2}" -silent
#Verify the node removal:
$ cluvfy stage -post nodedel -n rac3 -verbose
$ olsnodes -n -s -t
#Clean up the following directories manually on the node that was just dropped:
/etc/oraInst.loc, /etc/oratab, /etc/oracle/, /tmp/.oracle, /opt/ORCLfmap