主机网络配置注意事项:
ip地址使用静态配置:static
网关要指定
主机名(hostname)不要出现在/etc/hosts的回环地址(127.0.0.1/::1)所在行!
如果启动过单机asm服务,请先停止

在node1 & node2 运行install.sh
为oracle用户设置口令
修改oracle用户.bashrc文件
export ORA_CRS_HOME=/u01/app/crs_1
export ORACLE_SID=racdb#   (#为节点编号:节点1设为racdb1,节点2设为racdb2)

su -
chown oracle.oinstall /u01/app -R

配置网络:
vi /etc/hosts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
127.0.0.1  localhost.localdomain localhost
::1        localhost6.localdomain6 localhost6
# Public Network - (eth0)
192.168.3.50 stu50
192.168.3.52 stu52
# Public Virtual IP (eth0:1)
192.168.3.51 stu50-vip
192.168.3.53 stu52-vip
# Private Interconnect - (eth1 -> eth0:2)
10.0.0.50 stu50-priv
10.0.0.52 stu52-priv

配置eth0:2:
cd /etc/sysconfig/network-scripts/
cp ifcfg-eth0 ifcfg-eth0:2
DEVICE=eth0:2
BOOTPROTO=static
HWADDR=00:E0:4D:3B:0C:B2
IPADDR=10.0.0.50
IPV6INIT=yes
IPV6_AUTOCONF=yes
NETMASK=255.255.255.0
GATEWAY=10.0.0.1
ONBOOT=yes

配置hangcheck-timer:用于监视 Linux 内核是否挂起
vi /etc/modprobe.conf
options hangcheck-timer hangcheck_tick=30 hangcheck_margin=180
设置开机自动加载hangcheck-timer模块:
vi /etc/rc.local
modprobe hangcheck-timer
检查hangcheck-timer模块是否已经加载:
lsmod | grep hangcheck_timer

配置信任关系:
node1:192.168.3.50
su - oracle
ssh-keygen -t rsa
ssh-keygen -t dsa
cd .ssh
cat *.pub > authorized_keys

node2:192.168.3.52
su - oracle
ssh-keygen -t rsa
ssh-keygen -t dsa
cd .ssh
cat *.pub > authorized_keys

node1:192.168.3.50
scp authorized_keys [email protected]:/home/oracle/.ssh/keys_dbs

node2:192.168.3.52
cat keys_dbs >> authorized_keys
scp authorized_keys [email protected]:/home/oracle/.ssh/

测试信任关系:
node1:192.168.3.50
node2:192.168.3.52
ssh stu50
ssh stu52
ssh stu50-priv
ssh stu52-priv

准备公用卷:iscsi
rpm -ivh compat-db-4.2.52-5.1.i386.rpm
rpm -ivh libXp-1.0.0-8.1.el5.i386.rpm
rpm -ivh openmotif22-2.2.3-18.i386.rpm

node1 : 192.168.3.50 stu50 (iscsi server)

划分10G分区作为iscsi共享磁盘:
分区:/dev/sda5  5889        7105     9775521   83  Linux

iscsi 服务端:ClusterStorage目录下
rpm -ivh perl-Config-General-2.40-1.el5.noarch.rpm
rpm -ivh scsi-target-utils-0.0-5.20080917snap.el5.x86_64.rpm
Server目录下
rpm -ivh iscsi-initiator-utils-6.2.0.871-0.16.el5.i386.rpm

vi /etc/tgt/targets.conf
----------------------------------------
 <target iqn.2011-01.com.oracle.blues:luns1>
        backing-store /dev/sda5
       initiator-address 10.1.1.0/24
 </target>
----------------------------------------

vi /etc/udev/scripts/iscsidev.sh
----------------------------------------
#!/bin/bash
# udev helper: map an iSCSI bus id (udev %b, "host:bus:target:lun") to the
# short target name, used to build /dev/iscsi/<name> symlinks.
bus_id="${1}"
host_num="${bus_id%%:*}"

# No iscsi_host class in sysfs -> no active iSCSI session; nothing to name.
if [ ! -e /sys/class/iscsi_host ]; then
  exit 1
fi

# Deliberately left unquoted below so the shell expands the session globs.
sysfs_glob="/sys/class/iscsi_host/host${host_num}/device/session*/iscsi_session*/targetname"
iqn=$(cat ${sysfs_glob})

if [ -z "${iqn}" ]; then
  exit 1
fi

# Print only the suffix after the last ':' of the IQN (e.g. "luns1").
echo "${iqn##*:}"
----------------------------------------

chmod +x /etc/udev/scripts/iscsidev.sh

chkconfig iscsi on
chkconfig iscsid on
chkconfig tgtd on

service iscsi start
service iscsid start
service tgtd start

tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
iscsiadm -m discovery -t sendtargets -p 10.1.1.103
service iscsi start
fdisk -l

重新扫描服务器
iscsiadm -m session -u
iscsiadm -m discovery -t sendtargets -p 10.1.1.103

vi /etc/rc.local
tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
service iscsi start

iscsi客户端配置 client : 10.1.1.103
rpm -ivh iscsi-initiator-utils-6.2.0.871-0.16.el5.i386.rpm

vi /etc/udev/rules.d/55-openiscsi.rules
-----------------------------------------------
KERNEL=="sd*",BUS=="scsi",PROGRAM="/etc/udev/scripts/iscsidev.sh %b",SYMLINK+="iscsi/%c"
-----------------------------------------------

vi /etc/udev/scripts/iscsidev.sh
----------------------------------------
#!/bin/bash
# udev helper: resolve the iSCSI target name for a given bus id so the
# device can be symlinked under /dev/iscsi/<name> (see 55-openiscsi.rules).
# $1 - udev %b value, "host:bus:target:lun"; the host number is the part
#     before the first ':'.
BUS=${1}
HOST=${BUS%%:*}
# No iscsi_host class in sysfs means no iSCSI session is active.
[ -e /sys/class/iscsi_host ] || exit 1
# Glob path to the session's targetname attribute in sysfs.
file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/targetname"
# ${file} is intentionally unquoted so the shell expands the glob.
target_name=$(cat ${file})
if [ -z "${target_name}" ] ; then
       exit 1
fi
# Emit only the part of the IQN after the last ':' (e.g. "luns1").
echo "${target_name##*:}"
----------------------------------------

chmod +x /etc/udev/scripts/iscsidev.sh

service iscsi start
iscsiadm -m discovery -t sendtargets -p 10.1.1.18 -l
service iscsi start
fdisk -l
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
对iscsi共享盘分区:

将iscsi共享分区变为裸设备:
vi /etc/udev/rules.d/60-raw.rules
ACTION=="add", KERNEL=="sdb1", RUN+="/bin/raw /dev/raw/raw1 %N"
ACTION=="add", KERNEL=="sdb2", RUN+="/bin/raw /dev/raw/raw2 %N"
ACTION=="add", KERNEL=="sdb3", RUN+="/bin/raw /dev/raw/raw3 %N"
ACTION=="add", KERNEL=="sdb5", RUN+="/bin/raw /dev/raw/raw4 %N"
KERNEL=="raw[1]", MODE="0660", GROUP="oinstall", OWNER="root"
KERNEL=="raw[2]", MODE="0660", GROUP="oinstall", OWNER="oracle"
KERNEL=="raw[3]", MODE="0660", GROUP="oinstall", OWNER="oracle"
KERNEL=="raw[4]", MODE="0660", GROUP="oinstall", OWNER="oracle"

分别在node1 & node2启动udev:
start_udev
分别在node1 & node2确认裸设备被加载:
[root@stu50 ~]# ll /dev/raw
总计 0
crw-rw---- 1 root   oinstall 162, 1 01-11 12:44 raw1
crw-rw---- 1 oracle oinstall 162, 2 01-11 12:44 raw2
crw-rw---- 1 oracle oinstall 162, 3 01-11 12:44 raw3
crw-rw---- 1 oracle oinstall 162, 4 01-11 12:44 raw4

使用CVU校验集群安装可行性:
./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -verbose

安装clusterware软件(只需在一个节点做,但要手工将其它节点加入到群):
/mnt/clusterware/runInstaller

注意:在弹出要求运行root.sh脚本的对话框时,先不要运行root.sh脚本,先修改vipca和srvctl脚本,
不然运行脚本过程中调用java会报错!

su - oracle
vi +123 $ORA_CRS_HOME/bin/vipca
在123行 fi 后新添加一行:
unset LD_ASSUME_KERNEL

vi + $ORA_CRS_HOME/bin/srvctl
在export LD_ASSUME_KERNEL这一行后加
unset LD_ASSUME_KERNEL

在最后一个节点运行root.sh时,如果出现下面错误,请按后面给出的方法解决!
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Running vipca(silent) for configuring nodeapps
Error 0(Native: listNetInterfaces:[3])
  [Error 0(Native: listNetInterfaces:[3])]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

注意:以下命令要按自己的网络配置修改(注意网络适配器名称和IP地址,不要盲目照抄!)
cd /u01/app/crs_1/bin
#./oifcfg iflist
#./oifcfg setif -global eth0/10.1.1.0:public
#./oifcfg setif -global eth0:2/10.0.0.0:cluster_interconnect
#./oifcfg getif

如果两台实验机的网卡一个是eth0一个是eth1,那么按如下方法修改:
./oifcfg setif -node node1 eth0/10.1.1.0:public
./oifcfg setif -node node1 eth0:0/172.20.1.0:cluster_interconnect
./oifcfg setif -node node1 eth0:1/172.20.1.0:cluster_interconnect

./oifcfg setif -node node2 eth1/10.1.1.0:public
./oifcfg setif -node node2 eth1:0/172.20.1.0:cluster_interconnect
./oifcfg setif -node node2 eth1:1/172.20.1.0:cluster_interconnect

############ 效果 ########
[root@server bin]# ./oifcfg getif
eth0  10.1.1.0  node1  public
eth0:0  172.20.1.0  node1  cluster_interconnect
eth0:1  172.20.1.0  node1  cluster_interconnect
eth1  10.1.1.0  node2  public
eth1:0  172.20.1.0  node2  cluster_interconnect
eth1:1  172.20.1.0  node2  cluster_interconnect

############ 效果 ########

设置网络接口后在当前节点手工运行vipca
unset LANG
./vipca

vipca向导将资源启动后,查看各资源状态
cd $ORA_CRS_HOME/bin
./crs_stat -t

查看各资源详细信息:
./crs_stat
./crs_stat -p

clusterware软件安装成功之后备份一下ocr!
./ocrconfig -export /home/oracle/ocr.bak

安装数据库软件(只需在一个节点做,会出现多节点的选择选项):安装时选择只安装软件不建库
/mnt/database/runInstaller

clusterware管理:
查看voting disk位置:
#./crsctl query css votedisk

备份voting disk
dd if=voting_disk_name of=backup_file_name bs=4k
还原voting disk
dd if=backup_file_name of=voting_disk_name bs=4k

添加新的表决磁盘:
# crsctl add css votedisk <new voting disk path>

删除表决磁盘:
# crsctl delete css votedisk <old voting disk path>

如果所有节点上的 Oracle Clusterware 都已关闭,请使用 -force 选项:
# crsctl add css votedisk <new voting disk path> -force
# crsctl delete css votedisk <old voting disk path> -force

查看OCR的位置
#./ocrcheck

找到物理备份:
$ocrconfig -showbackup

检查ocr内容:
# ocrdump -backupfile file_name

检查 OCR 完整性:
$ cluvfy comp ocr -n all

OCR 会在以下时间自动进行备份:
每 4 小时:CRS 会保留最后 3 个副本。
每天结束时:CRS 会保留最后 2 个副本。
每周结束时:CRS 会保留最后 2 个副本。

更改自动备份的默认位置:
# ocrconfig -backuploc /shared/bak

还原 OCR 物理备份:
# crsctl stop crs
# ocrconfig -restore <CRS HOME>/cdata/jfv_clus/day.ocr
# crsctl start crs

手工备份:
/data/oracle/crs/bin/ocrconfig -export /data/backup/rac/ocrdisk.bak

还原逻辑 OCR 备份:
# crsctl stop crs
# ocrconfig -import /shared/export/ocrback.dmp
# crsctl start crs

检查 OCR 完整性:
$ cluvfy comp ocr -n all

停止crs:
/etc/init.d/init.crs stop

启动crs:
/etc/init.d/init.crs start

查看系统活动:
tail -f /var/log/messages

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
手工卸载clusterware:如果clusterware安装失败可以使用下面方法卸载clusterware!安装成功就不要卸载了!脚本别瞎用!
cd /u01/app/crs_1/install
./rootdelete.sh
./rootdeinstall.sh

rm -fr /etc/ora*
rm -fr /etc/init.d/*.crs
rm -fr /etc/init.d/*.crsd
rm -fr /etc/init.d/*.css
rm -fr /etc/init.d/*.cssd
su - oracle
rm -fr $ORACLE_BASE/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
cd $ORACLE_HOME/rdbms/lib
校验rac功能是否打开(结果大于0则是rac)
nm -r libknlopt.a | grep -c kcsm.o
打开rac功能
make -f ins_rdbms.mk rac_on
关闭rac功能
make -f ins_rdbms.mk rac_off