Linux 6 for Oracle RAC install

来源:互联网 发布:淘宝代销退货流程 编辑:程序博客网 时间:2024/05/01 20:56
两节点执行:
[root@tbrac01 ~]# vi /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1 localhost.localdomain localhost
::1 localhost6.localdomain6 localhost6
# Public IP for Oracle 11g RAC
192.168.20.138 tbrac01
192.168.20.139 tbrac02
# Private IP for Oracle 11g RAC
172.16.20.14 tbrac01pri
172.16.20.15 tbrac02pri
# Virtual IP for Oracle 11g RAC
192.168.20.140 tbrac01vip
192.168.20.141 tbrac02vip
192.168.20.142 tbrac-scan

测试连通性
[root@tbrac01 ~]# ping tbrac01
PING tbrac01 (192.168.20.138) 56(84) bytes of data.
64 bytes from tbrac01 (192.168.20.138): icmp_seq=1 ttl=64 time=0.022 ms
--- tbrac01 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.022/0.022/0.022/0.000 ms
[root@tbrac01 ~]# ping tbrac02
PING tbrac02 (192.168.20.139) 56(84) bytes of data.
64 bytes from tbrac02 (192.168.20.139): icmp_seq=1 ttl=64 time=1.43 ms
--- tbrac02 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.435/1.435/1.435/0.000 ms
[root@tbrac01 ~]# ping tbrac01pri
PING tbrac01pri (172.16.20.14) 56(84) bytes of data.
64 bytes from tbrac01pri (172.16.20.14): icmp_seq=1 ttl=64 time=0.021 ms
--- tbrac01pri ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.021/0.021/0.021/0.000 ms
[root@tbrac01 ~]# ping tbrac02pri
PING tbrac02pri (172.16.20.15) 56(84) bytes of data.
64 bytes from tbrac02pri (172.16.20.15): icmp_seq=1 ttl=64 time=1.37 ms
64 bytes from tbrac02pri (172.16.20.15): icmp_seq=2 ttl=64 time=0.225 ms
--- tbrac02pri ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.225/0.799/1.374/0.575 ms
yum install compat-libcap1 -y
yum install compat-libstdc++-33 -y
yum install compat-libstdc++-33.i686 -y
yum install gcc -y
yum install gcc-c++ -y
yum install glibc -y
yum install glibc.i686 -y
yum install glibc-devel -y
yum install glibc-devel.i686 -y
yum install ksh -y
yum install libgcc -y
yum install libgcc.i686 -y
yum install libstdc++ -y
yum install libstdc++.i686 -y
yum install libstdc++-devel -y
yum install libstdc++-devel.i686 -y
yum install libaio -y
yum install libaio.i686 -y
yum install libaio-devel -y
yum install libaio-devel.i686 -y
yum install libXext -y
yum install libXext.i686 -y
yum install libXtst -y
yum install libXtst.i686 -y
yum install libX11 -y
yum install libX11.i686 -y
yum install libXau -y
yum install libXau.i686 -y
yum install libxcb -y
yum install libxcb.i686 -y
yum install libXi -y
yum install libXi.i686 -y
yum install make -y
yum install sysstat -y
yum install unixODBC -y
yum install unixODBC-devel -y
[root@localhost ~]# vi /etc/sysctl.conf
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.shmmax = 4398046511104
kernel.shmmni = 4096
kernel.shmall = 1073741824
fs.file-max = 6815744
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.wmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_max = 1048576
fs.aio-max-nr = 1048576
kernel.sem = 250 32000 100 128
kernel.panic_on_oops = 1
[root@localhost ~]# vi /etc/security/limits.conf
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768
oracle soft nproc 131072
oracle hard nproc 131072
oracle soft nofile 1024
oracle hard nofile 65536
[root@localhost ~]# vi /etc/pam.d/login
session required pam_limits.so
session required /lib64/security/pam_limits.so
groupadd -g 1001 oinstall
groupadd -g 1002 dba
groupadd -g 1003 oper
groupadd -g 1004 backupdba
groupadd -g 1005 dgdba
groupadd -g 1006 kmdba
groupadd -g 1007 asmdba
groupadd -g 1008 asmoper
groupadd -g 1009 asmadmin
useradd -u 1010 -g oinstall -G dba,asmadmin,asmdba,asmoper grid
useradd -u 1011 -g oinstall -G dba,oper,backupdba,dgdba,kmdba,asmdba,asmadmin oracle
passwd grid
passwd oracle
groupdel dba
groupdel oper
groupdel backupdba
groupdel dgdba
groupdel kmdba
groupdel asmdba
groupdel asmoper
groupdel asmadmin
groupdel oinstall
userdel -rf oracle
userdel -rf grid

mkdir -p /u01/app/11.2.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle/product/11.2.0/db_1
chown -R grid:oinstall /u01
chmod -R 775 /u01/
chown -R oracle:oinstall /u01/app/oracle
[root@tbrac01 ~]# su - grid
[grid@tbrac01 ~]$ vi .bash_profile
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_SID=+ASM1
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
export DISPLAY=:0.0
umask 022
export PATH
[root@tbrac01 ~]# su - oracle
[oracle@tbrac01 ~]$ vi .bash_profile
# User specific environment and startup programs
PATH=$PATH:$HOME/bin
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/db_1
export ORACLE_SID=trac1
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
export DISPLAY=:0.0
umask 022
export PATH
[root@tbrac01 ~]# vi /etc/sysconfig/ntpd
# Drop root to id 'ntp:ntp' by default.
OPTIONS="-x -u ntp:ntp -p /var/run/ntpd.pid"
# Set to 'yes' to sync hw clock after successful ntpdate
SYNC_HWCLOCK=no
# Additional options for ntpdate
NTPDATE_OPTIONS=""
Oracle和grid用户建立用户等效性
[grid@tbrac02 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/grid/.ssh/id_rsa): Created directory '/home/grid/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/grid/.ssh/id_rsa.
Your public key has been saved in /home/grid/.ssh/id_rsa.pub.
The key fingerprint is:
28:03:13:3d:88:b3:b0:ad:d4:1a:f4:89:1f:61:d9:68 grid@tbrac02
[grid@tbrac02 ~]$ ssh-keygen -t dsa
Generating public/private dsa key pair.
Enter file in which to save the key (/home/grid/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/grid/.ssh/id_dsa.
Your public key has been saved in /home/grid/.ssh/id_dsa.pub.
The key fingerprint is:
dd:7b:4c:05:f4:13:30:16:f6:8f:a8:55:bb:df:d5:a5 grid@tbrac02
[grid@tbrac02 .ssh]$ cat id_rsa.pub >> authorized_keys
[grid@tbrac02 .ssh]$ cat id_dsa.pub >> authorized_keys
[grid@tbrac02 .ssh]$ scp authorized_keys tbrac01:/home/grid/.ssh/
The authenticity of host 'tbrac01 (192.168.20.138)' can't be established.
RSA key fingerprint is 4e:74:d0:f0:3e:8c:34:5a:4b:12:e6:34:b0:a5:6f:84.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'tbrac01,192.168.20.138' (RSA) to the list of known hosts.
grid@tbrac01's password:
authorized_keys 100% 996 1.0KB/s 00:00
[grid@tbrac01 .ssh]$ cat id_rsa.pub >> authorized_keys
[grid@tbrac01 .ssh]$ cat id_dsa.pub >> authorized_keys
[grid@tbrac01 .ssh]$ scp authorized_keys tbrac02:/home/grid/.ssh/
The authenticity of host 'tbrac02 (192.168.20.139)' can't be established.
RSA key fingerprint is 6c:33:a8:b5:4e:cf:37:80:d0:20:11:3c:82:06:7e:58.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'tbrac02,192.168.20.139' (RSA) to the list of known hosts.
grid@tbrac02's password:
authorized_keys
[grid@tbrac02 .ssh]$ ssh tbrac01 date
Fri Apr 1 10:35:26 CST 2016
[grid@tbrac02 .ssh]$ ssh tbrac02 date
Fri Apr 1 10:35:27 CST 2016
[root@tbrac02 asm]# rpm -ivh oracleasm-support-2.1.4-1.el4.i386.rpm
warning: oracleasm-support-2.1.4-1.el4.i386.rpm: Header V3 DSA signature: NOKEY, key ID b38a8516
Preparing... ########################################### [100%]
1:oracleasm-support ########################################### [100%]
[root@tbrac02 asm]# rpm -ivh oracleasm-2.6.18-194.el5-2.0.5-1.el5.x86_64.rpm
Preparing... ########################################### [100%]
1:oracleasm-2.6.18-194.el########################################### [100%]
[root@tbrac02 asm]# rpm -ivh oracleasmlib-2.0.4-1.el4.i386.rpm
warning: oracleasmlib-2.0.4-1.el4.i386.rpm: Header V3 DSA signature: NOKEY, key ID b38a8516
Preparing... ########################################### [100%]
1:oracleasmlib ########################################### [100%]
[root@tbrac02 asm]# /etc/init.d/oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@tbrac02 asm]# fdisk -l
Disk /dev/sda: 107.3 GB, 107374182400 bytes
255 heads, 63 sectors/track, 13054 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 13054 104751832+ 8e Linux LVM
Disk /dev/sdb: 53.6 GB, 53687091200 bytes
255 heads, 63 sectors/track, 6527 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 1 6527 52428096 83 Linux
Disk /dev/sdc: 64.4 GB, 64424509440 bytes
255 heads, 63 sectors/track, 7832 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdc1 1 7832 62910508+ 83 Linux
Disk /dev/sdd: 107.3 GB, 107374182400 bytes
255 heads, 63 sectors/track, 13054 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdd1 1 13054 104856223+ 83 Linux
kmod-oracleasm
[root@tbrac02 asm]# /etc/init.d/oracleasm createdisk CRS /dev/sdb1
Marking disk "CRS" as an ASM disk: [ OK ]
[root@tbrac02 asm]# /etc/init.d/oracleasm createdisk FLA /dev/sdc1
Marking disk "FLA" as an ASM disk: [ OK ]
[root@tbrac02 asm]# /etc/init.d/oracleasm createdisk DATA /dev/sdd1
Marking disk "DATA" as an ASM disk: [ OK ]
[root@tbrac02 asm]# /etc/init.d/oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@tbrac02 asm]# /etc/init.d/oracleasm listdisks
CRS
DATA
FLA
[root@tbrac01 ~]# /etc/init.d/oracleasm status
Checking if ASM is loaded: yes
Checking if /dev/oracleasm is mounted: yes
[root@tbrac01 oraclefile]# cd /home/grid/grid/rpm/
[root@tbrac01 rpm]# ls
cvuqdisk-1.0.9-1.rpm
[root@tbrac01 rpm]# rpm -ivh cvuqdisk-1.0.9-1.rpm
Preparing... ########################################### [100%]
Using default group oinstall to install package
1:cvuqdisk ########################################### [100%]

安装前检查
./runcluvfy.sh stage -pre crsinst -n tbrac01,tbrac02 -fixup -verbose
root.sh删除配置脚本
css log
/u01/app/11.2.0/grid/log/tbrac01/crsd/crsdOUT.log
./app/11.2.0/grid/crs/install/roothas.pl -deconfig -force -verbose
./app/11.2.0/grid/crs/install/rootcrs.pl -deconfig -force
/app/product/grid/11.2.0/crs/install/roothas.pl -delete -force -verbose
/app/product/grid/11.2.0/root.sh

 
 
 




 
 
 
 
This document is being delivered to you via Oracle Support's Rapid Visibility (RaV) process and therefore has not been subject to an independent technical review.
Applies to:
Oracle Database - Enterprise Edition - Version 11.2.0.1 and later
Generic Linux
***Checked for relevance on 11-Mar-2013***
Symptoms
After successful upgrade to 11gR2 Grid Infrastructure clusterware, INS-20802 is reported while running the Oracle Cluster Verification Utility post check; installation logs located in $ORACLE_BASE/oraInventory/logs/installActions<TIMESTAMP>.log show entries similar to the following:
INFO: PRVF-9802 : Attempt to get udev info from node "node1" failed
INFO: Checking udev settings for device "/dev/raw/raw1"
INFO: PRVF-5184 : Check of following Udev attributes of "node2:/dev/raw/raw1" failed: "[Group: Found='root' Expected='oinstall', Permissions: Found='0660' Expected='0640']"
INFO: PRVF-5186 : Check for Udev permissions failed on node "node1"
Or
INFO: Checking udev settings for device "/dev/raw/raw1"
INFO: PRVF-5184 : Check of following Udev attributes of "node1:/dev/raw/raw1" failed: "[Group: Found='root' Expected='oinstall', Permissions: Found='0600' Expected='0640']"
INFO: PRVF-5184 : Check of following Udev attributes of "node2:/dev/raw/raw1" failed: "[Group: Found='root' Expected='oinstall', Permissions: Found='0600' Expected='0640']"
Cause
There are multiple likely causes:
Cause 1. OCR and Voting Disk devices are not managed by UDEV, thus UDEV rules doesn't have proper configuration for OCR and Voting disk devices
Cause 2. Due to unpublished bug 8726128, even UDEV is configured properly, the error still showed up on local node on RHEL4/OEL4
Solution
1. To keep OCR and Voting Disk device name persistent across node reboot with proper ownership and permission, they need to be managed by UDEV; if you have other tool for the same purpose, you can safely ignore the warning.
2. Unpublished bug 8726128 will be fixed in 11.2.0.2, if "ls -l name_of_ocr" command shows expected result, the error can be ignored

Note: it's recommended to migrate the OCR and Voting Disk to ASM once the upgrade is finished; if the Voting Disk is migrated to ASM, it will inherit the underlying diskgroup redundancy:
To migrate OCR to ASM:
$GRID_HOME/bin/ocrconfig -add DiskGroupName
$GRID_HOME/bin/ocrconfig -delete Current_OCR_NAME
Example:
$GRID_HOME/bin/ocrconfig -add +DGOCW
$GRID_HOME/bin/ocrconfig -delete /dev/raw/raw1

To migrate Voting Disk to ASM:
$GRID_HOME/bin/crsctl replace votedisk +DiskGroupName

Example:
$GRID_HOME/bin/crsctl replace votedisk +DGOCW
References
 

 
按顺序依次在两个节点上执行脚本
tbrac01
/u01/app/oraInventory/orainstRoot.sh
tbrac02
/u01/app/oraInventory/orainstRoot.sh
tbrac01
/u01/app/11.2.0/grid/root.sh
tbrac02
/u01/app/11.2.0/grid/root.sh
 
 
 
 
 
安装数据库
 








asmca 创建asm盘



 
 
升级记录:
grid GI升级
两节点分别备份grid和Oracle的原始opatch
[root@trac02 ~]# mv /u01/app/11.2.0/grid/OPatch/ /u01/app/11.2.0/grid/OPatchbk
[root@trac02 ~]# mv /u01/app/oracle/product/11.1.0/db_1/OPatch/ /u01/app/oracle/product/11.1.0/db_1/OPatchbk
两节点替换新版本Opatch
[root@trac02 ~]# cp -r OPatch /u01/app/11.2.0/grid/
[root@trac02 ~]# cp -r OPatch /u01/app/oracle/product/11.1.0/db_1/
[root@trac02 ~]# chown -R grid:oinstall /u01/app/11.2.0/grid/OPatch
[root@trac02 ~]# chown -R oracle:oinstall /u01/app/oracle/product/11.1.0/db_1/OPatch
[root@trac02 ~]# /u01/app/11.2.0/grid/OPatch/opatch version
OPatch Version: 11.2.0.3.0
OPatch succeeded.
[root@trac02 ~]# /u01/app/oracle/product/11.1.0/db_1/OPatch/opatch version
OPatch Version: 11.2.0.3.0
OPatch succeeded.
两节点生成ocm文件
[root@trac02 ~]# su - oracle
[oracle@trac02 ~]$ /u01/app/oracle/product/11.1.0/db_1/OPatch/ocm/bin/emocmrsp
OCM Installation Response Generator 10.3.4.0.0 - Production
Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
Provide your email address to be informed of security issues, install and
initiate Oracle Configuration Manager. Easier for you if you use your My
Oracle Support Email address/User Name.
Visit http://www.oracle.com/support/policies.html for details.
Email address/User Name:
You have not provided an email address for notification of security issues.
Do you wish to remain uninformed of security issues ([Y]es, [N]o) [N]: Y
The OCM configuration response file (ocm.rsp) was successfully created.
[oracle@trac02 ~]$ ls
ocm.rsp
两节点依次GI升级
[root@trac02 u01]# /u01/app/11.2.0/grid/OPatch/opatch auto /u01/opatchfile/ -ocmrf /u01/opatchfile/ocm.rsp
Executing /u01/app/11.2.0/grid/perl/bin/perl /u01/app/11.2.0/grid/OPatch/crs/patch11202.pl -patchdir /u01 -patchn opatchfile -ocmrf /u01/opatchfile/ocm.rsp -paramfile /u01/app/11.2.0/grid/crs/install/crsconfig_params
INC is /u01/opatchfile/13696242/files/crs/install /u01/app/11.2.0/grid/crs/install /u01/app/11.2.0/grid/perl/lib/5.10.0/x86_64-linux-thread-multi /u01/app/11.2.0/grid/perl/lib/5.10.0 /u01/app/11.2.0/grid/perl/lib/site_perl/5.10.0/x86_64-linux-thread-multi /u01/app/11.2.0/grid/perl/lib/site_perl/5.10.0 /u01/app/11.2.0/grid/perl/lib/5.10.0/x86_64-linux-thread-multi /u01/app/11.2.0/grid/perl/lib/5.10.0/x86_64-linux-thread-multi /u01/app/11.2.0/grid/perl/lib/5.10.0 /u01/app/11.2.0/grid/perl/lib/site_perl/5.10.0/x86_64-linux-thread-multi /u01/app/11.2.0/grid/perl/lib/site_perl/5.10.0 /u01/app/11.2.0/grid/perl/lib/site_perl .
opatch auto log file location is :
/u01/app/11.2.0/grid/OPatch/crs/../../cfgtoollogs/opatchauto2016-03-31_15-20-21.log
Detected Oracle Clusterware install
Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
Clusterware stack up on node trac01
Refer to opatch auto help for patching shared homes and follow the steps
opatch auto succeeded.
0 0
原创粉丝点击