====== 1- GRID 19c Installation ======
== Context ==
* 2 nodes, each with 3 network interfaces
* Oracle Linux 7 installation (compatible with 19.3)
* oracle2
* LAN: 192.168.10.212
* storage network (iSCSI): 10.6.0.12
* interconnect: 172.16.0.12
* oracle3
* LAN: 192.168.10.213
* storage network (iSCSI): 10.6.0.13
* interconnect: 172.16.0.13
* RAC
* /etc/hosts on both nodes (reserve the IPs and create the DNS records in the meantime)
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
##### RAC #####
# Public
192.168.10.212 oracle2 oracle2.labo.delag.fr
192.168.10.213 oracle3 oracle3.labo.delag.fr
#
# Private (interconnect)
172.16.0.12 oracle2-priv oracle2-priv.labo.delag.fr
172.16.0.13 oracle3-priv oracle3-priv.labo.delag.fr
#
# Virtual
192.168.10.222 oracle2-vip oracle2-vip.labo.delag.fr
192.168.10.223 oracle3-vip oracle3-vip.labo.delag.fr
#
# Scan
192.168.10.232 rac1-scan rac1-scan.labo.delag.fr
192.168.10.233 rac1-scan rac1-scan.labo.delag.fr
192.168.10.234 rac1-scan rac1-scan.labo.delag.fr
#
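* (optional check, not in the original run) verify that every RAC name resolves as expected:
[root@oracle2 ~]# getent hosts oracle2 oracle3 oracle2-priv oracle3-priv oracle2-vip oracle3-vip rac1-scan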
* on a non-Oracle distribution, the following packages must be downloaded manually from Oracle:
* oracleasm-support
* oracle-database-preinstall-19c
\\
== 1/ Prerequisites ==
To be done on both nodes
* Additional packages
[root@oracle2 ~]# yum install cifs-utils tree targetcli oracleasm-support oracle-database-preinstall-19c iscsi-initiator-utils nmap
* Firewall
[root@oracle2 ~]# systemctl status firewalld
[root@oracle2 ~]# systemctl stop firewalld
[root@oracle2 ~]# systemctl disable firewalld
* Adjust SELinux
vi /etc/selinux/config
SELINUX=permissive
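* (suggested, not in the original notes) switch to permissive immediately so a reboot is not required:
[root@oracle2 ~]# setenforce 0
[root@oracle2 ~]# getenforce
Permissive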
* Adjust the OS limits for the grid and oracle users
[root@oracle2 ~]# vi /etc/security/limits.conf
###GRID##############
grid soft nofile 1024
grid hard nofile 65536
grid soft nproc 16384
grid hard nproc 16384
grid soft stack 10240
grid hard stack 32768
grid hard memlock 134217728
grid soft memlock 134217728
###ORACLE##############
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft nproc 16384
oracle hard nproc 16384
oracle soft stack 10240
oracle hard stack 32768
oracle hard memlock 134217728
oracle soft memlock 134217728
[root@oracle2 ~]# vi /etc/security/limits.d/oracle-database-preinstall-19c.conf
#### GRID ####
grid soft nofile 1024
grid hard nofile 65536
grid soft nproc 16384
grid hard nproc 16384
grid soft stack 10240
grid hard stack 32768
grid hard memlock 134217728
grid soft memlock 134217728
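* (optional check) confirm the limits are applied to a fresh grid session; the values should match the files above:
[root@oracle2 ~]# su - grid -c "ulimit -n -u -s"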
* create the LVM volume
[root@oracle2 ~]# pvcreate /dev/xvdb
[root@oracle2 ~]# vgcreate vg_u01 /dev/xvdb
[root@oracle2 ~]# lvcreate -n lv_u01 -l 100%VG vg_u01
* format the volume as XFS
[root@oracle2 ~]# mkfs -t xfs /dev/vg_u01/lv_u01
* add /u01 to fstab and mount the volume
[root@oracle2 ~]# vi /etc/fstab
### ORACLE FS ###
/dev/vg_u01/lv_u01 /u01 xfs defaults 0 0
[root@oracle2 ~]# mkdir /u01
[root@oracle2 ~]# mount -a
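* (optional check) confirm the volume is mounted with the expected filesystem:
[root@oracle2 ~]# df -hT /u01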
* extend the swap (9.5 GB minimum; a third 15 GB disk is added)
[root@oracle2 ~]# pvcreate /dev/xvdc
Physical volume "/dev/xvdc" successfully created.
[root@oracle2 ~]# vgcreate ol_swap /dev/xvdc
Volume group "ol_swap" successfully created
[root@oracle2 ~]# lvcreate -n lv_swap -l 100%VG ol_swap
Logical volume "lv_swap" created.
[root@oracle2 ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root ol -wi-ao---- 34,80g
swap ol -wi-ao---- 4,00g
lv_swap ol_swap -wi-a----- <15,00g
lv_u01 vg_u01 -wi-ao---- <50,00g
[root@oracle2 ~]# swapoff -v /dev/ol/swap
swapoff /dev/ol/swap
[root@oracle2 ~]# mkswap /dev/ol_swap/lv_swap
Configure l'espace d'échange (swap) en version 1, taille = 15724540 Kio
pas d'étiquette, UUID=dc298a2b-0268-4c3e-a788-dbf3aa0094b8
[root@oracle2 ~]# swapon /dev/ol_swap/lv_swap
[root@oracle2 ~]# lvremove /dev/mapper/ol-swap
Do you really want to remove active logical volume ol/swap? [y/n]: y
Logical volume "swap" successfully removed
[root@oracle2 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sdf 8:80 0 10G 0 disk
└─sdf1 8:81 0 10G 0 part
xvdc 202:32 0 15G 0 disk
└─ol_swap-lv_swap 252:3 0 15G 0 lvm [SWAP]
sdd 8:48 0 20G 0 disk
└─sdd1 8:49 0 20G 0 part
xvda 202:0 0 40G 0 disk
├─xvda2 202:2 0 1G 0 part /boot
├─xvda3 202:3 0 38,8G 0 part
│ └─ol-root 252:0 0 34,8G 0 lvm /
└─xvda1 202:1 0 200M 0 part /boot/efi
sdb 8:16 0 20G 0 disk
└─sdb1 8:17 0 20G 0 part
sr0 11:0 1 1024M 0 rom
sdg 8:96 0 40G 0 disk
└─sdg1 8:97 0 40G 0 part
sde 8:64 0 10G 0 disk
└─sde1 8:65 0 10G 0 part
xvdb 202:16 0 50G 0 disk
└─vg_u01-lv_u01 252:2 0 50G 0 lvm /u01
sdc 8:32 0 10G 0 disk
└─sdc1 8:33 0 10G 0 part
sda 8:0 0 20G 0 disk
└─sda1 8:1 0 20G 0 part
* update fstab with the new swap entry, then reboot
[root@oracle2 ~]# vi /etc/fstab
/dev/mapper/ol_swap-lv_swap none swap defaults 0 0
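* (optional check, after the reboot) confirm the new swap device is the only one in use:
[root@oracle2 ~]# swapon --show
[root@oracle2 ~]# free -h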
* create the grid user and the ASM groups (not created by the preinstall RPM)
[root@oracle2 ~]# groupadd asmadmin
[root@oracle2 ~]# groupadd asmdba
[root@oracle2 ~]# groupadd asmoper
[root@oracle2 ~]# useradd -g oinstall -G asmadmin,asmdba,asmoper -d /home/grid -m grid
[root@oracle2 ~]# usermod -a -G asmdba oracle
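* (optional check) verify the group memberships:
[root@oracle2 ~]# id grid
[root@oracle2 ~]# id oracle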
* Directory layout:
[root@oracle2 ~]# mkdir -p /u01/Downloads
[root@oracle2 ~]# mkdir -p /u01/app/oracle/product/19c/db_home1 #Oracle Home
[root@oracle2 ~]# chown -R oracle:oinstall /u01
[root@oracle2 ~]# mkdir -p /u01/app/19c/grid_base # Grid Base
[root@oracle2 ~]# mkdir -p /u01/app/19c/grid_home # Grid Home
[root@oracle2 ~]# chown -R grid:oinstall /u01/app/19c
[root@oracle2 ~]# mkdir -p /u01/app/19c/oraInventory
[root@oracle2 ~]# chown -R grid:oinstall /u01/app/19c/oraInventory
[root@oracle2 ~]# chmod -R 775 /u01/app/19c/oraInventory
* mount the CIFS share (to fetch the Oracle binaries)
[root@oracle2 ~]# mkdir /mnt/mdl-nas3
[root@oracle2 ~]# mount.cifs -o username=mdl //mdl-nas3/share_smb /mnt/mdl-nas3/
* copy the binaries to both nodes
[root@oracle2 ~]# cp /mnt/mdl-nas3/3-Software/6-BDD/ORACLE/Oracle19c/Grid_Infrastructure_19.3.0.0.0.zip /u01/Downloads/
[root@oracle2 ~]# cp /mnt/mdl-nas3/3-Software/6-BDD/ORACLE/Oracle19c/Oracle_Database_19.3.0.0.0.zip /u01/Downloads/
* set the passwords
[root@oracle2 ~]# passwd grid
[root@oracle2 ~]# passwd oracle
* customize the iSCSI initiator name on both nodes
[root@oracle2 ~]# vi /etc/iscsi/initiatorname.iscsi
InitiatorName=iqn.labo:oracle2.labo.local
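* (suggested, not in the original notes) restart the initiator daemon so the new IQN is used for the next logins:
[root@oracle2 ~]# systemctl restart iscsid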
* add the disks for ASM (the LUNs must first be created on the SAN and shared over iSCSI)
* it is important to provision the SAN disks with 4K or 512-byte block sizes (ASM 12.2+ compatibility)
* on TrueNAS, edit the extent properties of each LUN as follows
{{:oracle:install19c_rac:truenas_editblocsize.png?1200|}}
{{:oracle:install19c_rac:truenas_extendblocsize.png?900|}}
* 3 × 20 GB disks for the DATA diskgroup (4 MB AU)
* 3 × 10 GB disks for the OCR diskgroup (4 MB AU)
* 1 × 40 GB disk for the FRA diskgroup (4 MB AU)
[root@oracle2 ~]# iscsiadm --mode discovery -t sendtargets --portal 10.6.0.2
10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-34d020c81974e95a:HYPER-V2.RAC1.DATA1
10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-b261776ed6c0ab21:HYPER-V2.RAC1.DATA2
10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-c364079a0a6462e4:HYPER-V2.RAC1.DATA3
10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-5a1c19783d48f2e2:HYPER-V2.RAC1.OCR1
10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-b7812c75eb637d45:HYPER-V2.RAC1.OCR3
10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-cc172b0ab861ac87:HYPER-V2.RAC1.OCR2
10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-2f20bb5fccc7dde7:HYPER-V2.RAC1.FRA1
iscsiadm --mode node --targetname iqn.2009-10.com.osnexus:b592670e-34d020c81974e95a:HYPER-V2.RAC1.DATA1 --portal 10.6.0.2 --login
iscsiadm --mode node --targetname iqn.2009-10.com.osnexus:b592670e-b261776ed6c0ab21:HYPER-V2.RAC1.DATA2 --portal 10.6.0.2 --login
iscsiadm --mode node --targetname iqn.2009-10.com.osnexus:b592670e-c364079a0a6462e4:HYPER-V2.RAC1.DATA3 --portal 10.6.0.2 --login
iscsiadm --mode node --targetname iqn.2009-10.com.osnexus:b592670e-5a1c19783d48f2e2:HYPER-V2.RAC1.OCR1 --portal 10.6.0.2 --login
iscsiadm --mode node --targetname iqn.2009-10.com.osnexus:b592670e-cc172b0ab861ac87:HYPER-V2.RAC1.OCR2 --portal 10.6.0.2 --login
iscsiadm --mode node --targetname iqn.2009-10.com.osnexus:b592670e-b7812c75eb637d45:HYPER-V2.RAC1.OCR3 --portal 10.6.0.2 --login
iscsiadm --mode node --targetname iqn.2009-10.com.osnexus:b592670e-2f20bb5fccc7dde7:HYPER-V2.RAC1.FRA1 --portal 10.6.0.2 --login
[root@oracle2 ~]# lsblk
[root@oracle2 ~]# iscsiadm --mode session
tcp: [16] 10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-34d020c81974e95a:HYPER-V2.RAC1.DATA1 (non-flash)
tcp: [17] 10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-b261776ed6c0ab21:HYPER-V2.RAC1.DATA2 (non-flash)
tcp: [18] 10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-c364079a0a6462e4:HYPER-V2.RAC1.DATA3 (non-flash)
tcp: [19] 10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-5a1c19783d48f2e2:HYPER-V2.RAC1.OCR1 (non-flash)
tcp: [21] 10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-cc172b0ab861ac87:HYPER-V2.RAC1.OCR2 (non-flash)
tcp: [22] 10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-b7812c75eb637d45:HYPER-V2.RAC1.OCR3 (non-flash)
tcp: [23] 10.6.0.2:3260,1 iqn.2009-10.com.osnexus:b592670e-2f20bb5fccc7dde7:HYPER-V2.RAC1.FRA1 (non-flash)
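* (suggested, not in the original notes) make the logins persistent across reboots; node.startup is usually already automatic, shown here as a safeguard:
[root@oracle2 ~]# iscsiadm --mode node --op update -n node.startup -v automatic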
* Create the partitions (on oracle2)
[root@oracle2 ~]# cfdisk /dev/sda
[root@oracle2 ~]# cfdisk /dev/sdb
[root@oracle2 ~]# cfdisk /dev/sdc
[root@oracle2 ~]# cfdisk /dev/sdd
[root@oracle2 ~]# cfdisk /dev/sde
[root@oracle2 ~]# cfdisk /dev/sdf
[root@oracle2 ~]# cfdisk /dev/sdg
* verify that the partitions report the required block size (4K physical sectors here)
[root@oracle2 ~]# lsblk -o NAME,SIZE,PHY-SEC,LOG-SEC /dev/sd*1
NAME SIZE PHY-SEC LOG-SEC
sda1 20G 4096 512
sdb1 20G 4096 512
sdc1 20G 4096 512
sdd1 10G 4096 512
sde1 10G 4096 512
sdf1 10G 4096 512
sdg1 40G 4096 512
\\
== 2/ ASM ==
* Initialize ASM (to be done on both nodes)
[root@oracle2 ~]# oracleasm configure -i
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface [grid]:
Default group to own the driver interface [asmadmin]:
Start Oracle ASM library driver on boot (y/n) [y]:
Scan for Oracle ASM disks on boot (y/n) [y]:
Writing Oracle ASM library driver configuration: done
[root@oracle2 ~]# oracleasm init
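* (optional check) confirm the kernel driver is loaded and /dev/oracleasm is mounted:
[root@oracle2 ~]# oracleasm status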
* Create the ASM disks (on oracle2)
[root@oracle2 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
[root@oracle2 grid]# oracleasm createdisk ASM_DATA1 /dev/sda1
Writing disk header: done
Instantiating disk: done
[root@oracle2 grid]# oracleasm createdisk ASM_DATA2 /dev/sdb1
Writing disk header: done
Instantiating disk: done
[root@oracle2 grid]# oracleasm createdisk ASM_DATA3 /dev/sdc1
Writing disk header: done
Instantiating disk: done
[root@oracle2 grid]# oracleasm createdisk ASM_OCR1 /dev/sdd1
Writing disk header: done
Instantiating disk: done
[root@oracle2 ~]# oracleasm createdisk ASM_OCR2 /dev/sde1
Writing disk header: done
Instantiating disk: done
[root@oracle2 ~]# oracleasm createdisk ASM_OCR3 /dev/sdf1
Writing disk header: done
Instantiating disk: done
[root@oracle2 ~]# oracleasm createdisk ASM_FRA1 /dev/sdg1
Writing disk header: done
Instantiating disk: done
[root@oracle2 ~]# oracleasm listdisks
ASM_DATA1
ASM_DATA2
ASM_DATA3
ASM_FRA1
ASM_OCR1
ASM_OCR2
ASM_OCR3
* Register the ASM disks (on oracle3)
[root@oracle3 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "ASM_DATA1"
Instantiating disk "ASM_DATA2"
Instantiating disk "ASM_DATA3"
Instantiating disk "ASM_OCR1"
Instantiating disk "ASM_OCR2"
Instantiating disk "ASM_OCR3"
Instantiating disk "ASM_FRA1"
[root@oracle3 ~]# oracleasm listdisks
ASM_DATA1
ASM_DATA2
ASM_DATA3
ASM_FRA1
ASM_OCR1
ASM_OCR2
ASM_OCR3
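* (optional check) confirm a labeled disk is valid on the second node, e.g. for ASM_DATA1:
[root@oracle3 ~]# oracleasm querydisk ASM_DATA1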
\\
== 3/ GRID installation ==
* copy the binaries to oracle2
[root@oracle2 ~]# cp /mnt/mdl-nas3/3-Software/6-BDD/ORACLE/Oracle19c/Grid_Infrastructure_19.3.0.0.0.zip /u01/Downloads/
* unzip as the grid user
[root@oracle2 Downloads]# chmod 777 /u01/Downloads/*
[root@oracle2 Downloads]# su - grid
Dernière connexion : vendredi 27 juin 2025 à 07:19:05 EDT sur pts/0
[grid@oracle2 ~]$ cd /u01/Downloads/
[grid@oracle2 Downloads]$ unzip /u01/Downloads/Grid_Infrastructure_19.3.0.0.0.zip -d /u01/app/19c/grid_home/
* SSH user equivalence
[root@oracle2 ~]# cd /u01/app/19c/grid_home/
[root@oracle2 ~]# ./deinstall/sshUserSetup.sh -user grid -hosts 'oracle2 oracle3 oracle2-priv oracle3-priv' -noPromptPassphrase -confirm -advanced
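* (optional check) validate the equivalence before running cluvfy; each command must return without a password prompt:
[grid@oracle2 ~]$ for h in oracle2 oracle3 oracle2-priv oracle3-priv; do ssh $h hostname; done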
* Run the cluvfy utility (against the private/interconnect interfaces)
[grid@oracle2 ~]$ cd /u01/app/19c/grid_home
[grid@oracle2 grid_home]$ ./runcluvfy.sh stage -pre crsinst -n oracle2-priv,oracle3-priv -verbose
* prepare the response file
* set the passwords in //oracle.install.asm.SYSASMPassword=// and //oracle.install.asm.monitorPassword=//
[grid@oracle2 ~]$ vi /u01/Downloads/GridInstall.rsp
###############################################################################
## Copyright(c) Oracle Corporation 1998,2019. All rights reserved. ##
## ##
## Specify values for the variables listed below to customize ##
## your installation. ##
## ##
## Each variable is associated with a comment. The comment ##
## can help to populate the variables with the appropriate ##
## values. ##
## ##
## IMPORTANT NOTE: This file contains plain text passwords and ##
## should be secured to have read permission only by oracle user ##
## or db administrator who owns this installation. ##
## ##
###############################################################################
###############################################################################
## ##
## Instructions to fill this response file ##
## To register and configure 'Grid Infrastructure for Cluster' ##
## - Fill out sections A,B,C,D,E,F and G ##
## - Fill out section G if OCR and voting disk should be placed on ASM ##
## ##
## To register and configure 'Grid Infrastructure for Standalone server' ##
## - Fill out sections A,B and G ##
## ##
## To register software for 'Grid Infrastructure' ##
## - Fill out sections A,B and D ##
## - Provide the cluster nodes in section D when choosing CRS_SWONLY as ##
## installation option in section A ##
## ##
## To upgrade clusterware and/or Automatic storage management of earlier ##
## releases ##
## - Fill out sections A,B,C,D and H ##
## ##
## To add more nodes to the cluster ##
## - Fill out sections A and D ##
## - Provide the cluster nodes in section D when choosing CRS_ADDNODE as ##
## installation option in section A ##
## ##
###############################################################################
#------------------------------------------------------------------------------
# Do not change the following system generated value.
#------------------------------------------------------------------------------
oracle.install.responseFileVersion=/oracle/install/rspfmt_crsinstall_response_schema_v19.0.0
###############################################################################
# #
# SECTION A - BASIC #
# #
###############################################################################
#-------------------------------------------------------------------------------
# Specify the location which holds the inventory files.
# This is an optional parameter if installing on
# Windows based Operating System.
#-------------------------------------------------------------------------------
INVENTORY_LOCATION=/u01/app/19c/oraInventory
#-------------------------------------------------------------------------------
# Specify the installation option.
# Allowed values: CRS_CONFIG or HA_CONFIG or UPGRADE or CRS_SWONLY or HA_SWONLY
# - CRS_CONFIG : To register home and configure Grid Infrastructure for cluster
# - HA_CONFIG : To register home and configure Grid Infrastructure for stand alone server
# - UPGRADE : To register home and upgrade clusterware software of earlier release
# - CRS_SWONLY : To register Grid Infrastructure Software home (can be configured for cluster
# or stand alone server later)
# - HA_SWONLY : To register Grid Infrastructure Software home (can be configured for stand
# alone server later. This is only supported on Windows.)
# - CRS_DELETE_NODE : To delete nodes to the cluster
# - CRS_ADDNODE : To add more nodes to the cluster
#-------------------------------------------------------------------------------
oracle.install.option=CRS_CONFIG
#-------------------------------------------------------------------------------
# Specify the complete path of the Oracle Base.
#-------------------------------------------------------------------------------
ORACLE_BASE=/u01/app/19c/grid_base
################################################################################
# #
# SECTION B - GROUPS #
# #
# The following three groups need to be assigned for all GI installations. #
# OSDBA and OSOPER can be the same or different. OSASM must be different #
# than the other two. #
# The value to be specified for OSDBA, OSOPER and OSASM group is only for #
# Unix based Operating System. #
# These groups are not required for upgrades, as they will be determined #
# from the Oracle home to upgrade. #
# #
################################################################################
#-------------------------------------------------------------------------------
# The OSDBA_GROUP is the OS group which is to be granted SYSDBA privileges.
#-------------------------------------------------------------------------------
oracle.install.asm.OSDBA=asmdba
#-------------------------------------------------------------------------------
# The OSOPER_GROUP is the OS group which is to be granted SYSOPER privileges.
# The value to be specified for OSOPER group is optional.
# Value should not be provided if configuring Client Cluster - i.e. storageOption=CLIENT_ASM_STORAGE.
#-------------------------------------------------------------------------------
oracle.install.asm.OSOPER=asmoper
#-------------------------------------------------------------------------------
# The OSASM_GROUP is the OS group which is to be granted SYSASM privileges. This
# must be different than the previous two.
#-------------------------------------------------------------------------------
oracle.install.asm.OSASM=asmadmin
################################################################################
# #
# SECTION C - SCAN #
# #
################################################################################
#-------------------------------------------------------------------------------
# Specify the type of SCAN configuration for the cluster
# Allowed values : LOCAL_SCAN and SHARED_SCAN
#-------------------------------------------------------------------------------
oracle.install.crs.config.scanType=LOCAL_SCAN
#-------------------------------------------------------------------------------
# Applicable only if SHARED_SCAN is being configured for cluster
# Specify the path to the SCAN client data file
#-------------------------------------------------------------------------------
oracle.install.crs.config.SCANClientDataFile=
#-------------------------------------------------------------------------------
# Specify a name for SCAN
# Applicable if LOCAL_SCAN is being configured for the cluster
# If you choose to configure the cluster with GNS with Auto assigned Node VIPs(DHCP),then the scanName should be specified in the format of 'SCAN name.Cluster name.GNS sub-domain'
#-------------------------------------------------------------------------------
oracle.install.crs.config.gpnp.scanName=rac1-scan
#-------------------------------------------------------------------------------
# Specify a unused port number for SCAN service
#-------------------------------------------------------------------------------
oracle.install.crs.config.gpnp.scanPort=1521
################################################################################
# #
# SECTION D - CLUSTER & GNS #
# #
################################################################################
#-------------------------------------------------------------------------------
# Specify the required cluster configuration
# Allowed values: STANDALONE, DOMAIN, MEMBERDB, MEMBERAPP
#-------------------------------------------------------------------------------
oracle.install.crs.config.ClusterConfiguration=STANDALONE
#-------------------------------------------------------------------------------
# Specify 'true' if you would like to configure the cluster as Extended, else
# specify 'false'
#
# Applicable only for STANDALONE and DOMAIN cluster configuration
#-------------------------------------------------------------------------------
oracle.install.crs.config.configureAsExtendedCluster=false
#-------------------------------------------------------------------------------
# Specify the Member Cluster Manifest file
#
# Applicable only for MEMBERDB and MEMBERAPP cluster configuration
#-------------------------------------------------------------------------------
oracle.install.crs.config.memberClusterManifestFile=
#-------------------------------------------------------------------------------
# Specify a name for the Cluster you are creating.
#
# The maximum length allowed for clustername is 63 characters. The name can be
# any combination of lower and uppercase alphabets (A - Z), (0 - 9) and hyphens (-).
#
# Applicable only for STANDALONE and DOMAIN cluster configuration
#-------------------------------------------------------------------------------
oracle.install.crs.config.clusterName=rac1
#-------------------------------------------------------------------------------
# Applicable only for STANDALONE, DOMAIN, MEMBERDB cluster configuration.
# Specify 'true' if you would like to configure Grid Naming Service(GNS), else
# specify 'false'
#-------------------------------------------------------------------------------
oracle.install.crs.config.gpnp.configureGNS=false
#-------------------------------------------------------------------------------
# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to configure GNS.
# Specify 'true' if you would like to assign SCAN name VIP and Node VIPs by DHCP
# , else specify 'false'
#-------------------------------------------------------------------------------
oracle.install.crs.config.autoConfigureClusterNodeVIP=false
#-------------------------------------------------------------------------------
# Applicable only if you choose to configure GNS.
# Specify the type of GNS configuration for cluster
# Allowed values are: CREATE_NEW_GNS and USE_SHARED_GNS
# Only USE_SHARED_GNS value is allowed for MEMBERDB cluster configuration.
#-------------------------------------------------------------------------------
oracle.install.crs.config.gpnp.gnsOption=
#-------------------------------------------------------------------------------
# Applicable only if SHARED_GNS is being configured for cluster
# Specify the path to the GNS client data file
#-------------------------------------------------------------------------------
oracle.install.crs.config.gpnp.gnsClientDataFile=
#-------------------------------------------------------------------------------
# Applicable only for STANDALONE and DOMAIN cluster configuration if you choose to
# configure GNS for this cluster oracle.install.crs.config.gpnp.gnsOption=CREATE_NEW_GNS
# Specify the GNS subdomain and an unused virtual hostname for GNS service
#-------------------------------------------------------------------------------
oracle.install.crs.config.gpnp.gnsSubDomain=
oracle.install.crs.config.gpnp.gnsVIPAddress=
#-------------------------------------------------------------------------------
# Specify the list of sites - only if configuring an Extended Cluster
#-------------------------------------------------------------------------------
oracle.install.crs.config.sites=
#-------------------------------------------------------------------------------
# Specify the list of nodes that have to be configured to be part of the cluster.
#
# The list should a comma-separated list of tuples. Each tuple should be a
# colon-separated string that contains
# - 1 field if you have chosen CRS_SWONLY as installation option, or
# - 1 field if configuring an Application Cluster, or
# - 3 fields if configuring a Flex Cluster
# - 3 fields if adding more nodes to the configured cluster, or
# - 4 fields if configuring an Extended Cluster
#
# The fields should be ordered as follows:
# 1. The first field should be the public node name.
# 2. The second field should be the virtual host name
# (Should be specified as AUTO if you have chosen 'auto configure for VIP'
# i.e. autoConfigureClusterNodeVIP=true)
# 3. The third field indicates the site designation for the node. To be specified only if configuring an Extended Cluster.
# Only the 1st field is applicable if you have chosen CRS_SWONLY as installation option
# Only the 1st field is applicable if configuring an Application Cluster
#
# Examples
# For registering GI for a cluster software: oracle.install.crs.config.clusterNodes=node1,node2
# For adding more nodes to the configured cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip,node2:node2-vip
# For configuring Application Cluster: oracle.install.crs.config.clusterNodes=node1,node2
# For configuring Flex Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip,node2:node2-vip
# For configuring Extended Cluster: oracle.install.crs.config.clusterNodes=node1:node1-vip:site1,node2:node2-vip:site2
# You can specify a range of nodes in the tuple using colon separated fields of format
# hostnameprefix:lowerbound-upperbound:hostnamesuffix:vipsuffix:role of node
#
#-------------------------------------------------------------------------------
oracle.install.crs.config.clusterNodes=oracle2:oracle2-vip,oracle3:oracle3-vip
#-------------------------------------------------------------------------------
# The value should be a comma separated strings where each string is as shown below
# InterfaceName:SubnetAddress:InterfaceType
# where InterfaceType can be either "1", "2", "3", "4", or "5"
# InterfaceType stand for the following values
# - 1 : PUBLIC
# - 2 : PRIVATE
# - 3 : DO NOT USE
# - 4 : ASM
# - 5 : ASM & PRIVATE
#
# For example: eth0:140.87.24.0:1,eth1:10.2.1.0:2,eth2:140.87.52.0:3
#
#-------------------------------------------------------------------------------
oracle.install.crs.config.networkInterfaceList=eth0:192.168.10.0:1,eth1:10.6.0.0:3,eth2:172.16.0.0:5
#------------------------------------------------------------------------------
# Specify 'true' if you would like to configure Grid Infrastructure Management
# Repository (GIMR), else specify 'false'.
# This option is only applicable when CRS_CONFIG is chosen as install option,
# and STANDALONE is chosen as cluster configuration.
#------------------------------------------------------------------------------
oracle.install.crs.configureGIMR=false
#------------------------------------------------------------------------------
# Create a separate ASM DiskGroup to store GIMR data.
# Specify 'true' if you would like to separate GIMR data with clusterware data,
# else specify 'false'
# Value should be 'true' for DOMAIN cluster configurations
# Value can be true/false for STANDALONE cluster configurations.
#------------------------------------------------------------------------------
oracle.install.asm.configureGIMRDataDG=false
################################################################################
# #
# SECTION E - STORAGE #
# #
################################################################################
#-------------------------------------------------------------------------------
# Specify the type of storage to use for Oracle Cluster Registry(OCR) and Voting
# Disks files. Only applicable for Standalone and MemberDB cluster.
# - FLEX_ASM_STORAGE
# - CLIENT_ASM_STORAGE
# - FILE_SYSTEM_STORAGE
#
# Option FILE_SYSTEM_STORAGE is only for STANDALONE cluster configuration.
#-------------------------------------------------------------------------------
oracle.install.crs.config.storageOption=FLEX_ASM_STORAGE
#-------------------------------------------------------------------------------
# These properties are applicable only if FILE_SYSTEM_STORAGE is chosen for
# storing OCR and voting disk
# Specify the location(s) for OCR and voting disks
# Three(3) or one(1) location(s) should be specified for OCR and voting disk,
# separated by commas.
# Example:
# For Unix based Operating System:
# oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=/oradbocfs/storage/vdsk1,/oradbocfs/storage/vdsk2,/oradbocfs/storage/vdsk3
# oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=/oradbocfs/storage/ocr1,/oradbocfs/storage/ocr2,/oradbocfs/storage/ocr3
# For Windows based Operating System OCR/VDSK on shared storage is not supported.
#-------------------------------------------------------------------------------
oracle.install.crs.config.sharedFileSystemStorage.votingDiskLocations=
oracle.install.crs.config.sharedFileSystemStorage.ocrLocations=
################################################################################
# #
# SECTION F - IPMI #
# #
################################################################################
#-------------------------------------------------------------------------------
# Specify 'true' if you would like to configure Intelligent Power Management interface
# (IPMI), else specify 'false'
#-------------------------------------------------------------------------------
oracle.install.crs.config.useIPMI=false
#-------------------------------------------------------------------------------
# Applicable only if you choose to configure IPMI
# i.e. oracle.install.crs.config.useIPMI=true
# Specify the username and password for using IPMI service
#-------------------------------------------------------------------------------
oracle.install.crs.config.ipmi.bmcUsername=
oracle.install.crs.config.ipmi.bmcPassword=
################################################################################
# #
# SECTION G - ASM #
# #
################################################################################
#-------------------------------------------------------------------------------
# Password for SYS user of Oracle ASM
#-------------------------------------------------------------------------------
oracle.install.asm.SYSASMPassword=
#-------------------------------------------------------------------------------
# The ASM DiskGroup
#
# Example: oracle.install.asm.diskGroup.name=data
#
#-------------------------------------------------------------------------------
oracle.install.asm.diskGroup.name=OCR
#-------------------------------------------------------------------------------
# Redundancy level to be used by ASM.
# It can be one of the following
# - NORMAL
# - HIGH
# - EXTERNAL
# - FLEX# - EXTENDED (required if oracle.install.crs.config.ClusterConfiguration=EXTENDED)
# Example: oracle.install.asm.diskGroup.redundancy=NORMAL
#
#-------------------------------------------------------------------------------
oracle.install.asm.diskGroup.redundancy=NORMAL
#-------------------------------------------------------------------------------
# Allocation unit size to be used by ASM.
# It can be one of the following values
# - 1
# - 2
# - 4
# - 8
# - 16
# Example: oracle.install.asm.diskGroup.AUSize=4
# size unit is MB
#
#-------------------------------------------------------------------------------
oracle.install.asm.diskGroup.AUSize=4
#-------------------------------------------------------------------------------
# Failure Groups for the disk group
# If configuring for Extended cluster specify as list of "failure group name:site"
# tuples.
# Else just specify as list of failure group names
#-------------------------------------------------------------------------------
oracle.install.asm.diskGroup.FailureGroups=
#-------------------------------------------------------------------------------
# List of disks and their failure groups to create a ASM DiskGroup
# (Use this if each of the disks have an associated failure group)
# Failure Groups are not required if oracle.install.asm.diskGroup.redundancy=EXTERNAL
# Example:
# For Unix based Operating System:
# oracle.install.asm.diskGroup.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName
# For Windows based Operating System:
# oracle.install.asm.diskGroup.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName
#
#-------------------------------------------------------------------------------
oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/oracleasm/disks/ASM_OCR1,,/dev/oracleasm/disks/ASM_OCR2,,/dev/oracleasm/disks/ASM_OCR3,
#-------------------------------------------------------------------------------
# List of disks to create a ASM DiskGroup
# (Use this variable only if failure groups configuration is not required)
# Example:
# For Unix based Operating System:
# oracle.install.asm.diskGroup.disks=/oracle/asm/disk1,/oracle/asm/disk2
# For Windows based Operating System:
# oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1
#
#-------------------------------------------------------------------------------
oracle.install.asm.diskGroup.disks=/dev/oracleasm/disks/ASM_OCR1,/dev/oracleasm/disks/ASM_OCR2,/dev/oracleasm/disks/ASM_OCR3
#-------------------------------------------------------------------------------
# List of failure groups to be marked as QUORUM.
# Quorum failure groups contain only voting disk data, no user data is stored
# Example:
# oracle.install.asm.diskGroup.quorumFailureGroupNames=FGName1,FGName2
#-------------------------------------------------------------------------------
oracle.install.asm.diskGroup.quorumFailureGroupNames=
#-------------------------------------------------------------------------------
# The disk discovery string to be used to discover the disks used create a ASM DiskGroup
#
# Example:
# For Unix based Operating System:
# oracle.install.asm.diskGroup.diskDiscoveryString=/oracle/asm/*
# For Windows based Operating System:
# oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK*
#
#-------------------------------------------------------------------------------
oracle.install.asm.diskGroup.diskDiscoveryString=/dev/oracleasm/disks/*
#-------------------------------------------------------------------------------
# Password for ASMSNMP account
# ASMSNMP account is used by Oracle Enterprise Manager to monitor Oracle ASM instances
#-------------------------------------------------------------------------------
oracle.install.asm.monitorPassword=
#-------------------------------------------------------------------------------
# GIMR Storage data ASM DiskGroup
# Applicable only when
# oracle.install.asm.configureGIMRDataDG=true
# Example: oracle.install.asm.GIMRDG.name=MGMT
#
#-------------------------------------------------------------------------------
oracle.install.asm.gimrDG.name=
#-------------------------------------------------------------------------------
# Redundancy level to be used by ASM.
# It can be one of the following
# - NORMAL
# - HIGH
# - EXTERNAL
# - FLEX# - EXTENDED (only if oracle.install.crs.config.ClusterConfiguration=EXTENDED)
# Example: oracle.install.asm.gimrDG.redundancy=NORMAL
#
#-------------------------------------------------------------------------------
oracle.install.asm.gimrDG.redundancy=
#-------------------------------------------------------------------------------
# Allocation unit size to be used by ASM.
# It can be one of the following values
# - 1
# - 2
# - 4
# - 8
# - 16
# Example: oracle.install.asm.gimrDG.AUSize=4
# size unit is MB
#
#-------------------------------------------------------------------------------
oracle.install.asm.gimrDG.AUSize=1
#-------------------------------------------------------------------------------
# Failure Groups for the GIMR storage data ASM disk group
# If configuring for Extended cluster specify as list of "failure group name:site"
# tuples.
# Else just specify as list of failure group names
#-------------------------------------------------------------------------------
oracle.install.asm.gimrDG.FailureGroups=
#-------------------------------------------------------------------------------
# List of disks and their failure groups to create GIMR data ASM DiskGroup
# (Use this if each of the disks have an associated failure group)
# Failure Groups are not required if oracle.install.asm.gimrDG.redundancy=EXTERNAL
# Example:
# For Unix based Operating System:
# oracle.install.asm.gimrDG.disksWithFailureGroupNames=/oracle/asm/disk1,FGName,/oracle/asm/disk2,FGName
# For Windows based Operating System:
# oracle.install.asm.gimrDG.disksWithFailureGroupNames=\\.\ORCLDISKDATA0,FGName,\\.\ORCLDISKDATA1,FGName
#
#-------------------------------------------------------------------------------
oracle.install.asm.gimrDG.disksWithFailureGroupNames=
#-------------------------------------------------------------------------------
# List of disks to create GIMR data ASM DiskGroup
# (Use this variable only if failure groups configuration is not required)
# Example:
# For Unix based Operating System:
# oracle.install.asm.gimrDG.disks=/oracle/asm/disk1,/oracle/asm/disk2
# For Windows based Operating System:
# oracle.install.asm.gimrDG.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1
#
#-------------------------------------------------------------------------------
oracle.install.asm.gimrDG.disks=
#-------------------------------------------------------------------------------
# List of failure groups to be marked as QUORUM.
# Quorum failure groups contain only voting disk data, no user data is stored
# Example:
# oracle.install.asm.gimrDG.quorumFailureGroupNames=FGName1,FGName2
#-------------------------------------------------------------------------------
oracle.install.asm.gimrDG.quorumFailureGroupNames=
#-------------------------------------------------------------------------------
# Configure AFD - ASM Filter Driver
# Applicable only for FLEX_ASM_STORAGE option
# Specify 'true' if you want to configure AFD, else specify 'false'
#-------------------------------------------------------------------------------
oracle.install.asm.configureAFD=false
#-------------------------------------------------------------------------------
# Configure RHPS - Rapid Home Provisioning Service
# Applicable only for DOMAIN cluster configuration
# Specify 'true' if you want to configure RHP service, else specify 'false'
#-------------------------------------------------------------------------------
oracle.install.crs.configureRHPS=false
################################################################################
# #
# SECTION H - UPGRADE #
# #
################################################################################
#-------------------------------------------------------------------------------
# Specify whether to ignore down nodes during upgrade operation.
# Value should be 'true' to ignore down nodes otherwise specify 'false'
#-------------------------------------------------------------------------------
oracle.install.crs.config.ignoreDownNodes=false
################################################################################
# #
# MANAGEMENT OPTIONS #
# #
################################################################################
#-------------------------------------------------------------------------------
# Specify the management option to use for managing Oracle Grid Infrastructure
# Options are:
# 1. CLOUD_CONTROL - If you want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control.
# 2. NONE -If you do not want to manage your Oracle Grid Infrastructure with Enterprise Manager Cloud Control.
#-------------------------------------------------------------------------------
oracle.install.config.managementOption=NONE
#-------------------------------------------------------------------------------
# Specify the OMS host to connect to Cloud Control.
# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL
#-------------------------------------------------------------------------------
oracle.install.config.omsHost=
#-------------------------------------------------------------------------------
# Specify the OMS port to connect to Cloud Control.
# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL
#-------------------------------------------------------------------------------
oracle.install.config.omsPort=0
#-------------------------------------------------------------------------------
# Specify the EM Admin user name to use to connect to Cloud Control.
# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL
#-------------------------------------------------------------------------------
oracle.install.config.emAdminUser=
#-------------------------------------------------------------------------------
# Specify the EM Admin password to use to connect to Cloud Control.
# Applicable only when oracle.install.config.managementOption=CLOUD_CONTROL
#-------------------------------------------------------------------------------
oracle.install.config.emAdminPassword=
################################################################################
# #
# Root script execution configuration #
# #
################################################################################
#-------------------------------------------------------------------------------------------------------
# Specify the root script execution mode.
#
# - true : To execute the root script automatically by using the appropriate configuration methods.
# - false : To execute the root script manually.
#
# If this option is selected, password should be specified on the console.
#-------------------------------------------------------------------------------------------------------
oracle.install.crs.rootconfig.executeRootScript=true
#--------------------------------------------------------------------------------------
# Specify the configuration method to be used for automatic root script execution.
#
# Following are the possible choices:
# - ROOT
# - SUDO
#--------------------------------------------------------------------------------------
oracle.install.crs.rootconfig.configMethod=ROOT
#--------------------------------------------------------------------------------------
# Specify the absolute path of the sudo program.
#
# Applicable only when SUDO configuration method was chosen.
#--------------------------------------------------------------------------------------
oracle.install.crs.rootconfig.sudoPath=
#--------------------------------------------------------------------------------------
# Specify the name of the user who is in the sudoers list.
# Applicable only when SUDO configuration method was chosen.
# Note:For Grid Infrastructure for Standalone server installations,the sudo user name must be the username of the user performing the installation.
#--------------------------------------------------------------------------------------
oracle.install.crs.rootconfig.sudoUserName=
#--------------------------------------------------------------------------------------
# Specify the nodes batch map.
#
# This should be a comma separated list of node:batch pairs.
# During upgrade, you can sequence the automatic execution of root scripts
# by pooling the nodes into batches.
# A maximum of three batches can be specified.
# Installer will execute the root scripts on all the nodes in one batch before
# proceeding to next batch.
# Root script execution on the local node must be in Batch 1.
#
# Examples:
# 1. oracle.install.crs.config.batchinfo=Node1:1,Node2:2,Node3:2,Node4:3
# 2. oracle.install.crs.config.batchinfo=Node1:1,Node2:2,Node3:2,Node4:2
# 3. oracle.install.crs.config.batchinfo=Node1:1,Node2:1,Node3:2,Node4:3
#
# Applicable only for UPGRADE install option.
#--------------------------------------------------------------------------------------
oracle.install.crs.config.batchinfo=
################################################################################
# #
# APPLICATION CLUSTER OPTIONS #
# #
################################################################################
#-------------------------------------------------------------------------------
# Specify the Virtual hostname to configure virtual access for your Application
# The value to be specified for Virtual hostname is optional.
#-------------------------------------------------------------------------------
oracle.install.crs.app.applicationAddress=
#################################################################################
# #
# DELETE NODE OPTIONS #
# #
#################################################################################
#--------------------------------------------------------------------------------
# Specify the node names to delete nodes from cluster.
# Delete node will be performed only for the remote nodes from the cluster.
#--------------------------------------------------------------------------------
oracle.install.crs.deleteNode.nodes=
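* (optional) review only the active, non-comment settings before launching; the stock file is mostly comments:
[grid@oracle2 ~]$ grep -Ev '^#|^$' /u01/Downloads/GridInstall.rsp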
* launch the installation on node 1
[grid@oracle2 grid]$ ./gridSetup.sh -silent -ignorePrereqFailure -responseFile /u01/Downloads/GridInstall.rsp
* once the installation completes, run the following scripts as root on both nodes
__oracle2__
[root@oracle2 ~]# /u01/app/19c/oraInventory/orainstRoot.sh
Modification des droits d'accès de /u01/app/19c/oraInventory.
Ajout de droits d'accès en lecture/écriture pour le groupe.
Suppression des droits d'accès en lecture/écriture/exécution pour le monde.
Modification du nom de groupe de /u01/app/19c/oraInventory en oinstall.
L'exécution du script est terminée.
[root@oracle2 ~]# /u01/app/19c/grid_home/root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/19c/grid_home
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/19c/grid_home/crs/install/crsconfig_params
The log of current session can be found at:
/u01/app/19c/grid_base/crsdata/oracle2/crsconfig/rootcrs_oracle2_2025-08-30_04-43-39PM.log
2025/08/30 16:43:44 CLSRSC-456: The Oracle Grid Infrastructure has already been configured.
__oracle3__
[root@oracle3 ~]# /u01/app/19c/oraInventory/orainstRoot.sh
Modification des droits d'accès de /u01/app/19c/oraInventory.
Ajout de droits d'accès en lecture/écriture pour le groupe.
Suppression des droits d'accès en lecture/écriture/exécution pour le monde.
Modification du nom de groupe de /u01/app/19c/oraInventory en oinstall.
L'exécution du script est terminée.
[root@oracle3 ~]# /u01/app/19c/grid_home/root.sh
Performing root user operation.
The following environment variables are set as:
ORACLE_OWNER= grid
ORACLE_HOME= /u01/app/19c/grid_home
Copying dbhome to /usr/local/bin ...
Copying oraenv to /usr/local/bin ...
Copying coraenv to /usr/local/bin ...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/19c/grid_home/crs/install/crsconfig_params
The log of current session can be found at:
/u01/app/19c/grid_base/crsdata/oracle3/crsconfig/rootcrs_oracle3_2025-08-30_04-47-08PM.log
2025/08/30 16:47:13 CLSRSC-456: The Oracle Grid Infrastructure has already been configured.
[root@oracle3 ~]#
* once the installation is complete, the ASM instance (pmon) can be seen running on both nodes
[root@oracle2 ~]# ps -ef | grep pmon
root 6588 5101 0 19:03 pts/1 00:00:00 grep --color=auto pmon
grid 8284 1 0 14:15 ? 00:00:01 asm_pmon_+ASM1
[root@oracle3 ~]# ps -ef | grep pmon
grid 2388 1 0 14:25 ? 00:00:01 asm_pmon_+ASM2
root 5109 5369 0 19:03 pts/0 00:00:00 grep --color=auto pmon
\\
== 4/ Post Install ==
* customize the grid user's .bash_profile
**__oracle2__**
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
#
##### Oracle Env #####
export ORACLE_BASE=/u01/app/19c/grid_base
export ORACLE_HOME=/u01/app/19c/grid_home
export ORACLE_SID=+ASM1
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
PATH=$PATH:$HOME/.local/bin:$ORACLE_HOME/bin
**__oracle3__**
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
#
##### Oracle Env #####
export ORACLE_BASE=/u01/app/19c/grid_base
export ORACLE_HOME=/u01/app/19c/grid_home
export ORACLE_SID=+ASM2
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
PATH=$PATH:$HOME/.local/bin:$ORACLE_HOME/bin
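* (optional check) source the profile and query ASM as grid; asmcmd ships with the Grid home:
[grid@oracle2 ~]$ . ~/.bash_profile
[grid@oracle2 ~]$ asmcmd lsdg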
* check the cluster status
[root@oracle2 ~]# /u01/app/19c/grid_home/bin/crsctl check cluster -all
**************************************************************
oracle2:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
oracle3:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
* or, as grid/root, the full resource view
[root@oracle2 bin]# /u01/app/19c/grid_home/bin/crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
ONLINE ONLINE oracle2 STABLE
OFFLINE OFFLINE oracle3 STABLE
ora.chad
ONLINE ONLINE oracle2 STABLE
ONLINE ONLINE oracle3 STABLE
ora.net1.network
ONLINE ONLINE oracle2 STABLE
ONLINE ONLINE oracle3 STABLE
ora.ons
ONLINE ONLINE oracle2 STABLE
ONLINE ONLINE oracle3 STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
1 ONLINE ONLINE oracle2 STABLE
2 ONLINE ONLINE oracle3 STABLE
3 ONLINE OFFLINE STABLE
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE oracle3 STABLE
ora.LISTENER_SCAN2.lsnr
1 ONLINE ONLINE oracle2 STABLE
ora.LISTENER_SCAN3.lsnr
1 ONLINE ONLINE oracle2 STABLE
ora.OCR.dg(ora.asmgroup)
1 ONLINE ONLINE oracle2 STABLE
2 ONLINE ONLINE oracle3 STABLE
3 OFFLINE OFFLINE STABLE
ora.asm(ora.asmgroup)
1 ONLINE ONLINE oracle2 Started,STABLE
2 ONLINE ONLINE oracle3 Started,STABLE
3 OFFLINE OFFLINE STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
1 ONLINE ONLINE oracle2 STABLE
2 ONLINE ONLINE oracle3 STABLE
3 OFFLINE OFFLINE STABLE
ora.cvu
1 ONLINE ONLINE oracle2 STABLE
ora.oracle2.vip
1 ONLINE ONLINE oracle2 STABLE
ora.qosmserver
1 ONLINE ONLINE oracle2 STABLE
ora.scan1.vip
1 ONLINE ONLINE oracle3 STABLE
ora.scan2.vip
1 ONLINE ONLINE oracle2 STABLE
ora.scan3.vip
1 ONLINE ONLINE oracle2 STABLE
--------------------------------------------------------------------------------
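* (optional check) the SCAN and its listeners can also be queried with srvctl:
[grid@oracle2 ~]$ srvctl config scan
[grid@oracle2 ~]$ srvctl status scan_listener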
* change the startup ordering of the oracleasm service to tie it to the OS disk stack (iscsi/multipathd)
[root@oracle2 bin]# vi /usr/lib/systemd/system/oracleasm.service
[Unit]
Description=Load oracleasm Modules
Requires=multipathd.service iscsi.service multi-user.target
After=multipathd.service iscsi.service multi-user.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/sbin/oracleasm.init start_sysctl
ExecStop=/usr/sbin/oracleasm.init stop_sysctl
ExecReload=/usr/sbin/oracleasm.init restart_sysctl
[Install]
WantedBy=multi-user.target
[root@oracle3 bin]# vi /usr/lib/systemd/system/oracleasm.service
[Unit]
Description=Load oracleasm Modules
Requires=multipathd.service iscsi.service multi-user.target
After=multipathd.service iscsi.service multi-user.target
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/sbin/oracleasm.init start_sysctl
ExecStop=/usr/sbin/oracleasm.init stop_sysctl
ExecReload=/usr/sbin/oracleasm.init restart_sysctl
[Install]
WantedBy=multi-user.target
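* (suggested follow-up, not in the original notes) reload systemd and re-enable the unit so the new ordering takes effect at next boot:
[root@oracle2 ~]# systemctl daemon-reload
[root@oracle2 ~]# systemctl enable oracleasm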