SUSE 11 SP2 Oracle 11.2.0.4 installation notes

SUSE network configuration:



Settings:

IP: 192.168.21.172

Netmask: 255.255.255.0

Gateway: 192.168.21.2

DNS: 8.8.8.8
     8.8.4.4

1. Set the IP address
vi /etc/sysconfig/network/ifcfg-eth0   # edit the configuration file
BOOTPROTO='static'   # static IP
BROADCAST='192.168.21.255'   # broadcast address
IPADDR='192.168.21.172'   # IP address
NETMASK='255.255.255.0'   # netmask
NETWORK='192.168.21.0'   # network address
STARTMODE='auto'   # bring the interface up at boot
2. Set the default gateway
vi /etc/sysconfig/network/routes   # edit the routes file
default 192.168.21.2 - -


3. Set DNS
vi /etc/resolv.conf   # edit the file (note: the syntax is whitespace-separated, not '=')
nameserver 8.8.8.8
nameserver 8.8.4.4


rcnetwork restart             # restart networking (SLES wrapper)
service network restart       # equivalent
/etc/init.d/network restart   # equivalent


Configuration is complete; the SUSE Linux host can now reach the network.
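A quick sanity check (plain ip/ping usage; the addresses are the ones configured above):

ip route show        # expect: default via 192.168.21.2
ping -c 3 8.8.8.8    # confirm reachability through the gateway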




hosts file:


node1:/etc/sysconfig/network # cat /etc/hosts
#
# hosts         This file describes a number of hostname-to-address
#               mappings for the TCP/IP subsystem.  It is mostly
#               used at boot time, when no name servers are running.
#               On small systems, this file can be used instead of a
#               "named" name server.
# Syntax:
#   
# IP-Address  Full-Qualified-Hostname  Short-Hostname
#

127.0.0.1       localhost

# special IPv6 addresses
::1             localhost ipv6-localhost ipv6-loopback

fe00::0         ipv6-localnet

ff00::0         ipv6-mcastprefix
ff02::1         ipv6-allnodes
ff02::2         ipv6-allrouters
ff02::3         ipv6-allhosts
10.10.5.88      node1-priv
192.168.1.88    node1

NIC names are adjusted via udev: /etc/udev/rules.d/70-persistent-net.rules




Network interface configuration files: /etc/sysconfig/network/ifcfg-ethXXXX

Template:


node2-priv:/etc/sysconfig/network # cat ifcfg-eth0
BOOTPROTO='static'
BROADCAST='192.168.1.255'
IPADDR='192.168.1.99'
NETMASK='255.255.255.0'
STARTMODE='auto'


node1:/etc/udev/rules.d # cat /etc/hosts
#
# hosts         This file describes a number of hostname-to-address
#               mappings for the TCP/IP subsystem.  It is mostly
#               used at boot time, when no name servers are running.
#               On small systems, this file can be used instead of a
#               "named" name server.
# Syntax:
#   
# IP-Address  Full-Qualified-Hostname  Short-Hostname
#

127.0.0.1       localhost

# special IPv6 addresses
::1             localhost ipv6-localhost ipv6-loopback

fe00::0         ipv6-localnet

ff00::0         ipv6-mcastprefix
ff02::1         ipv6-allnodes
ff02::2         ipv6-allrouters
ff02::3         ipv6-allhosts
10.10.5.88      node1-priv
192.168.1.88    node1
192.168.1.77    node1-vip

10.10.5.99     node2-priv
192.168.1.99   node2
192.168.1.66   node2-vip


192.168.1.111 scanip



node1:/etc/udev/rules.d # service open-iscsi start
Loading iscsi modules:  tcp                                          done
Starting iSCSI initiator service:                                    done
Setting up iSCSI targets:                                            unused
node1:/etc/udev/rules.d # chkconfig open-iscsi on


node2:~/Desktop # iscsiadm -m discovery -t sendtargets -p 192.168.1.34
192.168.1.34:3260,1 iqn.2006-01.com.openfiler:tsn.df9cb4414a25
192.168.1.34:3260,1 iqn.2006-01.com.openfiler:tsn.e5261933f528
192.168.1.34:3260,1 iqn.2006-01.com.openfiler:tsn.9924e46174a2
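After discovery, each target still has to be logged in to, and the sessions made persistent across reboots. A minimal sketch using iscsiadm's node mode (same syntax as in the SUSE documentation excerpt at the end of these notes; repeat for each IQN listed above):

iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.df9cb4414a25 -p 192.168.1.34 --login
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.df9cb4414a25 -p 192.168.1.34 --op=update --name=node.startup --value=automatic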






Disable the firewall (as the transcript shows, SuSEfirewall2_init cannot be turned off while SuSEfirewall2_setup is still enabled):

node1:~/Desktop # chkconfig --list | grep fire
SuSEfirewall2_init        0:off  1:off  2:off  3:on   4:off  5:on   6:off
SuSEfirewall2_setup       0:off  1:off  2:off  3:on   4:off  5:on   6:off
node1:~/Desktop # chkconfig SuSEfirewall2_init off
insserv: FATAL: service SuSEfirewall2_init has to be enabled to use service SuSEfirewall2_setup
insserv: exiting now!
/sbin/insserv failed, exit code 1
node1:~/Desktop # chkconfig SuSEfirewall2_setup off
node1:~/Desktop # service SuSEfirewall2_setup stop
Shutting down the Firewall                                           done
node1:~/Desktop # service SuSEfirewall2_init stop


Enable root SSH login:

See sshd_config and ssh_config.
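A minimal sketch of the usual change (PermitRootLogin is a standard sshd_config directive; rcsshd is the SLES service wrapper):

vi /etc/ssh/sshd_config   # edit the sshd configuration
PermitRootLogin yes       # permit root logins over SSH
rcsshd restart            # restart sshd to apply the change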



Configure multipathing:


Required packages:
node1:~ # rpm -qa | grep device
device-mapper-32bit-1.02.63-18.25.1
device-mapper-1.02.63-18.25.1



Configuration file:
node2:~ # cp /usr/share/doc/packages/multipath-tools/multipath.conf.synthetic /etc/multipath.conf
node2:~ # vi /etc/multipath.conf
node2:/dev/mapper # cat /etc/multipath.conf
##
## This is a template multipath-tools configuration file
## Uncomment the lines relevant to your environment
##
defaults {
        udev_dir                /dev
        polling_interval        10
        path_selector           "round-robin 0"
        path_grouping_policy    multibus
        getuid_callout          "/lib/udev/scsi_id --whitelisted --device=/dev/%n"
        prio                    const
        path_checker            directio
        rr_min_io               100
        flush_on_last_del       no
        max_fds                 8192
        rr_weight               priorities
        failback                immediate
        no_path_retry           fail
        queue_without_daemon    no
        user_friendly_names     no

        # See /usr/share/doc/packages/device-mapper/12-dm-permissions.rules
        # to set mode/uid/gid.
}
blacklist {
#       wwid 26353900f02796769
#       devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
        devnode "sda"
#       devnode "^hd[a-z][[0-9]*]"
#       device {
#               vendor DEC.*
#               product MSA[15]00
#       }
}
#blacklist_exceptions {
#       devnode "^dasd[c-d]+[0-9]*"
#       wwid    "IBM.75000000092461.4d00.34"
#}
multipaths {
        multipath {
                wwid                    14f504e46494c45005970584e62562d317a364b2d5564424d
                alias                   data
                path_grouping_policy    multibus
                path_selector           "round-robin 0"
                failback                manual
                rr_weight               priorities
                no_path_retry           5
                rr_min_io               100
        }

        multipath {
                wwid                    14f504e46494c45007572374872332d7a4e644a2d67386e43
                alias                   ocr
                path_grouping_policy    multibus
                path_selector           "round-robin 0"
                failback                manual
                rr_weight               priorities
                no_path_retry           5
                rr_min_io               100
        }

        multipath {
                wwid                    14f504e46494c450045666f7978342d586841712d684a3632
                alias                   arch
                path_grouping_policy    multibus
                path_selector           "round-robin 0"
                failback                manual
                rr_weight               priorities
                no_path_retry           5
                rr_min_io               100
        }
}
       
       
/etc/init.d/multipathd restart   # restart multipathd to pick up the new configuration
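To have the multipath maps assembled automatically at boot as well (a sketch; boot.multipath and multipathd are the SLES 11 init scripts):

chkconfig boot.multipath on
chkconfig multipathd on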

node2:/dev/mapper # multipath -ll
arch (14f504e46494c450045666f7978342d586841712d684a3632) dm-0 OPNFILER,VIRTUAL-DISK
size=12G features='1 queue_if_no_path' hwhandler='0' wp=rw
`-+- policy='round-robin 0' prio=1 status=active
  `- 34:0:0:1 sde 8:64 active ready running
data (14f504e46494c45005970584e62562d317a364b2d5564424d) dm-1 OPNFILER,VIRTUAL-DISK
size=12G features='1 queue_if_no_path' hwhandler='0' wp=rw
`-+- policy='round-robin 0' prio=1 status=active
  `- 33:0:0:0 sdc 8:32 active ready running
ocr (14f504e46494c45007572374872332d7a4e644a2d67386e43) dm-2 OPNFILER,VIRTUAL-DISK
size=5.9G features='1 queue_if_no_path' hwhandler='0' wp=rw
`-+- policy='round-robin 0' prio=1 status=active
  |- 35:0:0:0 sdd 8:48 active ready running
  `- 34:0:0:0 sdb 8:16 active ready running




Example of the udev approach (not used for this install, which combines device-mapper with ASMLib; the steps below are still handy for finding the WWIDs of the shared disks):
node2:/etc # cat /etc/scsi_id.config
options=--whitelisted --replace-whitespace



node2:~ # for i in b c d e f g h i  
> do  
> echo "KERNEL==\"sd*\", SUBSYSTEM==\"block\", PROGRAM==\"/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""  
> done
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45005970584e62562d317a364b2d5564424d", NAME="asm-diskb", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45007572374872332d7a4e644a2d67386e43", NAME="asm-diskc", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45007572374872332d7a4e644a2d67386e43", NAME="asm-diskd", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c450045666f7978342d586841712d684a3632", NAME="asm-diske", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="", NAME="asm-diskf", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="", NAME="asm-diskg", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="", NAME="asm-diskh", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="", NAME="asm-diski", OWNER="grid", GROUP="asmadmin", MODE="0660"












node2:~ # rpm -q binutils gcc gcc-32bit gcc-c++ glibc glibc-32bit glibc-devel glibc-devel-32bit ksh libaio libaio-32bit libaio-devel libaio-devel-32bit libstdc++33 libstdc++33-32bit libstdc++43 libstdc++43-32bit libstdc++43-devel libstdc++43-devel-32bit libgcc43 libstdc++-devel make sysstat unixODBC unixODBC-devel unixODBC-32bit unixODBC-devel-32bit libcap1  
binutils-2.21.1-0.7.25
gcc-4.3-62.198
gcc-32bit-4.3-62.198
gcc-c++-4.3-62.198
glibc-2.11.3-17.31.1
glibc-32bit-2.11.3-17.31.1
glibc-devel-2.11.3-17.31.1
glibc-devel-32bit-2.11.3-17.31.1
ksh-93u-0.6.1
libaio-0.3.109-0.1.46
libaio-32bit-0.3.109-0.1.46
libaio-devel-0.3.109-0.1.46
libaio-devel-32bit-0.3.109-0.1.46
libstdc++33-3.3.3-11.9
libstdc++33-32bit-3.3.3-11.9
package libstdc++43 is not installed
package libstdc++43-32bit is not installed
libstdc++43-devel-4.3.4_20091019-0.22.17
package libstdc++43-devel-32bit is not installed
package libgcc43 is not installed
libstdc++-devel-4.3-62.198
make-3.81-128.20
sysstat-8.1.5-7.32.1
package unixODBC is not installed
package unixODBC-devel is not installed
package unixODBC-32bit is not installed
package unixODBC-devel-32bit is not installed
package libcap1 is not installed

Packages reported as missing:
package libstdc++43 is not installed
package libstdc++43-32bit is not installed
package libstdc++43-devel-32bit is not installed
package libgcc43 is not installed
package unixODBC is not installed
package unixODBC-devel is not installed
package unixODBC-32bit is not installed
package unixODBC-devel-32bit is not installed
package libcap1 is not installed
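The missing packages can be pulled from the SLES repositories before re-checking (a sketch; exact availability depends on the configured repositories, and libstdc++43, libstdc++43-32bit and libgcc43 turn out to be ignorable below):

zypper install libstdc++43-devel-32bit unixODBC unixODBC-devel unixODBC-32bit unixODBC-devel-32bit libcap1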

After installing them via YaST:

node2:~ # rpm -q binutils gcc gcc-32bit gcc-c++ glibc glibc-32bit glibc-devel glibc-devel-32bit ksh libaio libaio-32bit libaio-devel libaio-devel-32bit libstdc++33 libstdc++33-32bit libstdc++43 libstdc++43-32bit libstdc++43-devel libstdc++43-devel-32bit libgcc43 libstdc++-devel make sysstat unixODBC unixODBC-devel unixODBC-32bit unixODBC-devel-32bit libcap1  
binutils-2.21.1-0.7.25
gcc-4.3-62.198
gcc-32bit-4.3-62.198
gcc-c++-4.3-62.198
glibc-2.11.3-17.31.1
glibc-32bit-2.11.3-17.31.1
glibc-devel-2.11.3-17.31.1
glibc-devel-32bit-2.11.3-17.31.1
ksh-93u-0.6.1
libaio-0.3.109-0.1.46
libaio-32bit-0.3.109-0.1.46
libaio-devel-0.3.109-0.1.46
libaio-devel-32bit-0.3.109-0.1.46
libstdc++33-3.3.3-11.9
libstdc++33-32bit-3.3.3-11.9
package libstdc++43 is not installed
package libstdc++43-32bit is not installed          ####can ignore
libstdc++43-devel-4.3.4_20091019-0.22.17
libstdc++43-devel-32bit-4.3.4_20091019-0.22.17
package libgcc43 is not installed                   #### can ignore
libstdc++-devel-4.3-62.198
make-3.81-128.20
sysstat-8.1.5-7.32.1
unixODBC-2.2.12-198.17
unixODBC-devel-2.2.12-198.17
unixODBC-32bit-2.2.12-198.17
unixODBC-devel-32bit-2.2.12-198.17
libcap1-1.10-6.10


Remove the orarun package (it is what sets the environment variables shown below):

node1:/etc/init.d # env | grep ORA
ORA_CRS_HOME=/opt/oracle/product/11gR1/crs
ORA_ASM_HOME=/opt/oracle/product/11gR1/asm
ORACLE_SID=orcl
ORACLE_BASE=/opt/oracle
ORACLE_HOME=/opt/oracle/product/11gR1/db
node1:/etc/init.d # rpm -qa | grep orarun
orarun-1.9-172.20.21.54
node1:/etc/init.d # rpm -e orarun-1.9-172.20.21.54
node1:/etc/init.d # rpm -qa | grep orarun



Log out and back in so the environment variables above are cleared.

Remove the user and groups created by orarun:

node1:/etc/init.d # id oracle
uid=104(oracle) gid=107(oinstall) groups=108(DBA),107(oinstall)
node1:/etc/init.d # id grid
id: grid: No such user
node1:/etc/init.d # groupdel oinstall
groupdel: GID `107' is primary group of `oracle'.
groupdel: Cannot remove user's primary group.
node1:/etc/init.d # userdel oracle
no crontab for oracle
node1:/etc/init.d # groupdel oinstall
node1:/etc/init.d # groupdel dba





Create groups and users:

groupadd -g 1000 oinstall   
groupadd -g 1200 asmadmin  
groupadd -g 1201 asmdba   
groupadd -g 1202 asmoper   
groupadd -g 1300 dba  
groupadd -g 1301 oper  
useradd -m -u 1100 -g oinstall -G asmadmin,asmdba,asmoper,dba -d /home/grid -s /bin/bash -c "Grid Infrastructure Owner" grid  
useradd -m -u 1101 -g oinstall -G dba,oper,asmdba,asmadmin -d /home/oracle -s /bin/bash -c "Oracle Software Owner" oracle  
mkdir -p /u01/app/grid  
mkdir -p /u01/app/11.2.0/grid  
chown -R grid:oinstall /u01  
mkdir -p /u01/app/oracle  
chown -R oracle:oinstall /u01/app/oracle  
chmod -R 775 /u01  
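The memberships can be verified the same way as before:

id grid     # expect gid=1000(oinstall), groups asmadmin,asmdba,asmoper,dba
id oracle   # expect gid=1000(oinstall), groups dba,oper,asmdba,asmadmin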



Edit the kernel parameters:

vi /etc/sysctl.conf


net.ipv4.icmp_echo_ignore_broadcasts = 1  
net.ipv4.conf.all.rp_filter = 1  
fs.inotify.max_user_watches = 65536  
net.ipv4.conf.default.promote_secondaries = 1  
net.ipv4.conf.all.promote_secondaries = 1  
fs.aio-max-nr = 1048576  
fs.file-max = 6815744  
kernel.shmall = 2097152  
kernel.shmmax = 536870912  
kernel.shmmni = 4096  
kernel.sem = 250 32000 100 128  
net.ipv4.ip_local_port_range = 9000 65500  
net.core.rmem_default = 262144  
net.core.rmem_max = 4194304  
net.core.wmem_default = 262144  
net.core.wmem_max = 1048576  
vm.hugetlb_shm_group = 1000  
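Apply the parameters to the running kernel without a reboot (standard sysctl usage):

sysctl -p    # load /etc/sysctl.conf and print the values as they are applied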


vi /etc/security/limits.conf

oracle soft nproc 2047  
oracle hard nproc 16384  
oracle soft nofile 1024  
oracle hard nofile 65536  
grid soft nproc 2047  
grid hard nproc 16384  
grid soft nofile 1024  
grid hard nofile 65536
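For these limits to be enforced at login, pam_limits must be active. On SLES 11 it usually already is, but if not, a line like the following in /etc/pam.d/login is the common fix (an assumption; verify against your PAM configuration):

session  required  pam_limits.so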


Set the user passwords:
passwd oracle
passwd grid

Set up each user's .profile:



oracle user .profile

ORACLE_SID=orcl1; export ORACLE_SID  
ORACLE_UNQNAME=zwc; export ORACLE_UNQNAME  
JAVA_HOME=/usr/local/java; export JAVA_HOME  
ORACLE_BASE=/u01/app/oracle; export ORACLE_BASE  
ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1; export ORACLE_HOME  
ORACLE_PATH=/u01/app/common/oracle/sql; export ORACLE_PATH  
ORACLE_TERM=xterm; export ORACLE_TERM  
  
TNS_ADMIN=$ORACLE_HOME/network/admin; export TNS_ADMIN  
ORA_NLS11=$ORACLE_HOME/nls/data; export ORA_NLS11  
NLS_LANG=AMERICAN_AMERICA.ZHS16GBK; export NLS_LANG  
  
PATH=.:${JAVA_HOME}/bin:${PATH}:$HOME/bin:$ORACLE_HOME/bin:$ORACLE_HOME/OPatch  
PATH=${PATH}:/usr/bin:/bin:/usr/bin/X11:/usr/local/bin  
PATH=${PATH}:/u01/app/common/oracle/bin  
export PATH  
  
LD_LIBRARY_PATH=$ORACLE_HOME/lib  
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib  
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib  
export LD_LIBRARY_PATH  
  
CLASSPATH=$ORACLE_HOME/JRE  
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/jlib  
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/rdbms/jlib  
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/network/jlib  
export CLASSPATH  
  
THREADS_FLAG=native; export THREADS_FLAG  
  
export TEMP=/tmp  
export TMPDIR=/tmp  
  
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then  
    if [ $SHELL = "/bin/ksh" ]; then  
        ulimit -p 16384  
        ulimit -n 65536  
    else  
        ulimit -u 16384 -n 65536  
    fi  
        umask 022  
fi  
  
#alias sqlplus="rlwrap sqlplus"  
#alias rman="rlwrap rman"  
#alias asmcmd="rlwrap asmcmd"  
alias base="cd $ORACLE_BASE"  
alias home="cd $ORACLE_HOME"  

grid user .profile

ORACLE_SID=+ASM1; export ORACLE_SID  
JAVA_HOME=/usr/local/java; export JAVA_HOME  
ORACLE_BASE=/u01/app/grid; export ORACLE_BASE  
ORACLE_HOME=/u01/app/11.2.0/grid; export ORACLE_HOME  
ORACLE_PATH=/u01/app/oracle/common/oracle/sql; export ORACLE_PATH  
ORACLE_TERM=xterm; export ORACLE_TERM  
  
TNS_ADMIN=$ORACLE_HOME/network/admin; export TNS_ADMIN  
ORA_NLS11=$ORACLE_HOME/nls/data; export ORA_NLS11  
NLS_LANG=AMERICAN_AMERICA.ZHS16GBK; export NLS_LANG  
  
PATH=.:${JAVA_HOME}/bin:${PATH}:$HOME/bin:$ORACLE_HOME/bin:$ORACLE_HOME/OPatch  
PATH=${PATH}:/usr/bin:/bin:/usr/bin/X11:/usr/local/bin  
PATH=${PATH}:/u01/app/common/oracle/bin  
export PATH  
  
LD_LIBRARY_PATH=$ORACLE_HOME/lib  
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib  
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib  
export LD_LIBRARY_PATH  
  
CLASSPATH=$ORACLE_HOME/JRE  
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/jlib  
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/rdbms/jlib  
CLASSPATH=${CLASSPATH}:$ORACLE_HOME/network/jlib  
export CLASSPATH  
  
THREADS_FLAG=native; export THREADS_FLAG  
  
export TEMP=/tmp  
export TMPDIR=/tmp  
  
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then  
    if [ $SHELL = "/bin/ksh" ]; then  
        ulimit -p 16384  
        ulimit -n 65536  
    else  
        ulimit -u 16384 -n 65536  
    fi  
        umask 022  
fi  
  
#alias sqlplus="rlwrap sqlplus"  
#alias rman="rlwrap rman"  
#alias asmcmd="rlwrap asmcmd"  
alias base="cd $ORACLE_BASE"  
alias home="cd $ORACLE_HOME"


source .profile


Create the boot-time script that fixes the shared-disk permissions:



node2:/etc/init.d # cat after.local
#!/bin/sh
#

# shared ASM disks (OCR, data, arch)
chown grid:asmadmin /dev/mapper/ocr
chown grid:asmadmin /dev/mapper/data
chown grid:asmadmin /dev/mapper/arch

chmod 0660 /dev/mapper/ocr
chmod 0660 /dev/mapper/data
chmod 0660 /dev/mapper/arch
node2:/etc/init.d # chmod +x after.local
node2:/etc/init.d # ls -l after.local
-rwxr-xr-x 1 root root 225 Dec 18 19:02 after.local




Check whether the ASMLib prerequisite packages shipped with the system are installed:

node1:~/Desktop # rpm -qa | grep asm
plasma-theme-aya-4.3.5-0.3.30
oracleasm-support-2.1.7-1.SLE11
libasm1-0.152-4.7.86
oracleasm-kmp-trace-2.0.5_3.0.13_0.27-7.24.59
plasma-addons-4.3.5-0.1.70
oracleasm-2.0.5-7.24.59
oracleasm-kmp-default-2.0.5_3.0.13_0.27-7.24.59
oracleasmlib-2.0.4-1.SLE11
oracleasm-kmp-xen-2.0.5_3.0.13_0.27-7.24.59
plasmoid-quickaccess-0.8.1-2.1.98




Install the ASMLib packages:
Shipped with the system:
node1:~/Desktop # rpm -qa | grep oracle
oracleasm-2.0.5-7.24.59
oracleasm-kmp-default-2.0.5_3.0.13_0.27-7.24.59


Downloaded from Oracle's website:
SUSE 11 itself ships some of the support packages ASMLib needs; in the YaST software manager, search for the keyword "oracle" and select them all to install. Then install the ASMLib packages from oracle.com:
oracleasm-support-2.1.7-1.SLE11
oracleasmlib-2.0.4-1.SLE11

After installation:
node1:~ # rpm -qa | grep oracle
oracleasm-2.0.5-7.24.59
oracleasm-kmp-default-2.0.5_3.0.13_0.27-7.24.59
oracleasm-support-2.1.7-1.SLE11
oracleasmlib-2.0.4-1.SLE11





Partition the shared disks:
fdisk /dev/mapper/ocr
fdisk /dev/mapper/data
fdisk /dev/mapper/arch
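Create one primary partition on each device. If the *_part1 mappings listed below do not appear automatically, kpartx (part of multipath-tools) can create them:

kpartx -a /dev/mapper/ocr   # add partition mappings for ocr; repeat for data and arch
multipath -r                # alternatively, reload all multipath maps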

Device mappings after partitioning:
node1:/dev/mapper # ls -l
total 0
lrwxrwxrwx 1 root root       7 Dec 18 13:34 arch -> ../dm-1
lrwxrwxrwx 1 root root       7 Dec 18 13:34 arch_part1 -> ../dm-5
crw-rw---- 1 root root 10, 236 Dec 18 13:30 control
lrwxrwxrwx 1 root root       7 Dec 18 13:34 data -> ../dm-0
lrwxrwxrwx 1 root root       7 Dec 18 13:34 data_part1 -> ../dm-4
lrwxrwxrwx 1 root root       7 Dec 18 13:33 ocr -> ../dm-2
lrwxrwxrwx 1 root root       7 Dec 18 13:33 ocr_part1 -> ../dm-3

Create the ASM disks:
/etc/init.d/oracleasm configure

Answer the prompts: default user grid, default group asmadmin, start the driver on boot y, scan for disks on boot y.

node1:/dev/mapper # /etc/init.d/oracleasm createdisk ocr /dev/mapper/ocr_part1
Marking disk "ocr" as an ASM disk:                                                                                     done
node1:/dev/mapper # /etc/init.d/oracleasm createdisk data /dev/mapper/data_part1
Marking disk "data" as an ASM disk:                                                                                    done
node1:/dev/mapper # /etc/init.d/oracleasm createdisk arch /dev/mapper/arch_part1
Marking disk "arch" as an ASM disk:                                                                                    done
node1:/dev/mapper # /etc/init.d/oracleasm scandisks
Scanning the system for Oracle ASMLib disks:                                                                           done
node1:/dev/mapper # /etc/init.d/oracleasm listdisks
ARCH
DATA
OCR
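Each label can be mapped back to its backing device for verification (querydisk comes with oracleasm-support; the -p flag is from the 2.1.x tools and worth double-checking on your version):

/etc/init.d/oracleasm querydisk -p DATA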











Edit the /etc/sysconfig/oracleasm file:

node1:/dev # more /etc/sysconfig/oracleasm
#
# This is a configuration file for automatic loading of the Oracle
# Automatic Storage Management library kernel driver.  It is generated
# by running /etc/init.d/oracleasm configure.  Please use that method
# to modify this file.
#

# ORACLEASM_ENABLED: 'true' means to load the driver on boot.
ORACLEASM_ENABLED=true

# ORACLEASM_UID: Default user owning the /dev/oracleasm mount point.
ORACLEASM_UID=grid

# ORACLEASM_GID: Default group owning the /dev/oracleasm mount point.
ORACLEASM_GID=asmadmin

# ORACLEASM_SCANBOOT: 'true' means scan for ASM disks on boot.
ORACLEASM_SCANBOOT=true

# ORACLEASM_SCANORDER: Matching patterns to order disk scanning (changed to "dm")
ORACLEASM_SCANORDER="dm"

# ORACLEASM_SCANEXCLUDE: Matching patterns to exclude disks from scan (changed to "sd")
ORACLEASM_SCANEXCLUDE="sd"


Restart after editing:
node1:/dev # vi /etc/sysconfig/oracleasm
node1:/dev # /etc/init.d/oracleasm restart
Dropping Oracle ASMLib disks:                                                                                          done
Shutting down the Oracle ASMLib driver:                                                                                done
Initializing the Oracle ASMLib driver:   





At this point the installation itself can begin; the installer screenshots and steps are omitted. Two notes:
/etc/init.d/oracleasm deletedisk data fails unless it is executed as root.
Afterwards, check /etc/oratab: cat /etc/oratab








iSCSI configuration reference, excerpted from the official SUSE storage administration guide:


Configuring iSCSI Initiator
The iSCSI initiator, also called an iSCSI client, can be used to connect to any iSCSI target. This is not restricted to the iSCSI target solution explained in Section 14.2, Setting Up an iSCSI Target. The configuration of iSCSI initiator involves two major steps: the discovery of available iSCSI targets and the setup of an iSCSI session. Both can be done with YaST.

Section 14.3.1, Using YaST for the iSCSI Initiator Configuration

Section 14.3.2, Setting Up the iSCSI Initiator Manually

Section 14.3.3, The iSCSI Client Databases

14.3.1 Using YaST for the iSCSI Initiator Configuration
The iSCSI Initiator Overview in YaST is divided into three tabs:

Service: The Service tab can be used to enable the iSCSI initiator at boot time. It also offers to set a unique Initiator Name and an iSNS server to use for the discovery. The default port for iSNS is 3205.

Connected Targets: The Connected Targets tab gives an overview of the currently connected iSCSI targets. Like the Discovered Targets tab, it also gives the option to add new targets to the system.

On this page, you can select a target device, then toggle the start-up setting for each iSCSI target device:

Automatic: This option is used for iSCSI targets that are to be connected when the iSCSI service itself starts up. This is the typical configuration.

Onboot: This option is used for iSCSI targets that are to be connected during boot; that is, when root (/) is on iSCSI. As such, the iSCSI target device will be evaluated from the initrd on server boots.

Discovered Targets: Discovered Targets provides the possibility of manually discovering iSCSI targets in the network.

Configuring the iSCSI Initiator

Discovering iSCSI Targets by Using iSNS

Discovering iSCSI Targets Manually

Setting the Start-up Preference for iSCSI Target Devices

Configuring the iSCSI Initiator

Launch YaST as the root user.

Select Network Services > iSCSI Initiator (you can also use the yast2 iscsi-client command).

YaST opens to the iSCSI Initiator Overview page with the Service tab selected.


In the Service Start area, select one of the following:

When booting: Automatically start the initiator service on subsequent server reboots.

Manually (default): Start the service manually.

Specify or verify the Initiator Name.

Specify a well-formed iSCSI qualified name (IQN) for the iSCSI initiator on this server. The initiator name must be globally unique on your network. The IQN uses the following general format:

iqn.yyyy-mm.com.mycompany:n1:n2
where n1 and n2 are alphanumeric characters. For example:

iqn.1996-04.de.suse:01:9c83a3e15f64
The Initiator Name is automatically completed with the corresponding value from the /etc/iscsi/initiatorname.iscsi file on the server.

If the server has iBFT (iSCSI Boot Firmware Table) support, the Initiator Name is completed with the corresponding value in the iBFT, and you are not able to change the initiator name in this interface. Use the BIOS Setup to modify it instead. The iBFT is a block of information containing various parameters useful to the iSCSI boot process, including the iSCSI target and initiator descriptions for the server.

Use either of the following methods to discover iSCSI targets on the network.

iSNS: To use iSNS (Internet Storage Name Service) for discovering iSCSI targets, continue with Discovering iSCSI Targets by Using iSNS.

Discovered Targets: To discover iSCSI target devices manually, continue with Discovering iSCSI Targets Manually.

Discovering iSCSI Targets by Using iSNS

Before you can use this option, you must have already installed and configured an iSNS server in your environment. For information, see Section 13.0, iSNS for Linux.

In YaST, select iSCSI Initiator, then select the Service tab.

Specify the IP address of the iSNS server and port.

The default port is 3205.

On the iSCSI Initiator Overview page, click Finish to save and apply your changes.

Discovering iSCSI Targets Manually

Repeat the following process for each of the iSCSI target servers that you want to access from the server where you are setting up the iSCSI initiator.

In YaST, select iSCSI Initiator, then select the Discovered Targets tab.

Click Discovery to open the iSCSI Initiator Discovery dialog box.

Enter the IP address and change the port if needed. IPv6 addresses are supported.

The default port is 3260.

If authentication is required, deselect No Authentication, then specify the credentials for the Incoming or Outgoing authentication.

Click Next to start the discovery and connect to the iSCSI target server.

If credentials are required, after a successful discovery, use Login to activate the target.

You are prompted for authentication credentials to use the selected iSCSI target.

Click Next to finish the configuration.

If everything went well, the target now appears in Connected Targets.

The virtual iSCSI device is now available.

On the iSCSI Initiator Overview page, click Finish to save and apply your changes.

You can find the local device path for the iSCSI target device by using the lsscsi command:

lsscsi
[1:0:0:0]   disk    IET      VIRTUAL-DISK     0     /dev/sda
Setting the Start-up Preference for iSCSI Target Devices

In YaST, select iSCSI Initiator, then select the Connected Targets tab to view a list of the iSCSI target devices that are currently connected to the server.

Select the iSCSI target device that you want to manage.

Click Toggle Start-Up to modify the setting:

Automatic: This option is used for iSCSI targets that are to be connected when the iSCSI service itself starts up. This is the typical configuration.

Onboot: This option is used for iSCSI targets that are to be connected during boot; that is, when root (/) is on iSCSI. As such, the iSCSI target device will be evaluated from the initrd on server boots.

Click Finish to save and apply your changes.

14.3.2 Setting Up the iSCSI Initiator Manually
Both the discovery and the configuration of iSCSI connections require a running iscsid. When running the discovery the first time, the internal database of the iSCSI initiator is created in the directory /var/lib/open-iscsi.

If your discovery is password protected, provide the authentication information to iscsid. Because the internal database does not exist when doing the first discovery, it cannot be used at this time. Instead, the configuration file /etc/iscsid.conf must be edited to provide the information. To add your password information for the discovery, add the following lines to the end of /etc/iscsid.conf:

discovery.sendtargets.auth.authmethod = CHAP
discovery.sendtargets.auth.username = <username>
discovery.sendtargets.auth.password = <password>
The discovery stores all received values in an internal persistent database. In addition, it displays all detected targets. Run this discovery with the following command:

iscsiadm -m discovery --type=st --portal=<targetip>
The output should look like the following:

10.44.171.99:3260,1 iqn.2006-02.com.example.iserv:systems
To discover the available targets on an iSNS server, use the following command:

iscsiadm --mode discovery --type isns --portal <targetip>
For each target defined on the iSCSI target, one line appears. For more information about the stored data, see Section 14.3.3, The iSCSI Client Databases.

The special --login option of iscsiadm creates all needed devices:

iscsiadm -m node -n iqn.2006-02.com.example.iserv:systems --login
The newly generated devices show up in the output of lsscsi and can now be accessed by mount.
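For example (a sketch only; the device name depends on the system):

lsscsi                  # locate the newly created /dev/sd* node
mount /dev/sdb1 /mnt    # then mount a partition on it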

14.3.3 The iSCSI Client Databases
All information that was discovered by the iSCSI initiator is stored in two database files that reside in /var/lib/open-iscsi. There is one database for the discovery of targets and one for the discovered nodes. When accessing a database, you first must select if you want to get your data from the discovery or from the node database. Do this with the -m discovery and -m node parameters of iscsiadm. Using iscsiadm just with one of these parameters gives an overview of the stored records:

iscsiadm -m discovery
10.44.171.99:3260,1 iqn.2006-02.com.example.iserv:systems
The target name in this example is iqn.2006-02.com.example.iserv:systems. This name is needed for all actions that relate to this special data set. To examine the content of the data record with the ID iqn.2006-02.com.example.iserv:systems, use the following command:

iscsiadm -m node --targetname iqn.2006-02.com.example.iserv:systems
node.name = iqn.2006-02.com.example.iserv:systems
node.transport_name = tcp
node.tpgt = 1
node.active_conn = 1
node.startup = manual
node.session.initial_cmdsn = 0
node.session.reopen_max = 32
node.session.auth.authmethod = CHAP
node.session.auth.username = joe
node.session.auth.password = ********
node.session.auth.username_in = <empty>
node.session.auth.password_in = <empty>
node.session.timeo.replacement_timeout = 0
node.session.err_timeo.abort_timeout = 10
node.session.err_timeo.reset_timeout = 30
node.session.iscsi.InitialR2T = No
node.session.iscsi.ImmediateData = Yes
....
To edit the value of one of these variables, use the command iscsiadm with the update operation. For example, if you want iscsid to log in to the iSCSI target when it initializes, set the variable node.startup to the value automatic:

iscsiadm -m node -n iqn.2006-02.com.example.iserv:systems -p ip:port --op=update --name=node.startup --value=automatic
Remove obsolete data sets with the delete operation. If the target iqn.2006-02.com.example.iserv:systems is no longer a valid record, delete it with the following command:

iscsiadm -m node -n iqn.2006-02.com.example.iserv:systems -p ip:port --op=delete
IMPORTANT: Use this option with caution, because it deletes the record without any additional confirmation prompt.
To get a list of all discovered targets, run the iscsiadm -m node command.