Follow these steps to set up the iSCSI initiator on a Linux server as a client of Openfiler.
[root@oracluster03 /]# cd /media/*/Server
[root@oracluster03 Server]# rpm -i iscsi-init*
warning: iscsi-initiator-utils-6.2.0.868-0.18.el5.x86_64.rpm: Header V3 DSA sign ature: NOKEY, key ID 1e5e0159
[root@oracluster03 ~]# service iscsid start
Turning off network shutdown. Starting iSCSI daemon: [ OK ]
[ OK ]
[root@oracluster03 ~]# chkconfig iscsid on
[root@oracluster03 ~]# chkconfig iscsi on
[root@oracluster03 ~]# service iscsid status
iscsid (pid 22651 22650) is running...
[root@oracluster03 ~]#
[root@oracluster03 ~]#
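You can also double-check that both services are registered for the usual runlevels (the listing below is representative of standard chkconfig output, not captured from this host):
[root@oracluster03 ~]# chkconfig --list | grep iscsi
iscsi           0:off   1:off   2:on    3:on    4:on    5:on    6:off
iscsid          0:off   1:off   2:on    3:on    4:on    5:on    6:off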
The one-liner below turns discovery output into ready-to-run login commands; it is used in the steps that follow, so copy it now:
--- cut here start -------
iscsiadm -m discovery -t sendtargets -p $STORAGE|awk '{print "iscsiadm -m node -T "$2" -p $STORAGE -l"}'
----- cut here end ----------
Set the storage IP and run discovery to make sure you can see the expected targets.
# export STORAGE=192.168.1.200
# iscsiadm -m discovery -t sendtargets -p $STORAGE
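A successful discovery prints one portal/IQN pair per target, along these lines (the ocrdisk IQNs match the targets used throughout this guide):
192.168.1.200:3260,1 iqn.2006-01.com.openfiler:tsn.ocrdisk1
192.168.1.200:3260,1 iqn.2006-01.com.openfiler:tsn.ocrdisk2
192.168.1.200:3260,1 iqn.2006-01.com.openfiler:tsn.ocrdisk3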
To manually log in to an iSCSI target, use the following command:
iscsiadm -m node -T proper_target_name -p target_IP -l
As you need to do this for each disk, a simple awk one-liner can generate the login command for all of them:
iscsiadm -m discovery -t sendtargets -p $STORAGE|awk '{print "iscsiadm -m node -T "$2" -p $STORAGE -l"}'
Now use the output to test your login to each disk.
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk3 -p $STORAGE -l
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk2 -p $STORAGE -l
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk1 -p $STORAGE -l
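Each login should report a pair of lines like the ones below (the same format as the service startup output shown later in this guide):
Logging in to [iface: default, target: iqn.2006-01.com.openfiler:tsn.ocrdisk1, portal: 192.168.1.200,3260]
Login to [iface: default, target: iqn.2006-01.com.openfiler:tsn.ocrdisk1, portal: 192.168.1.200,3260]: successful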
Since we are setting this up for Oracle RAC, we need the targets to log in automatically at startup (the default behaviour). To be safe, explicitly set each node record to automatic login with the "--op update -n node.startup -v automatic" option.
# iscsiadm -m discovery -t sendtargets -p $STORAGE|awk '{print "iscsiadm -m node -T "$2" -p $STORAGE --op update -n node.startup -v automatic"}'
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk3 -p $STORAGE --op update -n node.startup -v automatic
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk2 -p $STORAGE --op update -n node.startup -v automatic
iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk1 -p $STORAGE --op update -n node.startup -v automatic
Then run each generated command:
# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk3 -p $STORAGE --op update -n node.startup -v automatic
# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk2 -p $STORAGE --op update -n node.startup -v automatic
# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk1 -p $STORAGE --op update -n node.startup -v automatic
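To confirm the update took, dump one node record without logging in and check the node.startup field (shown here for one target; repeat for the others if you want to be thorough):
# iscsiadm -m node -T iqn.2006-01.com.openfiler:tsn.ocrdisk1 -p $STORAGE | grep node.startup
node.startup = automatic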
Now create udev rules so all the disks get uniform names on startup.
Create the open-iscsi rules file:
[root@oracluster03 ~]# vi /etc/udev/rules.d/55-openiscsi.rules
----------------------File start here ---------------------
KERNEL=="sd*", BUS=="scsi", PROGRAM="/etc/udev/scripts/iscsidev.sh %b",SYMLINK+="iscsi/%c/part%n"
-------------------------------------------------------------------------------------------
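With this rule in place, a target named iqn.2006-01.com.openfiler:tsn.ocrdisk1 ends up with a whole-disk link at /dev/iscsi/ocrdisk1/part and partition links such as /dev/iscsi/ocrdisk1/part1, since the directory name is whatever the helper script prints.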
[root@oracluster03 ~]# mkdir -p /etc/udev/scripts
[root@oracluster03 ~]# vi /etc/udev/scripts/iscsidev.sh
-------------------------------file start here --------------------
#!/bin/sh
# FILE: /etc/udev/scripts/iscsidev.sh
# Called by udev with %b, the SCSI bus ID (e.g. 4:0:0:0).
# Prints the last component of the iSCSI target name, which
# 55-openiscsi.rules uses to build /dev/iscsi/<name>/part%n.
BUS=${1}
HOST=${BUS%%:*}   # the SCSI host number is the first field of the bus ID

# Bail out if the iSCSI transport class is not present
[ -e /sys/class/iscsi_host ] || exit 1

file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/targetname"
target_name=$(cat ${file})

# This is not an open-iscsi drive
if [ -z "${target_name}" ]; then
  exit 1
fi

# Strip everything up to the last '.', e.g.
# iqn.2006-01.com.openfiler:tsn.ocrdisk1 -> ocrdisk1
echo "${target_name##*.}"
---------file ends here -------------------------
[root@oracluster03 ~]# chmod 755 /etc/udev/scripts/iscsidev.sh
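Before restarting the service, you can sanity-check the helper script by hand: list the iSCSI hosts and feed the script a matching bus ID (host4 and the 4:0:0:0 bus ID below are assumptions; substitute whatever your system shows):
[root@oracluster03 ~]# ls /sys/class/iscsi_host
host4
[root@oracluster03 ~]# /etc/udev/scripts/iscsidev.sh 4:0:0:0
gridcontrol01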
[root@oracluster03 ~]# service iscsi stop
Logging out of session [sid: 1, target: iqn.2006-01.com.openfiler:cluster03.gridcontrol01, portal: 192.168.2.5,3260]
Logout of [sid: 1, target: iqn.2006-01.com.openfiler:cluster03.gridcontrol01, portal: 192.168.2.5,3260]: successful
Stopping iSCSI daemon: [ OK ]
[root@oracluster03 ~]# service iscsi start
iscsid dead but pid file exists
Turning off network shutdown. Starting iSCSI daemon: [ OK ]
[ OK ]
Setting up iSCSI targets: Logging in to [iface: default, target: iqn.2006-01.com.openfiler:cluster03.gridcontrol01, portal: 192.168.2.5,3260]
Login to [iface: default, target: iqn.2006-01.com.openfiler:cluster03.gridcontrol01, portal: 192.168.2.5,3260]: successful
[ OK ]
Verify that the disks are linked to their uniform local names:
[root@oracluster03 ~]# ls -lrt /dev/iscsi/*/
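Each target should have its own directory, with part pointing at the underlying kernel device; a representative listing (device letters and timestamps vary from host to host) looks like:
/dev/iscsi/disk01/:
total 0
lrwxrwxrwx 1 root root 9 Aug  1 10:15 part -> ../../sdb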
#### Only on the first node ####
fdisk /dev/iscsi/disk01/part
fdisk /dev/iscsi/disk02/part
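Inside fdisk, a single primary partition spanning the disk is the usual layout for this kind of setup; the standard keystroke sequence (not captured output) is:
n        (new partition)
p        (primary)
1        (partition number 1)
<Enter>  (accept default first cylinder)
<Enter>  (accept default last cylinder)
w        (write the table and exit)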
#### On all other nodes ####
partprobe
fdisk -l
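Once partprobe has picked up the new partition table, udev should create partition-level links on every node, following the part%n pattern from the rule above; a quick check:
# ls -l /dev/iscsi/*/part1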
Update: When I set up a new environment years later, I came across a new issue: "iscsiadm: No portals found".
The Openfiler storage IP is 192.168.1.200.
[root@oraclelinux6 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.1.200
iscsiadm: No portals found
This happens either when no disk target has been set up, or when the targets you added are on the server's deny list. In my case they were in the deny list.
ssh root@192.168.1.200 and vi /etc/initiators.deny
If the disks you are looking for are listed there, comment out those lines and try again.
[root@oraclelinux6 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.1.200
192.168.1.200:3260,1 iqn.2006-01.com.openfiler:tsn.ocrdisk3
192.168.1.200:3260,1 iqn.2006-01.com.openfiler:tsn.ocrdisk2
192.168.1.200:3260,1 iqn.2006-01.com.openfiler:tsn.ocrdisk1
Solution: check /etc/initiators.deny and comment out the lines for the disks you are interested in.
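If the deny entries all begin with the target IQN, one way to comment them all out at once is a sed one-liner (an assumption about the file layout; inspect the file by hand first):
# sed -i 's/^iqn/#iqn/' /etc/initiators.deny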