1. Set up SSH keys
root@node1:/# ssh-keygen -t rsa
root@node1:/# cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
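GPFS administration commands need passwordless root ssh between all cluster nodes, not just on node1. A minimal sketch for pushing the key to the other nodes and verifying it (node2/node3/node4 are assumptions taken from the /etc/hosts example further down):
root@node1:/# for h in node2 node3 node4; do ssh-copy-id -i /root/.ssh/id_rsa.pub root@$h; done
root@node1:/# for h in node1 node2 node3 node4; do ssh root@$h hostname; done    # each should answer without asking for a password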
2. Install required packages
root@node1:/# apt-get install rpm pdksh
3. Download and extract the installation files
Download page:
https://www14.software.ibm.com/webapp/set2/sas/f/gpfs/download/systemx.html
Method 1 (install with rpm):
tar -zxvf gpfs*
Install the four GPFS rpm files with rpm --nodeps -Uvh:
root@node1:# rpm -Uvh gpfs.gpl-3.1.0-10.noarch.rpm
root@node1:# rpm -Uvh gpfs.msg.en_US-3.1.0-10.noarch.rpm
root@node1:# rpm -Uvh gpfs.docs-3.1.0-10.noarch.rpm
root@node1:# rpm -Uvh --noscripts gpfs.base-3.1.0-10.i386.update.rpm
Method 2 (convert with alien and install with dpkg):
# alien gpfs.base-3.1.0-10.i386.update.rpm
==> gpfs.base_3.1.0-11_i386.deb generated
# alien --scripts gpfs.base-3.1.0-10.i386.update.rpm
==>
# alien gpfs.gpl-3.1.0-10.noarch.rpm
==> gpfs.gpl_3.1.0-11_all.deb generated
# alien gpfs.docs-3.1.0-10.noarch.rpm
==> gpfs.docs_3.1.0-11_all.deb generated
# alien gpfs.msg.en_US-3.1.0-10.noarch.rpm
==> gpfs.msg.en-us_3.1.0-11_all.deb generated
# dpkg -i gpfs.gpl_3.1.0-11_all.deb ; dpkg -i gpfs.msg.en-us_3.1.0-11_all.deb ; dpkg -i gpfs.docs_3.1.0-11_all.deb ; dpkg -i gpfs.base_3.1.0-11_i386.deb
4. Post-install configuration
Generate script.sh:
# rpm -qip --scripts gpfs.base-3.1.0-10.i386.update.rpm > script.sh
Edit script.sh:
keep the preinstall and postinstall parts, comment out the exit 1 on line 43 and the exit 0 on line 97, and then run:
# /bin/bash script.sh
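For a scripted alternative to the hand edit above, sed can comment out the two lines before the script is run (a sketch; the line numbers 43 and 97 come from this particular rpm and may differ in other GPFS versions):
# sed -i -e '43 s/^/#/' -e '97 s/^/#/' script.sh    # comment out the "exit 1" (line 43) and "exit 0" (line 97)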
Edit site.mcr:
root@node1:/# cp src/site.mcr.proto src/site.mcr
root@node1:/# vim src/site.mcr
site.mcr
....
#define LINUX_DISTRIBUTION_LEVEL 90
....
LINUX_DISTRIBUTION = UBUNTU
....
#define LINUX_KERNEL_VERSION 2061621
KERNEL_HEADER_DIR = /lib/modules/`uname -r`/build/include
....
KERNEL_BUILD_DIR = /lib/modules/`uname -r`/build
...
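LINUX_KERNEL_VERSION appears to encode `uname -r` as one digit for the major version and two digits for each remaining component (2.6.16.21 becomes 2061621). A small sketch to compute it under that assumption:
root@node1:/# uname -r | awk -F'[.-]' '{ printf "%d%02d%02d%02d\n", $1, $2, $3, $4 }'
2061621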
root@node1:/# vim /etc/hosts
hosts
192.168.0.1 node1
192.168.0.2 node2
192.168.0.3 node3
192.168.0.4 node4
127.0.0.1 localhost
.....
root@node1:/# ln -s /usr/bin/awk /bin/awk
root@node1:/# ln -s /usr/bin/sort /bin/sort
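The packages and symlinks above are needed on every node, not only node1. Assuming all nodes run the same Ubuntu image, a loop like the following (hostnames assumed) avoids repeating the steps by hand:
root@node1:/# for h in node2 node3 node4; do ssh root@$h 'apt-get -y install rpm pdksh; ln -sf /usr/bin/awk /bin/awk; ln -sf /usr/bin/sort /bin/sort'; done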
5. Pre-build preparation
root@node1:/# vim /usr/lpp/mmfs/src/config/imake.tmpl
imake.tmpl
Around line 248, add:
elif [ "$(LINUX_DISTRIBUTION)" = "UBUNTU" ]; then \ @@\
echo "This is OK" > /dev/null; \ @@\
root@node1:/# ln -s /usr/lpp/mmfs/include/gpfs_gpl.h /usr/lpp/mmfs/src/gpl-linux/gpfs_gpl.h
root@node1:/# vim /usr/lpp/mmfs/src/ibm-kxi/Imakefile
==> delete line "InstallHeaders(gpfs_gpl.h, ${DESTDIR}/include)"
root@node1:/# vim /usr/lpp/mmfs/src/gpl-linux/Imakefile
==> replace
    HEADERS = Shark-gpl.h prelinux.h postlinux.h linux2gpfs.h verdep.h \
              Logger-gpl.h arch-gpl.h
==> with
    HEADERS = Shark-gpl.h prelinux.h postlinux.h linux2gpfs.h verdep.h \
              Logger-gpl.h arch-gpl.h gpfs_gpl.h
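The same HEADERS change can be made non-interactively; a sketch with sed, assuming arch-gpl.h appears only in that HEADERS list within the file:
root@node1:/# sed -i 's/arch-gpl\.h/arch-gpl.h gpfs_gpl.h/' /usr/lpp/mmfs/src/gpl-linux/Imakefile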
6. Build
root@node1:/# cd /usr/lpp/mmfs/src/
root@node1:/usr/lpp/mmfs/src/# export SHARKCLONEROOT=/usr/lpp/mmfs/src
root@node1:/usr/lpp/mmfs/src/# export PATH=$PATH:/usr/lpp/mmfs/bin
root@node1:/usr/lpp/mmfs/src/# make World
root@node1:/usr/lpp/mmfs/src/# make InstallImages
(This produces mmfslinux, mmfs26, lxtrace, dumpconv, and tracedev.)
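A quick sanity check after the build (assuming, as the later ps/lsmod output suggests, that InstallImages copies the images into /usr/lpp/mmfs/bin):
root@node1:/usr/lpp/mmfs/src/# ls /usr/lpp/mmfs/bin | grep -E 'mmfs26|mmfslinux|tracedev|lxtrace|dumpconv'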
7. Create the cluster nodes
root@node1:/usr/lpp/mmfs/src/# vim gpfs.allnodes
gpfs.allnodes
node1:quorum
node2:
node3:
Run:
root@node1:/usr/lpp/mmfs/src/# mmcrcluster -n gpfs.allnodes -p node1 -r /usr/bin/ssh -R /usr/bin/scp
Check:
root@node1:/usr/lpp/mmfs/src# mmlscluster
GPFS cluster information
========================
GPFS cluster name: node1
GPFS cluster id: 9151314448290410001
GPFS UID domain: node1
Remote shell command: /usr/bin/ssh
Remote file copy command: /usr/bin/scp
GPFS cluster configuration servers:
-----------------------------------
Primary server: node1
Secondary server: (none)
 Node  Daemon node name   IP address    Admin node name   Designation
-----------------------------------------------------------------------------------------------
   1   node1              127.0.0.1     node1             quorum
   1   node2              127.0.0.1     node1
   1   node3              127.0.0.1     node1
root@node1:/usr/lpp/mmfs/src# mmlsnode -a
GPFS nodeset    Node list
-------------   -------------------------------------------------------
      node1     node1 node2 node3
root@node1:/usr/lpp/mmfs/src/# vim descfile
descfile
/dev/hdb:node1::dataAndMetadata::
/dev/hdb:node2::dataAndMetadata::
/dev/hdb:node3::dataAndMetadata::
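Each descriptor line is colon-separated; as used here the fields are DiskName:PrimaryServer:BackupServer:DiskUsage:FailureGroup, with the remaining fields left empty (check the mmcrnsd documentation for the full list). A small sketch that generates the same file, assuming each node exports its local /dev/hdb:
root@node1:/usr/lpp/mmfs/src/# for h in node1 node2 node3; do echo "/dev/hdb:${h}::dataAndMetadata::"; done > descfile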
Run:
root@node1:/usr/lpp/mmfs/src/# mmcrnsd -F descfile
Check:
root@node1:/usr/lpp/mmfs/src/# cat descfile
# /dev/hdb:node1::dataAndMetadata::
gpfs1nsd:::dataAndMetadata:4001::
# /dev/hdb:node2::dataAndMetadata::
gpfs2nsd:::dataAndMetadata:4001::
# /dev/hdb:node3::dataAndMetadata::
gpfs3nsd:::dataAndMetadata:4001::
root@node1:/usr/lpp/mmfs/src/test# mmstartup -a
Tue May 8 11:29:28 CST 2007: mmstartup: Starting GPFS ...
Check:
root@node1:/usr/lpp/mmfs/src/# mmgetstate -a
Node number Node name GPFS state
------------------------------------------
1 node1 active
2 node2 active
3 node3 active
root@node1:/usr/lpp/mmfs/src/# ps aux|grep mm
root      4076  0.0  0.0      0     0 ?   S<    11:04  0:00 [krfcommd]
root      4557  0.4  1.2   3464  2304 ?   S<    11:29  0:00 /bin/ksh /usr/lpp/mmfs/bin/runmmfs
root      4839  1.6 37.9 467936 70924 ?   S<Ll  11:29  0:00 /usr/lpp/mmfs/bin//mmfsd
root@node1:/usr/lpp/mmfs/src/# tsstatus
The file system daemon is running.
root@node1:/usr/lpp/mmfs/src/# lsmod |grep mm
mmfs 955296 1
mmfslinux 183172 6 mmfs
tracedev 14364 3 mmfs,mmfslinux
root@node1:/usr/lpp/mmfs/src/test# mmcrfs /gpfs gpfs0 -F descfile -A yes
PS: If this fails with "6027-470 Disk name may still belong to an active file system.",
the fix is to add -v no.
The following disks of gpfs0 will be formatted on node node1:
gpfs1nsd: size 2097152 KB
Formatting file system ...
Disks up to size 24 GB can be added to storage pool 'system'.
Creating Inode File
Creating Allocation Maps
Clearing Inode Allocation Map
Clearing Block Allocation Map
Completed creation of file system /dev/gpfs0.
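mmcrfs only creates the file system; it still has to be mounted before df (below) will show it. GPFS provides the mmmount wrapper (its counterpart mmumount is used in step 8), so a likely next step is:
root@node1:/usr/lpp/mmfs/src/test# mmmount gpfs0 -a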
Check:
root@node2:/usr/lpp/mmfs/bin# mmlsdisk gpfs0
disk         driver   sector failure holds    holds                            storage
name         type       size  group  metadata data  status        availability pool
------------ -------- ------ ------- -------- ----- ------------- ------------ ------------
gpfs1nsd     nsd         512    4001 yes      yes   ready         up           system
gpfs2nsd     nsd         512    4002 yes      yes   ready         up           system
gpfs3nsd     nsd         512    4002 yes      yes   ready         up           system
Attention: Due to an earlier configuration change the file system
is no longer properly balanced.
root@node2:/gpfs# df -h
Filesystem Size Used Avail Use% Mounted on
.......
/dev/gpfs0 5G XX M 4G 1% /gpfs
8. Stop GPFS
root@node1:/usr/lpp/mmfs/bin# mmshutdown -a
Mon May 14 22:07:08 CST 2007: mmshutdown: Starting force unmount of
GPFS file systems
node1: umount2: Device or resource busy
node1: umount: /gpfs: device is busy
Mon May 14 22:07:13 CST 2007: mmshutdown: Shutting down GPFS daemons
node2: Shutting down!
node1: Shutting down!
node2: 'shutdown' command about to kill process 13852
node2: Unloading modules from /usr/lpp/mmfs/bin
node2: Unloading module mmfs
node1: 'shutdown' command about to kill process 20457
node1: Unloading modules from /usr/lpp/mmfs/bin
node1: Unloading module mmfs
node1: Unloading module mmfslinux
node1: Unloading module tracedev
node2: Unloading module mmfslinux
node2: Unloading module tracedev
Mon May 14 22:07:20 CST 2007: mmshutdown: Finished
9. Add a node
root@node1:/usr/lpp/mmfs/src/# mmaddnode node4              (method 1)
root@node1:/usr/lpp/mmfs/src/# mmaddnode -N addnode-file    (method 2; addnode-file uses the same format as gpfs.allnodes above)
root@node1:/usr/lpp/mmfs/src/# vim add-descfile
add-descfile
/dev/hdb:node4::dataAndMetadata::
Run:
root@node1:/usr/lpp/mmfs/src/# mmcrnsd -F add-descfile
Check:
root@node2:/usr/lpp/mmfs/bin# mmlsnsd
 File system   Disk name      Primary node        Backup node
---------------------------------------------------------------------------
 gpfs0         gpfs1nsd       node1
 gpfs0         gpfs2nsd       node2
 gpfs0         gpfs3nsd       node3
 (free disk)   gpfs4nsd       node4
PS: If an NSD already exists on the disk, mmcrnsd reports an error like this:
mmcrnsd: Processing disk hda3
mmcrnsd: Disk descriptor /dev/hda3:node1::dataAndMetadata:: refers to an existing NSD
Adding the -v no option resolves it, e.g.:
root@node1:/usr/lpp/mmfs/src/# mmcrnsd -F add-descfile -v no
root@node1:/usr/lpp/mmfs/src/test# mmstartup -a
Tue May 8 11:29:28 CST 2007: mmstartup: Starting GPFS ...
root@node1:/usr/lpp/mmfs/src/test# mmadddisk gpfs0 -F add-descfile -v no
The following disks of gpfs0 will be formatted on node node1:
gpfs1nsd: size 2097152 KB
Formatting file system ...
Disks up to size 24 GB can be added to storage pool 'system'.
Creating Inode File
Creating Allocation Maps
Clearing Inode Allocation Map
Clearing Block Allocation Map
Completed creation of file system /dev/gpfs0.
Check:
root@node1:/usr/lpp/mmfs/bin# mmlsdisk gpfs0
disk         driver   sector failure holds    holds                            storage
name         type       size  group  metadata data  status        availability pool
------------ -------- ------ ------- -------- ----- ------------- ------------ ------------
gpfs1nsd     nsd         512    4001 yes      yes   ready         up           system
gpfs2nsd     nsd         512    4002 yes      yes   ready         up           system
gpfs3nsd     nsd         512    4002 yes      yes   ready         up           system
gpfs4nsd     nsd         512    4002 yes      yes   ready         up           system
Attention: Due to an earlier configuration change the file system
is no longer properly balanced.
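The "no longer properly balanced" warning means existing data has not yet been spread onto the newly added gpfs4nsd. If you want to rebalance immediately, GPFS offers mmrestripefs (a sketch; note that restriping can generate heavy I/O):
root@node1:/usr/lpp/mmfs/bin# mmrestripefs gpfs0 -b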
root@node1:/gpfs# df -h
Filesystem Size Used Avail Use% Mounted on
.......
/dev/gpfs0 20G 156M 19G 1% /gpfs
10. Delete a node
root@node1:/usr/lpp/mmfs/bin# mmstartup -a
PS: /gpfs must first be unmounted on every node that has gpfs0 mounted.
root@node1:/usr/lpp/mmfs/bin# mmumount gpfs0 -a
root@node1:/usr/lpp/mmfs/bin# mmdelfs gpfs0
All data on following disks of gpfs0 will be destroyed:
gpfs1nsd
gpfs2nsd
gpfs3nsd
gpfs4nsd
Completed deletion of file system /dev/gpfs0.
mmdelfs: Propagating the cluster configuration data to all
affected nodes. This is an asynchronous process.
root@node2:/usr/lpp/mmfs/bin# mmdelnsd gpfs2nsd
mmdelnsd: Processing disk gpfs3nsd
mmdelnsd: Propagating the cluster configuration data to all
affected nodes. This is an asynchronous process.
root@node2:/# mmshutdown -a
Mon May 14 23:05:54 CST 2007: mmshutdown: Starting force unmount of
GPFS file systems
Mon May 14 23:05:59 CST 2007: mmshutdown: Shutting down GPFS daemons
node1: Shutting down!
node2: Shutting down!
node1: 'shutdown' command about to kill process 25261
node1: Unloading modules from /usr/lpp/mmfs/bin
node1: Unloading module mmfs
node2: 'shutdown' command about to kill process 20569
node2: Unloading modules from /usr/lpp/mmfs/bin
node2: Unloading module mmfs
node2: Unloading module mmfslinux
node2: Unloading module tracedev
node1: Unloading module mmfslinux
node1: Unloading module tracedev
Mon May 14 23:06:07 CST 2007: mmshutdown: Finished
PS: To delete a node, you must run mmdelnode from a different node.
root@node2:/# ssh node1
Last login: Mon May 14 14:58:48 2007 from node2
Linux node1 2.6.16.21-686 #1 SMP Thu May 3 12:30:17 CST 2007 i686
The programs included with the Ubuntu system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.
Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by
applicable law.
root@node1:~# mmdelnode node2
Verifying GPFS is stopped on all affected nodes ...
mmdelnode: Command successfully completed
11. Delete all nodes
root@node1:/# mmdelnsd "gpfs1nsd;gpfs3nsd;gpfs4nsd"
root@node1:/# mmshutdown -a
root@node1:~# mmdelnode -a
12. Notes
1. A node cannot be deleted while it is still serving NSDs in the cluster:
root@node1:/usr/lpp/mmfs/src/test# mmdelnode node2
Verifying GPFS is stopped on all affected nodes ...
mmdelnode: Node node2 is being used as a primary or backup NSD
server.
2. The primary and secondary configuration server nodes cannot be deleted:
root@node1:/usr/lpp/mmfs/src/test# mmdelnode node2
Verifying GPFS is stopped on all affected nodes ...
mmdelnode: GPFS cluster configuration server node node2 cannot be
removed.
3. The secondary server does not have to be set, but the primary server must be (the secondary is given with the -s option of mmcrcluster). To clear the secondary server, use mmchcluster -s "":
root@node1:/usr/lpp/mmfs/src/test# mmchcluster -s ""
Verifying GPFS is stopped on all affected nodes ...
mmchcluster: Command successfully completed
root@node1:/usr/lpp/mmfs/src/test# mmlscluster
GPFS cluster information
========================
GPFS cluster name: node1
GPFS cluster id: 10119177971165564776
GPFS UID domain: node1
Remote shell command: /usr/bin/ssh
Remote file copy command: /usr/bin/scp
GPFS cluster configuration servers:
-----------------------------------
Primary server: node1
Secondary server: (none)
 Node  Daemon node name   IP address     Admin node name   Designation
-----------------------------------------------------------------------------------------------
   1   node1              192.168.0.1    node1             quorum
   2   node2              192.168.0.2    node2
4. To change the primary server, use mmchcluster:
root@node1:/usr/lpp/mmfs/src/test# mmchcluster -p node2
Verifying GPFS is stopped on all affected nodes ...
mmchcluster: Command successfully completed
root@node1:/usr/lpp/mmfs/src/test# mmlscluster
GPFS cluster information
========================
GPFS cluster name: node1
GPFS cluster id: 10119177971165564776
GPFS UID domain: node1
Remote shell command: /usr/bin/ssh
Remote file copy command: /usr/bin/scp
GPFS cluster configuration servers:
-----------------------------------
Primary server: node2
Secondary server: (none)
 Node  Daemon node name   IP address     Admin node name   Designation
-----------------------------------------------------------------------------------------------
   1   node1              192.168.0.1    node1             quorum
   2   node2              192.168.0.2    node2
The whole problem seems to come down to the rpm post-install script.
The odd thing, though, is why we also ran into a similar mmlsfs hang back when we used SuSE.
A few goals worth considering for improving the GPFS setup in the future:
* upgrade the kernel from 2.6.16 to 2.6.20
-- for the Kerrighed and KVM projects
* merge GPFS into DRBL as a deb package
Barz
Heh heh~
No wonder Wei-Yu sounded so relieved on MSN, like he'd finally gotten it all out of his system~
I want to learn how to package this as a deb too~
Sign me up for the discussion~
rock
- Jazz