#!/sbin/runscript
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/sys-fs/lvm2/files/clvmd.rc-2.02.28-r3,v 1.1 2007/11/16 08:05:01 robbat2 Exp $
depend() {
	use dns logger
	use net
	need cluster-manager cluster-locking-manager fenced
}

exefile=/sbin/clvmd

umount_gfs_filesystems() {
	local sig retry
	local remaining="$(awk '$3 == "gfs" { print $2 }' /proc/mounts | sort -r)"

	if [ -n "${remaining}" ]
	then
		sig=
		retry=3
		while [ -n "${remaining}" -a "${retry}" -gt 0 ]
		do
			if [ "${retry}" -lt 3 ]
			then
				ebegin "Unmounting GFS filesystems (retry)"
				umount ${remaining} &>/dev/null
				eend $? "Failed to unmount GFS filesystems this retry"
			else
				ebegin "Unmounting GFS filesystems"
				umount ${remaining} &>/dev/null
				eend $? "Failed to unmount GFS filesystems"
			fi
			remaining="$(awk '$3 == "gfs" { print $2 }' /proc/mounts | sort -r)"
			[ -z "${remaining}" ] && break
			/bin/fuser -k -m ${sig} ${remaining} &>/dev/null
			sleep 5
			retry=$((retry - 1))
			sig=-9
		done
	fi
}

start() {
	if [ -e /proc/modules -a ! -e /dev/device-mapper ] ; then
		ebegin "Loading device-mapper module"
		modprobe -q dm-mod
		eend $?
	fi

	if [ ! -e /dev/device-mapper ]; then
		eerror "device-mapper not available!"
		exit 1
	fi

	ebegin "Starting clvmd"
	start-stop-daemon --start --quiet --exec ${exefile}
	rc=$?
	eend ${rc}

	ebegin "Scanning LVM volumes"
	/sbin/vgscan &>/dev/null
	rc=$?
	eend ${rc}

	if [ "${rc}" -eq 0 ] && [ -x /sbin/vgchange ] && [ -d /etc/lvm ]
	then
		ebegin "Setting up the Logical Volume Manager"
		/sbin/vgchange -aly >/dev/null
		eend $? "Failed to set up the LVM"
	fi
}

stop() {
	# umount GFS filesystems
	umount_gfs_filesystems

	# This sucks majorly. We need to find a way to only take out the CLUSTER
	# VGs and LVs, and never the local ones.
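	# One possible approach (untested sketch): vgs can report vg_attr, whose
	# sixth character is 'c' for clustered volume groups, so something like
	#   CLUSTER_VGS=$(vgs --noheadings -o vg_name,vg_attr 2>/dev/null | awk 'substr($2,6,1) == "c" { print $1 }')
	# could limit deactivation to cluster VGs only, assuming this LVM version
	# supports the vg_attr reporting field.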
einfo "Shutting down the Logical Volume Manager"
LOGICAL_VOLUMES=`lvdisplay |grep "LV Name"|awk '{print $3}'|sort|xargs echo`
VOLUME_GROUPS=`vgdisplay |grep "VG Name"|awk '{print $3}'|sort|xargs echo`
for x in ${LOGICAL_VOLUMES}
do
LV_IS_ACTIVE=`lvdisplay ${x}|grep "# open"|awk '{print $3}'`
if [ "${LV_IS_ACTIVE}" = 0 ]
then
ebegin " Shutting Down logical volume: ${x} "
lvchange -an --ignorelockingfailure -P ${x} >/dev/null
eend $?
fi
done
	# Deactivate volume groups that no longer have any open LVs
	for x in ${VOLUME_GROUPS}
	do
		VG_HAS_ACTIVE_LV=`vgdisplay ${x} | grep "Open LV" | awk '{print $3}' | xargs echo`
		if [ "${VG_HAS_ACTIVE_LV}" = 0 ]
		then
			ebegin "  Shutting down volume group: ${x}"
			vgchange -an --ignorelockingfailure -P ${x} >/dev/null
			eend $?
		fi
	done
	# Warn about anything still open, except the root device
	for x in ${LOGICAL_VOLUMES}
	do
		LV_IS_ACTIVE=`lvdisplay ${x} | grep "# open" | awk '{print $3}'`
		if [ "${LV_IS_ACTIVE}" = 1 ]
		then
			ROOT_DEVICE=`mount | grep " / " | awk '{print $1}'`
			if [ "${ROOT_DEVICE}" != "${x}" ]
			then
				ewarn "  Unable to shut down: ${x}"
			fi
		fi
	done
einfo "Finished Shutting down the Logical Volume Manager"
ebegin "Stopping clvmd"
start-stop-daemon --stop --quiet --exec ${exefile}
eend $?
}