blob: 1bf8002b9dabecd94e493afe5bc42c7bbfdd5b13 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
|
#!/sbin/openrc-run
# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
_get_lvm_path() {
	# Locate the lvm binary, preferring /bin over /sbin (covers both
	# merged-usr and split-usr layouts).  Prints the path on stdout and
	# returns 0 when found.
	#
	# Fix: the original echoed the last loop candidate (/sbin/lvm) even
	# when neither candidate was executable, so callers' [ -z ... ]
	# guards could never fire.  Now prints nothing and returns 1 when no
	# executable lvm binary exists.
	local lvm_path=
	for lvm_path in /bin/lvm /sbin/lvm ; do
		if [ -x "${lvm_path}" ] ; then
			echo "${lvm_path}"
			return 0
		fi
	done
	return 1
}
_use_lvmetad() {
	# True when the running lvm configuration enables the lvmetad
	# metadata caching daemon (use_lvmetad=1 in the global section).
	local lvm_path
	lvm_path="$(_get_lvm_path)"
	[ -x "${lvm_path}" ] || return 1
	"${lvm_path}" dumpconfig global 2>/dev/null | grep -q 'use_lvmetad=1'
}
_use_lvmlockd() {
	# True when the running lvm configuration enables the lvmlockd
	# shared-VG locking daemon (use_lvmlockd=1 in the global section).
	local lvm_path
	lvm_path="$(_get_lvm_path)"
	[ -x "${lvm_path}" ] || return 1
	"${lvm_path}" dumpconfig global 2>/dev/null | grep -q 'use_lvmlockd=1'
}
depend() {
# Declare OpenRC dependencies: LVs must exist before filesystems are
# checked, and device-mapper support must already be loaded.
before checkfs fsck
after modules device-mapper
# We may use lvmetad based on the configuration. If we added lvmetad
# support while lvm2 is running then we aren't dependent on it. For the
# more common case, if its disabled in the config we aren't dependent
# on it.
config /etc/lvm/lvm.conf
local _use=
if service_started ; then
# Already running: reuse the 'use' list that start_post() recorded
# with service_set_value, instead of re-parsing lvm.conf.
_use=$(service_get_value use)
else
if _use_lvmetad ; then
_use="${_use} lvmetad"
fi
if _use_lvmlockd ; then
_use="${_use} lvmlockd"
fi
fi
# Make sure you review /etc/conf.d/lvm as well!
# Depending on your system, it might also introduce udev & mdraid
need sysfs
if [ -n "${_use}" ] ; then
# Soft dependency: start after these daemons when they are enabled,
# but do not fail if they are absent.
use ${_use}
fi
}
# lvm --config override used by every lvm invocation below: point the
# lock directory at /run/lock/lvm, which is writable early in boot
# before local filesystems are mounted.
config='global { locking_dir = "/run/lock/lvm" }'
dm_in_proc() {
	# Return 0 when the kernel's device-mapper driver is registered,
	# i.e. 'device-mapper' appears in BOTH /proc/devices and /proc/misc;
	# each miss adds grep's non-zero status to the accumulated result.
	#
	# Fixes: 'x' was not declared local and leaked into the caller's
	# scope, and /proc/${x} was unquoted.
	local retval=0 x
	for x in devices misc ; do
		# -s keeps grep quiet if the /proc entry is missing entirely.
		grep -qs 'device-mapper' "/proc/${x}"
		retval=$((retval + $?))
	done
	return ${retval}
}
start() {
# LVM support for /usr, /home, /opt ....
# This should be done *before* checking local
# volumes, or they never get checked.
# NOTE: Add needed modules for LVM or RAID, etc
# to /etc/modules.autoload if needed
lvm_path="$(_get_lvm_path)"
if [ -z "${lvm_path}" ] ; then
eerror "Failed to find lvm binary in /bin or /sbin!"
return 1
fi
# NOTE(review): CDBOOT presumably marks live-media boot, where volume
# activation is handled elsewhere — confirm against the CD init scripts.
if [ -z "${CDBOOT}" ] ; then
# Load the device-mapper module only if the kernel supports modules
# and the driver is not already registered.
if [ -e /proc/modules ] && ! dm_in_proc ; then
modprobe dm-mod 2>/dev/null
fi
if [ -d /proc/lvm ] || dm_in_proc ; then
ebegin "Setting up the Logical Volume Manager"
#still echo stderr for debugging
# Build one multi-command lvm "script": the leading #!<lvm_path> line
# lets lvm execute /proc/self/fd/0 as a batch of subcommands, avoiding
# a separate fork/exec per command.
lvm_commands="#!${lvm_path}\n"
# Extra PV find pass because some devices might not have been available until very recently
lvm_commands="${lvm_commands}pvscan --config '${config}'\n"
# Now make the nodes
lvm_commands="${lvm_commands}vgscan --config '${config}' --mknodes\n"
# And turn them on!
lvm_commands="${lvm_commands}vgchange --config '${config}' --sysinit -a ly\n"
if _use_lvmlockd ; then
# Start lockd VGs as required
lvm_commands="${lvm_commands}vgchange --config '${config}' --lock-start --lock-opt auto\n"
fi
# Order of this is important, have to work around dash and LVM readline
# (%b expands the embedded \n escapes into real newlines).
printf "%b\n" "${lvm_commands}" | $lvm_path /proc/self/fd/0 >/dev/null
eend $? "Failed to setup the LVM"
fi
fi
}
start_post() {
	# Record which optional daemons this service is using, so that a
	# later depend() call on the running service can fetch the list via
	# service_get_value instead of re-reading lvm.conf.
	local _use=
	_use_lvmetad && _use="${_use} lvmetad"
	_use_lvmlockd && _use="${_use} lvmlockd"
	service_set_value use "${_use}"
}
stop() {
lvm_path="$(_get_lvm_path)"
if [ -z "${lvm_path}" ] ; then
eerror "Failed to find lvm binary in /bin or /sbin!"
return 1
fi
# Stop LVM2
# NOTE(review): these guards hardcode /sbin/vgs etc. even though
# lvm_path may have resolved to /bin — on a merged-usr-only layout the
# whole shutdown branch would be skipped; confirm intended.
if [ -x /sbin/vgs ] \
&& [ -x /sbin/vgchange ] \
&& [ -x /sbin/lvchange ] \
&& [ -f /etc/lvmtab -o -d /etc/lvm ] \
&& [ -d /proc/lvm -o "$(grep device-mapper /proc/misc 2>/dev/null)" ]
then
einfo "Shutting down the Logical Volume Manager"
# List every VG name (rows layout, no headers) to deactivate below.
VGS=$($lvm_path vgs --config "${config}" -o vg_name --noheadings --nosuffix --rows 2> /dev/null)
if [ -n "${VGS}" ] ; then
local _ending="eend"
# At shutdown use ewend so failures are warnings, not errors.
[ "${RC_RUNLEVEL}" = shutdown ] && _ending="ewend"
ebegin " Shutting Down LVs & VGs"
#still echo stderr for debugging
# Same /proc/self/fd/0 batch trick as start(): feed lvm one script of
# subcommands headed by a #!<lvm_path> line.
lvm_commands="#!${lvm_path}\n"
# Extra PV find pass because some devices might not have been available until very recently
lvm_commands="${lvm_commands}lvchange --config '${config}' --sysinit -a ln ${VGS}\n"
# Now make the nodes
lvm_commands="${lvm_commands}vgchange --config '${config}' --sysinit -a ln ${VGS}\n"
# Order of this is important, have to work around dash and LVM readline
printf "%b\n" "${lvm_commands}" | ${lvm_path} /proc/self/fd/0 --config "${config}" >/dev/null
rc=${?}
msg="Failed (possibly some LVs still needed for /usr or root)"
# During shutdown, report the failure but do not block poweroff.
[ "${RC_RUNLEVEL}" = shutdown ] && msg="${msg} [rc=${rc}]" && rc=0
${_ending} ${rc} "${msg}"
fi
einfo "Finished shutting down the Logical Volume Manager"
return 0
fi
}
# vim:ts=4
|