8890 ipfilter and nfs/server are locked in death race
--- old/usr/src/cmd/fs.d/nfs/svc/nfs-server
+++ new/usr/src/cmd/fs.d/nfs/svc/nfs-server
1 1 #!/sbin/sh
2 2 #
3 3 # CDDL HEADER START
4 4 #
5 5 # The contents of this file are subject to the terms of the
6 6 # Common Development and Distribution License (the "License").
7 7 # You may not use this file except in compliance with the License.
8 8 #
9 9 # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 # or http://www.opensolaris.org/os/licensing.
11 11 # See the License for the specific language governing permissions
12 12 # and limitations under the License.
13 13 #
14 14 # When distributing Covered Code, include this CDDL HEADER in each
15 15 # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 # If applicable, add the following below this CDDL HEADER, with the
17 17 # fields enclosed by brackets "[]" replaced with your own identifying
18 18 # information: Portions Copyright [yyyy] [name of copyright owner]
19 19 #
20 20 # CDDL HEADER END
21 21 #
22 22
23 23 #
24 24 # Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
25 25 # Copyright 2015 Nexenta Systems, Inc. All rights reserved.
26 26 # Copyright 2016 Hans Rosenfeld <rosenfeld@grumpf.hope-2000.org>
27 27 #
28 28
29 29 # Start/stop processes required for server NFS
30 30
31 31 . /lib/svc/share/smf_include.sh
32 32 . /lib/svc/share/ipf_include.sh
33 33 zone=`smf_zonename`
34 34
35 -#
36 -# Handling a corner case here. If we were in offline state due to an
37 -# unsatisfied dependency, the ipf_method process wouldn't have generated
38 -# the ipfilter configuration. When we transition to online because the
39 -# dependency is satisfied, the start method will have to generate the
40 -# ipfilter configuration. To avoid all possible deadlock scenarios,
41 -# we restart ipfilter which will regenerate the ipfilter configuration
42 -# for the entire system.
43 -#
44 -# The ipf_method process signals that it didn't generate ipf rules by
45 -# removing the service's ipf file. Thus we only restart network/ipfilter
46 -# when the file is missing.
47 -#
48 -configure_ipfilter()
49 -{
50 - ipfile=`fmri_to_file $SMF_FMRI $IPF_SUFFIX`
51 - ip6file=`fmri_to_file $SMF_FMRI $IPF6_SUFFIX`
52 - [ -f "$ipfile" -a -f "$ip6file" ] && return 0
53 -
54 - #
55 - # Nothing to do if:
56 - # - ipfilter isn't online
57 - # - global policy is 'custom'
58 - # - service's policy is 'use_global'
59 - #
60 - service_check_state $IPF_FMRI $SMF_ONLINE || return 0
61 - [ "`get_global_def_policy`" = "custom" ] && return 0
62 - [ "`get_policy $SMF_FMRI`" = "use_global" ] && return 0
63 -
64 - svcadm restart $IPF_FMRI
65 -}
66 -
67 35 case "$1" in
68 36 'start')
69 37 # The NFS server is not supported in a local zone
70 38 if smf_is_nonglobalzone; then
71 39 /usr/sbin/svcadm disable -t svc:/network/nfs/server
72 40 echo "The NFS server is not supported in a local zone"
73 41 sleep 5 &
74 42 exit $SMF_EXIT_OK
75 43 fi
76 44
77 45 # Share all file systems enabled for sharing. sharemgr understands
78 46 # regular shares and ZFS shares and will handle both. Technically,
79 47 # the shares would have been started long before getting here since
80 48 # nfsd has a dependency on them.
81 49
82 50 # restart stopped shares from the repository
83 51 /usr/sbin/sharemgr start -P nfs -a
84 52
85 53 # Options for nfsd are now set in SMF
86 54
87 55 /usr/lib/nfs/mountd
88 56 rc=$?
89 57 if [ $rc != 0 ]; then
90 58 /usr/sbin/svcadm mark -t maintenance svc:/network/nfs/server
91 59 echo "$0: mountd failed with $rc"
92 60 sleep 5 &
93 61 exit $SMF_EXIT_ERR_FATAL
94 62 fi
95 63
96 64 /usr/lib/nfs/nfsd
97 65 rc=$?
98 66 if [ $rc != 0 ]; then
99 67 /usr/sbin/svcadm mark -t maintenance svc:/network/nfs/server
100 68 echo "$0: nfsd failed with $rc"
101 69 sleep 5 &
102 70 exit $SMF_EXIT_ERR_FATAL
103 71 fi
104 -
105 - configure_ipfilter
106 72 ;;
107 73
108 74 'refresh')
109 75 /usr/sbin/sharemgr start -P nfs -a
110 76 ;;
111 77
112 78 'stop')
113 79 /usr/bin/pkill -x -u 0,1 -z $zone '(nfsd|mountd)'
114 80
115 81 # Unshare all shared file systems using NFS
116 82
117 83 /usr/sbin/sharemgr stop -P nfs -a
118 84
119 85 # Kill any processes left in service contract
120 86 smf_kill_contract $2 TERM 1
121 87 [ $? -ne 0 ] && exit 1
122 88 ;;
123 89
124 90 'ipfilter')
125 91 #
126 92 # NFS-related services are RPC services. nfs/server includes nfsd, which
127 93 # has a well-defined port number, while mountd is an RPC daemon.
128 94 #
129 95 # Essentially, we generate rules for the following "services"
130 96 # - nfs/server which has nfsd and mountd
131 97 # - nfs/rquota
132 98 #
133 99 # The following services are enabled for both the nfs client and
134 100 # server; if nfs/client is enabled, we'll treat them as client
135 101 # services and simply allow incoming traffic.
136 102 # - nfs/status
137 103 # - nfs/nlockmgr
138 104 # - nfs/cbd
139 105 #
140 106 NFS_FMRI="svc:/network/nfs/server:default"
141 107 NFSCLI_FMRI="svc:/network/nfs/client:default"
142 108 RQUOTA_FMRI="svc:/network/nfs/rquota:default"
143 109 FMRI=$2
144 110
145 111 file=`fmri_to_file $FMRI $IPF_SUFFIX`
146 112 file6=`fmri_to_file $FMRI $IPF6_SUFFIX`
147 113 echo "# $FMRI" >$file
148 114 echo "# $FMRI" >$file6
149 115 policy=`get_policy $NFS_FMRI`
150 116
151 117 #
152 118 # nfs/server configuration is processed in the start method.
153 119 #
154 120 if [ "$FMRI" = "$NFS_FMRI" ]; then
155 121 service_check_state $FMRI $SMF_ONLINE
156 122 if [ $? -ne 0 ]; then
157 123 rm $file
158 124 exit $SMF_EXIT_OK
159 125 fi
160 126
161 127 nfs_name=`svcprop -p $FW_CONTEXT_PG/name $FMRI 2>/dev/null`
162 128 tport=`$SERVINFO -p -t -s $nfs_name 2>/dev/null`
163 129 if [ -n "$tport" ]; then
164 130 generate_rules $FMRI $policy "tcp" $tport $file
165 131 fi
166 132
167 133 tport6=`$SERVINFO -p -t6 -s $nfs_name 2>/dev/null`
168 134 if [ -n "$tport6" ]; then
169 135 generate_rules $FMRI $policy "tcp" $tport6 $file6 _6
170 136 fi
171 137
172 138 uport=`$SERVINFO -p -u -s $nfs_name 2>/dev/null`
173 139 if [ -n "$uport" ]; then
174 140 generate_rules $FMRI $policy "udp" $uport $file
175 141 fi
176 142
177 143 uport6=`$SERVINFO -p -u6 -s $nfs_name 2>/dev/null`
178 144 if [ -n "$uport6" ]; then
179 145 generate_rules $FMRI $policy "udp" $uport6 $file6 _6
180 146 fi
181 147
182 148 # mountd IPv6 ports are also reachable through IPv4, so include
183 149 # them when generating IPv4 rules.
184 150 tports=`$SERVINFO -R -p -t -s "mountd" 2>/dev/null`
185 151 tports6=`$SERVINFO -R -p -t6 -s "mountd" 2>/dev/null`
186 152 if [ -n "$tports" -o -n "$tports6" ]; then
187 153 tports=`unique_ports $tports $tports6`
188 154 for tport in $tports; do
189 155 generate_rules $FMRI $policy "tcp" \
190 156 $tport $file
191 157 done
192 158 fi
193 159
194 160 if [ -n "$tports6" ]; then
195 161 for tport6 in $tports6; do
196 162 generate_rules $FMRI $policy "tcp" \
197 163 $tport6 $file6 _6
198 164 done
199 165 fi
200 166
201 167 uports=`$SERVINFO -R -p -u -s "mountd" 2>/dev/null`
202 168 uports6=`$SERVINFO -R -p -u6 -s "mountd" 2>/dev/null`
203 169 if [ -n "$uports" -o -n "$uports6" ]; then
204 170 uports=`unique_ports $uports $uports6`
205 171 for uport in $uports; do
206 172 generate_rules $FMRI $policy "udp" \
207 173 $uport $file
208 174 done
209 175 fi
210 176
211 177 if [ -n "$uports6" ]; then
212 178 for uport6 in $uports6; do
213 179 generate_rules $FMRI $policy "udp" \
214 180 $uport6 $file6 _6
215 181 done
216 182 fi
217 183
218 184 elif [ "$FMRI" = "$RQUOTA_FMRI" ]; then
219 185 iana_name=`svcprop -p inetd/name $FMRI`
220 186
221 187 # rquota IPv6 ports are also reachable through IPv4, so include
222 188 # them when generating IPv4 rules.
223 189 tports=`$SERVINFO -R -p -t -s $iana_name 2>/dev/null`
224 190 tports6=`$SERVINFO -R -p -t6 -s $iana_name 2>/dev/null`
225 191 if [ -n "$tports" -o -n "$tports6" ]; then
226 192 tports=`unique_ports $tports $tports6`
227 193 for tport in $tports; do
228 194 generate_rules $NFS_FMRI $policy "tcp" \
229 195 $tport $file
230 196 done
231 197 fi
232 198
233 199 if [ -n "$tports6" ]; then
234 200 for tport6 in $tports6; do
235 201 generate_rules $NFS_FMRI $policy "tcp" \
236 202 $tport6 $file6 _6
237 203 done
238 204 fi
239 205
240 206 uports=`$SERVINFO -R -p -u -s $iana_name 2>/dev/null`
241 207 uports6=`$SERVINFO -R -p -u6 -s $iana_name 2>/dev/null`
242 208 if [ -n "$uports" -o -n "$uports6" ]; then
243 209 uports=`unique_ports $uports $uports6`
244 210 for uport in $uports; do
245 211 generate_rules $NFS_FMRI $policy "udp" \
246 212 $uport $file
247 213 done
248 214 fi
249 215
250 216 if [ -n "$uports6" ]; then
251 217 for uport6 in $uports6; do
252 218 generate_rules $NFS_FMRI $policy "udp" \
253 219 $uport6 $file6 _6
254 220 done
255 221 fi
256 222 else
257 223 #
258 224 # Handle the client services here
259 225 #
260 226 if service_check_state $NFSCLI_FMRI $SMF_ONLINE; then
261 227 policy=none
262 228 ip=any
263 229 fi
264 230
265 231 restarter=`svcprop -p general/restarter $FMRI 2>/dev/null`
266 232 if [ "$restarter" = "$INETDFMRI" ]; then
267 233 iana_name=`svcprop -p inetd/name $FMRI`
268 234 isrpc=`svcprop -p inetd/isrpc $FMRI`
269 235 else
270 236 iana_name=`svcprop -p $FW_CONTEXT_PG/name $FMRI`
271 237 isrpc=`svcprop -p $FW_CONTEXT_PG/isrpc $FMRI`
272 238 fi
273 239
274 240 if [ "$isrpc" = "true" ]; then
275 241 tports=`$SERVINFO -R -p -t -s $iana_name 2>/dev/null`
276 242 tports6=`$SERVINFO -R -p -t6 -s $iana_name 2>/dev/null`
277 243 uports=`$SERVINFO -R -p -u -s $iana_name 2>/dev/null`
278 244 uports6=`$SERVINFO -R -p -u6 -s $iana_name 2>/dev/null`
279 245 else
280 246 tports=`$SERVINFO -p -t -s $iana_name 2>/dev/null`
281 247 tports6=`$SERVINFO -p -t6 -s $iana_name 2>/dev/null`
282 248 uports=`$SERVINFO -p -u -s $iana_name 2>/dev/null`
283 249 uports6=`$SERVINFO -p -u6 -s $iana_name 2>/dev/null`
284 250 fi
285 251
286 252 # IPv6 ports are also reachable through IPv4, so include
287 253 # them when generating IPv4 rules.
288 254 if [ -n "$tports" -o -n "$tports6" ]; then
289 255 tports=`unique_ports $tports $tports6`
290 256 for tport in $tports; do
291 257 generate_rules $FMRI $policy "tcp" $tport $file
292 258 done
293 259 fi
294 260
295 261 if [ -n "$tports6" ]; then
296 262 for tport6 in $tports6; do
297 263 generate_rules $FMRI $policy "tcp" $tport6 $file6 _6
298 264 done
299 265 fi
300 266
301 267 if [ -n "$uports" -o -n "$uports6" ]; then
302 268 uports=`unique_ports $uports $uports6`
303 269 for uport in $uports; do
304 270 generate_rules $FMRI $policy "udp" $uport $file
305 271 done
306 272 fi
307 273
308 274 if [ -n "$uports6" ]; then
309 275 for uport6 in $uports6; do
310 276 generate_rules $FMRI $policy "udp" $uport6 $file6 _6
311 277 done
312 278 fi
313 279 fi
314 280
315 281 ;;
316 282
317 283 *)
318 284 echo "Usage: $0 { start | stop | refresh }"
319 285 exit 1
320 286 ;;
321 287 esac
322 288 exit $SMF_EXIT_OK