8368 remove warlock leftovers from usr/src/uts
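warlock was Sun's static lock-analysis tool; the _NOTE() annotations it consumed (from sys/note.h) expand to nothing in a normal compile and were guarded with #ifndef __lint so lint builds would not see them. Since warlock is no longer run against the tree, the annotations are dead weight, and this change deletes them without touching the locking itself. The removed pattern looks like the C sketch below (identifiers taken from the diff; the comments are illustrative, not original source):

    kmutex_t udf_vfs_mutex;                 /* protects the instance list */
    struct udf_vfs *udf_vfs_instances;

    #ifndef __lint
    /* warlock-only hint: records that udf_vfs_mutex protects the list. */
    /* A no-op for the compiler; exactly what this change removes.      */
    _NOTE(MUTEX_PROTECTS_DATA(udf_vfs_mutex, udf_vfs_instances))
    #endif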
--- old/usr/src/uts/common/fs/udfs/udf_inode.c
+++ new/usr/src/uts/common/fs/udfs/udf_inode.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2017 by Delphix. All rights reserved.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/t_lock.h>
28 28 #include <sys/param.h>
29 29 #include <sys/time.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/sysmacros.h>
32 32 #include <sys/resource.h>
33 33 #include <sys/signal.h>
34 34 #include <sys/cred.h>
35 35 #include <sys/user.h>
36 36 #include <sys/buf.h>
37 37 #include <sys/vfs.h>
38 38 #include <sys/stat.h>
39 39 #include <sys/vnode.h>
40 40 #include <sys/mode.h>
41 41 #include <sys/proc.h>
42 42 #include <sys/disp.h>
43 43 #include <sys/file.h>
44 44 #include <sys/fcntl.h>
45 45 #include <sys/flock.h>
46 46 #include <sys/kmem.h>
47 47 #include <sys/uio.h>
48 48 #include <sys/dnlc.h>
49 49 #include <sys/conf.h>
50 50 #include <sys/errno.h>
51 51 #include <sys/mman.h>
52 52 #include <sys/fbuf.h>
53 53 #include <sys/pathname.h>
54 54 #include <sys/debug.h>
55 55 #include <sys/vmsystm.h>
56 56 #include <sys/cmn_err.h>
57 57 #include <sys/dirent.h>
58 58 #include <sys/errno.h>
59 59 #include <sys/modctl.h>
60 60 #include <sys/statvfs.h>
61 61 #include <sys/mount.h>
62 62 #include <sys/sunddi.h>
63 63 #include <sys/bootconf.h>
64 64 #include <sys/policy.h>
65 65
66 66 #include <vm/hat.h>
67 67 #include <vm/page.h>
68 68 #include <vm/pvn.h>
69 69 #include <vm/as.h>
70 70 #include <vm/seg.h>
71 71 #include <vm/seg_map.h>
72 72 #include <vm/seg_kmem.h>
73 73 #include <vm/seg_vn.h>
74 74 #include <vm/rm.h>
75 75 #include <vm/page.h>
76 76 #include <sys/swap.h>
77 77
78 78
79 79 #include <fs/fs_subr.h>
80 80
81 81
82 82 #include <sys/fs/udf_volume.h>
83 83 #include <sys/fs/udf_inode.h>
84 84
85 85 extern struct vnodeops *udf_vnodeops;
86 86
87 87 kmutex_t ud_sync_busy;
88 88 /*
89 89 * udf_vfs list manipulation routines
90 90 */
91 91 kmutex_t udf_vfs_mutex;
92 92 struct udf_vfs *udf_vfs_instances;
93 -#ifndef __lint
94 -_NOTE(MUTEX_PROTECTS_DATA(udf_vfs_mutex, udf_vfs_instances))
95 -#endif
96 93
97 94 union ihead ud_ihead[UD_HASH_SZ];
98 95 kmutex_t ud_icache_lock;
99 96
100 97 #define UD_BEGIN 0x0
101 98 #define UD_END 0x1
102 99 #define UD_UNKN 0x2
103 100 struct ud_inode *udf_ifreeh, *udf_ifreet;
104 101 kmutex_t udf_ifree_lock;
105 -#ifndef __lint
106 -_NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreeh))
107 -_NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreet))
108 -#endif
109 102
110 103 kmutex_t ud_nino_lock;
111 104 int32_t ud_max_inodes = 512;
112 105 int32_t ud_cur_inodes = 0;
113 -#ifndef __lint
114 -_NOTE(MUTEX_PROTECTS_DATA(ud_nino_lock, ud_cur_inodes))
115 -#endif
116 106
117 107 uid_t ud_default_uid = 0;
118 108 gid_t ud_default_gid = 3;
119 109
120 110 int32_t ud_updat_ext4(struct ud_inode *, struct file_entry *);
121 111 int32_t ud_updat_ext4096(struct ud_inode *, struct file_entry *);
122 112 void ud_make_sad(struct icb_ext *, struct short_ad *, int32_t);
123 113 void ud_make_lad(struct icb_ext *, struct long_ad *, int32_t);
124 114 void ud_trunc_ext4(struct ud_inode *, u_offset_t);
125 115 void ud_trunc_ext4096(struct ud_inode *, u_offset_t);
126 116 void ud_add_to_free_list(struct ud_inode *, uint32_t);
127 117 void ud_remove_from_free_list(struct ud_inode *, uint32_t);
128 118
129 119
130 120 #ifdef DEBUG
131 121 struct ud_inode *
132 122 ud_search_icache(struct vfs *vfsp, uint16_t prn, uint32_t ploc)
133 123 {
134 124 int32_t hno;
135 125 union ihead *ih;
136 126 struct ud_inode *ip;
137 127 struct udf_vfs *udf_vfsp;
138 128 uint32_t loc, dummy;
139 129
140 130 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
141 131 loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
142 132
143 133 mutex_enter(&ud_icache_lock);
144 134 hno = UD_INOHASH(vfsp->vfs_dev, loc);
145 135 ih = &ud_ihead[hno];
146 136 for (ip = ih->ih_chain[0];
147 137 ip != (struct ud_inode *)ih;
148 138 ip = ip->i_forw) {
149 139 if ((prn == ip->i_icb_prn) && (ploc == ip->i_icb_block) &&
150 140 (vfsp->vfs_dev == ip->i_dev)) {
151 141 mutex_exit(&ud_icache_lock);
152 142 return (ip);
153 143 }
154 144 }
155 145 mutex_exit(&ud_icache_lock);
156 146 return (0);
157 147 }
158 148 #endif
159 149
160 150 /* ARGSUSED */
161 151 int
162 152 ud_iget(struct vfs *vfsp, uint16_t prn, uint32_t ploc, struct ud_inode **ipp,
163 153 struct buf *pbp, struct cred *cred)
164 154 {
165 155 int32_t hno, nomem = 0, icb_tag_flags;
166 156 union ihead *ih;
167 157 struct ud_inode *ip;
168 158 struct vnode *vp;
169 159 struct buf *bp = NULL;
170 160 struct file_entry *fe;
171 161 struct udf_vfs *udf_vfsp;
172 162 struct ext_attr_hdr *eah;
173 163 struct attr_hdr *ah;
174 164 int32_t ea_len, ea_off;
175 165 daddr_t loc;
176 166 uint64_t offset = 0;
177 167 struct icb_ext *iext, *con;
178 168 uint32_t length, dummy;
179 169 int32_t ndesc, ftype;
180 170 uint16_t old_prn;
181 171 uint32_t old_block, old_lbano;
182 172
183 173 ud_printf("ud_iget\n");
184 174 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
185 175 old_prn = 0;
186 176 old_block = old_lbano = 0;
187 177 ftype = 0;
188 178 loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
189 179 loop:
190 180 mutex_enter(&ud_icache_lock);
191 181 hno = UD_INOHASH(vfsp->vfs_dev, loc);
192 182
193 183 ih = &ud_ihead[hno];
194 184 for (ip = ih->ih_chain[0];
195 185 ip != (struct ud_inode *)ih;
196 186 ip = ip->i_forw) {
197 187
198 188 if ((prn == ip->i_icb_prn) &&
199 189 (ploc == ip->i_icb_block) &&
200 190 (vfsp->vfs_dev == ip->i_dev)) {
201 191
202 192 vp = ITOV(ip);
203 193 VN_HOLD(vp);
204 194 mutex_exit(&ud_icache_lock);
205 195
206 196 rw_enter(&ip->i_contents, RW_READER);
207 197 mutex_enter(&ip->i_tlock);
208 198 if ((ip->i_flag & IREF) == 0) {
209 199 mutex_enter(&udf_ifree_lock);
210 200 ud_remove_from_free_list(ip, UD_UNKN);
211 201 mutex_exit(&udf_ifree_lock);
212 202 }
213 203 ip->i_flag |= IREF;
214 204 mutex_exit(&ip->i_tlock);
215 205 rw_exit(&ip->i_contents);
216 206
217 207 *ipp = ip;
218 208
219 209 if (pbp != NULL) {
220 210 brelse(pbp);
221 211 }
222 212
223 213 return (0);
224 214 }
225 215 }
226 216
227 217 /*
228 218 * We don't have it in the cache
229 219 * Allocate a new entry
230 220 */
231 221 tryagain:
232 222 mutex_enter(&udf_ifree_lock);
233 223 mutex_enter(&ud_nino_lock);
234 224 if (ud_cur_inodes > ud_max_inodes) {
235 225 int32_t purged;
236 226
237 227 mutex_exit(&ud_nino_lock);
238 228 while (udf_ifreeh == NULL ||
239 229 vn_has_cached_data(ITOV(udf_ifreeh))) {
240 230 /*
241 231 * Try to put an inode on the freelist that's
242 232 * sitting in the dnlc.
243 233 */
244 234 mutex_exit(&udf_ifree_lock);
245 235 purged = dnlc_fs_purge1(udf_vnodeops);
246 236 mutex_enter(&udf_ifree_lock);
247 237 if (!purged) {
248 238 break;
249 239 }
250 240 }
251 241 mutex_enter(&ud_nino_lock);
252 242 }
253 243
254 244 /*
255 245 * If there's a free one available and it has no pages attached
256 246 * take it. If we're over the high water mark, take it even if
257 247 * it has attached pages. Otherwise, make a new one.
258 248 */
259 249 if (udf_ifreeh &&
260 250 (nomem || !vn_has_cached_data(ITOV(udf_ifreeh)) ||
261 251 ud_cur_inodes >= ud_max_inodes)) {
262 252
263 253 mutex_exit(&ud_nino_lock);
264 254 ip = udf_ifreeh;
265 255 vp = ITOV(ip);
266 256
267 257 ud_remove_from_free_list(ip, UD_BEGIN);
268 258
269 259 mutex_exit(&udf_ifree_lock);
270 260 if (ip->i_flag & IREF) {
271 261 cmn_err(CE_WARN, "ud_iget: bad i_flag\n");
272 262 mutex_exit(&ud_icache_lock);
273 263 if (pbp != NULL) {
274 264 brelse(pbp);
275 265 }
276 266 return (EINVAL);
277 267 }
278 268 rw_enter(&ip->i_contents, RW_WRITER);
279 269
280 270 /*
281 271 		 * We call ud_syncip() to synchronously destroy all pages
282 272 * associated with the vnode before re-using it. The pageout
283 273 * thread may have beat us to this page so our v_count can
284 274 * be > 0 at this point even though we are on the freelist.
285 275 */
286 276 mutex_enter(&ip->i_tlock);
287 277 ip->i_flag = (ip->i_flag & IMODTIME) | IREF;
288 278 mutex_exit(&ip->i_tlock);
289 279
290 280 VN_HOLD(vp);
291 281 if (ud_syncip(ip, B_INVAL, I_SYNC) != 0) {
292 282 ud_idrop(ip);
293 283 rw_exit(&ip->i_contents);
294 284 mutex_exit(&ud_icache_lock);
295 285 goto loop;
296 286 }
297 287
298 288 mutex_enter(&ip->i_tlock);
299 289 ip->i_flag &= ~IMODTIME;
300 290 mutex_exit(&ip->i_tlock);
301 291
302 292 if (ip->i_ext) {
303 293 kmem_free(ip->i_ext,
304 294 sizeof (struct icb_ext) * ip->i_ext_count);
305 295 ip->i_ext = 0;
306 296 ip->i_ext_count = ip->i_ext_used = 0;
307 297 }
308 298
309 299 if (ip->i_con) {
310 300 kmem_free(ip->i_con,
311 301 sizeof (struct icb_ext) * ip->i_con_count);
312 302 ip->i_con = 0;
313 303 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
314 304 }
315 305
316 306 /*
317 307 * The pageout thread may not have had a chance to release
318 308 * its hold on the vnode (if it was active with this vp),
319 309 * but the pages should all be invalidated.
320 310 */
321 311 } else {
322 312 mutex_exit(&ud_nino_lock);
323 313 mutex_exit(&udf_ifree_lock);
324 314 /*
325 315 * Try to get memory for this inode without blocking.
326 316 * If we can't and there is something on the freelist,
327 317 * go ahead and use it, otherwise block waiting for
328 318 * memory holding the hash_lock. We expose a potential
329 319 * deadlock if all users of memory have to do a ud_iget()
330 320 * before releasing memory.
331 321 */
332 322 ip = (struct ud_inode *)kmem_zalloc(sizeof (struct ud_inode),
333 323 KM_NOSLEEP);
334 324 vp = vn_alloc(KM_NOSLEEP);
335 325 if ((ip == NULL) || (vp == NULL)) {
336 326 mutex_enter(&udf_ifree_lock);
337 327 if (udf_ifreeh) {
338 328 mutex_exit(&udf_ifree_lock);
339 329 if (ip != NULL)
340 330 kmem_free(ip, sizeof (struct ud_inode));
341 331 if (vp != NULL)
342 332 vn_free(vp);
343 333 nomem = 1;
344 334 goto tryagain;
345 335 } else {
346 336 mutex_exit(&udf_ifree_lock);
347 337 if (ip == NULL)
348 338 ip = (struct ud_inode *)
349 339 kmem_zalloc(
350 340 sizeof (struct ud_inode),
351 341 KM_SLEEP);
352 342 if (vp == NULL)
353 343 vp = vn_alloc(KM_SLEEP);
354 344 }
355 345 }
356 346 ip->i_vnode = vp;
357 347
358 348 ip->i_marker1 = (uint32_t)0xAAAAAAAA;
359 349 ip->i_marker2 = (uint32_t)0xBBBBBBBB;
360 350 ip->i_marker3 = (uint32_t)0xCCCCCCCC;
361 351
362 352 rw_init(&ip->i_rwlock, NULL, RW_DEFAULT, NULL);
363 353 rw_init(&ip->i_contents, NULL, RW_DEFAULT, NULL);
364 354 mutex_init(&ip->i_tlock, NULL, MUTEX_DEFAULT, NULL);
365 355
366 356 ip->i_forw = ip;
367 357 ip->i_back = ip;
368 358 vp->v_data = (caddr_t)ip;
369 359 vn_setops(vp, udf_vnodeops);
370 360 ip->i_flag = IREF;
371 361 cv_init(&ip->i_wrcv, NULL, CV_DRIVER, NULL);
372 362 mutex_enter(&ud_nino_lock);
373 363 ud_cur_inodes++;
374 364 mutex_exit(&ud_nino_lock);
375 365
376 366 rw_enter(&ip->i_contents, RW_WRITER);
377 367 }
378 368
379 369 if (vp->v_count < 1) {
380 370 cmn_err(CE_WARN, "ud_iget: v_count < 1\n");
381 371 mutex_exit(&ud_icache_lock);
382 372 rw_exit(&ip->i_contents);
383 373 if (pbp != NULL) {
384 374 brelse(pbp);
385 375 }
386 376 return (EINVAL);
387 377 }
388 378 if (vn_has_cached_data(vp)) {
389 379 cmn_err(CE_WARN, "ud_iget: v_pages not NULL\n");
390 380 mutex_exit(&ud_icache_lock);
391 381 rw_exit(&ip->i_contents);
392 382 if (pbp != NULL) {
393 383 brelse(pbp);
394 384 }
395 385 return (EINVAL);
396 386 }
397 387
398 388 /*
399 389 * Move the inode on the chain for its new (ino, dev) pair
400 390 */
401 391 remque(ip);
402 392 ip->i_forw = ip;
403 393 ip->i_back = ip;
404 394 insque(ip, ih);
405 395
406 396 ip->i_dev = vfsp->vfs_dev;
407 397 ip->i_udf = udf_vfsp;
408 398 ip->i_diroff = 0;
409 399 ip->i_devvp = ip->i_udf->udf_devvp;
410 400 ip->i_icb_prn = prn;
411 401 ip->i_icb_block = ploc;
412 402 ip->i_icb_lbano = loc;
413 403 ip->i_nextr = 0;
414 404 ip->i_seq = 0;
415 405 mutex_exit(&ud_icache_lock);
416 406
417 407 read_de:
418 408 if (pbp != NULL) {
419 409 /*
420 410 		 * The assumption is that we will not
421 411 * create a 4096 file
422 412 */
423 413 bp = pbp;
424 414 } else {
425 415 bp = ud_bread(ip->i_dev,
426 416 ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
427 417 udf_vfsp->udf_lbsize);
428 418 }
429 419
430 420 /*
431 421 * Check I/O errors
432 422 */
433 423 fe = (struct file_entry *)bp->b_un.b_addr;
434 424 if ((bp->b_flags & B_ERROR) ||
435 425 (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
436 426 ip->i_icb_block, 1, udf_vfsp->udf_lbsize) != 0)) {
437 427
438 428 if (((bp->b_flags & B_ERROR) == 0) &&
439 429 (ftype == STRAT_TYPE4096)) {
440 430 if (ud_check_te_unrec(udf_vfsp,
441 431 bp->b_un.b_addr, ip->i_icb_block) == 0) {
442 432
443 433 brelse(bp);
444 434
445 435 /*
446 436 * restore old file entry location
447 437 */
448 438 ip->i_icb_prn = old_prn;
449 439 ip->i_icb_block = old_block;
450 440 ip->i_icb_lbano = old_lbano;
451 441
452 442 /*
453 443 * reread old file entry
454 444 */
455 445 bp = ud_bread(ip->i_dev,
456 446 old_lbano << udf_vfsp->udf_l2d_shift,
457 447 udf_vfsp->udf_lbsize);
458 448 if ((bp->b_flags & B_ERROR) == 0) {
459 449 fe = (struct file_entry *)
460 450 bp->b_un.b_addr;
461 451 if (ud_verify_tag_and_desc(&fe->fe_tag,
462 452 UD_FILE_ENTRY, ip->i_icb_block, 1,
463 453 udf_vfsp->udf_lbsize) == 0) {
464 454 goto end_4096;
465 455 }
466 456 }
467 457 }
468 458 }
469 459 error_ret:
470 460 brelse(bp);
471 461 /*
472 462 * The inode may not contain anything useful. Mark it as
473 463 * having an error and let anyone else who was waiting for
474 464 * this know there was an error. Callers waiting for
475 465 * access to this inode in ud_iget will find
476 466 * the i_icb_lbano == 0, so there won't be a match.
477 467 * It remains in the cache. Put it back on the freelist.
478 468 */
479 469 mutex_enter(&vp->v_lock);
480 470 VN_RELE_LOCKED(vp);
481 471 mutex_exit(&vp->v_lock);
482 472 ip->i_icb_lbano = 0;
483 473
484 474 /*
485 475 	 * The following two lines make
486 476 	 * it impossible for anyone to do
487 477 	 * a VN_HOLD and then a VN_RELE,
488 478 	 * thus avoiding a ud_iinactive
489 479 */
490 480 ip->i_icb_prn = 0xffff;
491 481 ip->i_icb_block = 0;
492 482
493 483 /*
494 484 * remove the bad inode from hash chains
495 485 * so that during unmount we will not
496 486 * go through this inode
497 487 */
498 488 mutex_enter(&ud_icache_lock);
499 489 remque(ip);
500 490 ip->i_forw = ip;
501 491 ip->i_back = ip;
502 492 mutex_exit(&ud_icache_lock);
503 493
504 494 /* Put the inode at the front of the freelist */
505 495 mutex_enter(&ip->i_tlock);
506 496 mutex_enter(&udf_ifree_lock);
507 497 ud_add_to_free_list(ip, UD_BEGIN);
508 498 mutex_exit(&udf_ifree_lock);
509 499 ip->i_flag = 0;
510 500 mutex_exit(&ip->i_tlock);
511 501 rw_exit(&ip->i_contents);
512 502 return (EIO);
513 503 }
514 504
515 505 if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
516 506 struct buf *ibp = NULL;
517 507 struct indirect_entry *ie;
518 508
519 509 /*
520 510 * save old file_entry location
521 511 */
522 512 old_prn = ip->i_icb_prn;
523 513 old_block = ip->i_icb_block;
524 514 old_lbano = ip->i_icb_lbano;
525 515
526 516 ftype = STRAT_TYPE4096;
527 517
528 518 /*
529 519 		 * If astrat is 4096, different versions
530 520 		 * of the file exist on the media.
531 521 		 * We are supposed to get to the latest
532 522 		 * version of the file
533 523 */
534 524
535 525 /*
536 526 * IE is supposed to be in the next block
537 527 * of DE
538 528 */
539 529 ibp = ud_bread(ip->i_dev,
540 530 (ip->i_icb_lbano + 1) << udf_vfsp->udf_l2d_shift,
541 531 udf_vfsp->udf_lbsize);
542 532 if (ibp->b_flags & B_ERROR) {
543 533 /*
544 534 * Get rid of current ibp and
545 535 * then goto error on DE's bp
546 536 */
547 537 ie_error:
548 538 brelse(ibp);
549 539 goto error_ret;
550 540 }
551 541
552 542 ie = (struct indirect_entry *)ibp->b_un.b_addr;
553 543 if (ud_verify_tag_and_desc(&ie->ie_tag,
554 544 UD_INDIRECT_ENT, ip->i_icb_block + 1,
555 545 1, udf_vfsp->udf_lbsize) == 0) {
556 546 struct long_ad *lad;
557 547
558 548 lad = &ie->ie_indirecticb;
559 549 ip->i_icb_prn = SWAP_16(lad->lad_ext_prn);
560 550 ip->i_icb_block = SWAP_32(lad->lad_ext_loc);
561 551 ip->i_icb_lbano = ud_xlate_to_daddr(udf_vfsp,
562 552 ip->i_icb_prn, ip->i_icb_block,
563 553 1, &dummy);
564 554 brelse(ibp);
565 555 brelse(bp);
566 556 goto read_de;
567 557 }
568 558
569 559 /*
570 560 * If this block is TE or unrecorded we
571 561 * are at the last entry
572 562 */
573 563 if (ud_check_te_unrec(udf_vfsp, ibp->b_un.b_addr,
574 564 ip->i_icb_block + 1) != 0) {
575 565 /*
576 566 * This is not an unrecorded block
577 567 			 * Check if it is a valid IE and
578 568 * get the address of DE that
579 569 * this IE points to
580 570 */
581 571 goto ie_error;
582 572 }
583 573 /*
584 574 		 * If ud_check_te_unrec returns "0"
585 575 		 * this is the last in the chain:
586 576 		 * the latest file_entry
587 577 */
588 578 brelse(ibp);
589 579 }
590 580
591 581 end_4096:
592 582
593 583 ip->i_uid = SWAP_32(fe->fe_uid);
594 584 if (ip->i_uid == -1) {
595 585 ip->i_uid = ud_default_uid;
596 586 }
597 587 ip->i_gid = SWAP_32(fe->fe_gid);
598 588 if (ip->i_gid == -1) {
599 589 ip->i_gid = ud_default_gid;
600 590 }
601 591 ip->i_perm = SWAP_32(fe->fe_perms) & 0xFFFF;
602 592 if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
603 593 ip->i_perm &= ~(IWRITE | (IWRITE >> 5) | (IWRITE >> 10));
604 594 }
605 595
606 596 ip->i_nlink = SWAP_16(fe->fe_lcount);
607 597 ip->i_size = SWAP_64(fe->fe_info_len);
608 598 ip->i_lbr = SWAP_64(fe->fe_lbr);
609 599
610 600 ud_dtime2utime(&ip->i_atime, &fe->fe_acc_time);
611 601 ud_dtime2utime(&ip->i_mtime, &fe->fe_mod_time);
612 602 ud_dtime2utime(&ip->i_ctime, &fe->fe_attr_time);
613 603
614 604
615 605 ip->i_uniqid = SWAP_64(fe->fe_uniq_id);
616 606 icb_tag_flags = SWAP_16(fe->fe_icb_tag.itag_flags);
617 607
618 608 if ((fe->fe_icb_tag.itag_ftype == FTYPE_CHAR_DEV) ||
619 609 (fe->fe_icb_tag.itag_ftype == FTYPE_BLOCK_DEV)) {
620 610
621 611 eah = (struct ext_attr_hdr *)fe->fe_spec;
622 612 ea_off = GET_32(&eah->eah_ial);
623 613 ea_len = GET_32(&fe->fe_len_ear);
624 614 if (ea_len && (ud_verify_tag_and_desc(&eah->eah_tag,
625 615 UD_EXT_ATTR_HDR, ip->i_icb_block, 1,
626 616 sizeof (struct file_entry) -
627 617 offsetof(struct file_entry, fe_spec)) == 0)) {
628 618
629 619 while (ea_off < ea_len) {
630 620 /*
631 621 * We now check the validity of ea_off.
632 622 * (ea_len - ea_off) should be large enough to
633 623 * hold the attribute header atleast.
634 624 */
635 625 if ((ea_len - ea_off) <
636 626 sizeof (struct attr_hdr)) {
637 627 cmn_err(CE_NOTE,
638 628 "ea_len(0x%x) - ea_off(0x%x) is "
639 629 "too small to hold attr. info. "
640 630 "blockno 0x%x\n",
641 631 ea_len, ea_off, ip->i_icb_block);
642 632 goto error_ret;
643 633 }
644 634 ah = (struct attr_hdr *)&fe->fe_spec[ea_off];
645 635
646 636 /*
647 637 * Device Specification EA
648 638 */
649 639 if ((GET_32(&ah->ahdr_atype) == 12) &&
650 640 (ah->ahdr_astype == 1)) {
651 641 struct dev_spec_ear *ds;
652 642
653 643 if ((ea_len - ea_off) <
654 644 sizeof (struct dev_spec_ear)) {
655 645 cmn_err(CE_NOTE,
656 646 "ea_len(0x%x) - "
657 647 "ea_off(0x%x) is too small "
658 648 "to hold dev_spec_ear."
659 649 " blockno 0x%x\n",
660 650 ea_len, ea_off,
661 651 ip->i_icb_block);
662 652 goto error_ret;
663 653 }
664 654 ds = (struct dev_spec_ear *)ah;
665 655 ip->i_major = GET_32(&ds->ds_major_id);
666 656 ip->i_minor = GET_32(&ds->ds_minor_id);
667 657 }
668 658
669 659 /*
670 660 * Impl Use EA
671 661 */
672 662 if ((GET_32(&ah->ahdr_atype) == 2048) &&
673 663 (ah->ahdr_astype == 1)) {
674 664 struct iu_ea *iuea;
675 665 struct copy_mgt_info *cmi;
676 666
677 667 if ((ea_len - ea_off) <
678 668 sizeof (struct iu_ea)) {
679 669 cmn_err(CE_NOTE,
680 670 "ea_len(0x%x) - ea_off(0x%x) is too small to hold iu_ea. blockno 0x%x\n",
681 671 ea_len, ea_off,
682 672 ip->i_icb_block);
683 673 goto error_ret;
684 674 }
685 675 iuea = (struct iu_ea *)ah;
686 676 if (strncmp(iuea->iuea_ii.reg_id,
687 677 UDF_FREEEASPACE,
688 678 sizeof (iuea->iuea_ii.reg_id))
689 679 == 0) {
690 680 /* skip it */
691 681 iuea = iuea;
692 682 } else if (strncmp(iuea->iuea_ii.reg_id,
693 683 UDF_CGMS_INFO,
694 684 sizeof (iuea->iuea_ii.reg_id))
695 685 == 0) {
696 686 cmi = (struct copy_mgt_info *)
697 687 iuea->iuea_iu;
698 688 cmi = cmi;
699 689 }
700 690 }
701 691 /* ??? PARANOIA */
702 692 if (GET_32(&ah->ahdr_length) == 0) {
703 693 break;
704 694 }
705 695 ea_off += GET_32(&ah->ahdr_length);
706 696 }
707 697 }
708 698 }
709 699
710 700 ip->i_nextr = 0;
711 701
712 702 ip->i_maxent = SWAP_16(fe->fe_icb_tag.itag_max_ent);
713 703 ip->i_astrat = SWAP_16(fe->fe_icb_tag.itag_strategy);
714 704
715 705 ip->i_desc_type = icb_tag_flags & 0x7;
716 706
717 707 /* Strictly Paranoia */
718 708 ip->i_ext = NULL;
719 709 ip->i_ext_count = ip->i_ext_used = 0;
720 710 ip->i_con = 0;
721 711 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
722 712
723 713 ip->i_data_off = 0xB0 + SWAP_32(fe->fe_len_ear);
724 714 ip->i_max_emb = udf_vfsp->udf_lbsize - ip->i_data_off;
725 715 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
726 716 /* Short allocation desc */
727 717 struct short_ad *sad;
728 718
729 719 ip->i_ext_used = 0;
730 720 ip->i_ext_count = ndesc =
731 721 SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
732 722 ip->i_ext_count =
733 723 ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
734 724 ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
735 725 sizeof (struct icb_ext), KM_SLEEP);
736 726 ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
737 727 ip->i_cur_max_ext --;
738 728
739 729 if ((ip->i_astrat != STRAT_TYPE4) &&
740 730 (ip->i_astrat != STRAT_TYPE4096)) {
741 731 goto error_ret;
742 732 }
743 733
744 734 sad = (struct short_ad *)
745 735 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
746 736 iext = ip->i_ext;
747 737 while (ndesc --) {
748 738 length = SWAP_32(sad->sad_ext_len);
749 739 if ((length & 0x3FFFFFFF) == 0) {
750 740 break;
751 741 }
752 742 if (((length >> 30) & IB_MASK) == IB_CON) {
753 743 if (ip->i_con == NULL) {
754 744 ip->i_con_count = EXT_PER_MALLOC;
755 745 ip->i_con_used = 0;
756 746 ip->i_con_read = 0;
757 747 ip->i_con = kmem_zalloc(
758 748 ip->i_con_count *
759 749 sizeof (struct icb_ext),
760 750 KM_SLEEP);
761 751 }
762 752 con = &ip->i_con[ip->i_con_used];
763 753 con->ib_prn = 0;
764 754 con->ib_block = SWAP_32(sad->sad_ext_loc);
765 755 con->ib_count = length & 0x3FFFFFFF;
766 756 con->ib_flags = (length >> 30) & IB_MASK;
767 757 ip->i_con_used++;
768 758 sad ++;
769 759 break;
770 760 }
771 761 iext->ib_prn = 0;
772 762 iext->ib_block = SWAP_32(sad->sad_ext_loc);
773 763 length = SWAP_32(sad->sad_ext_len);
774 764 iext->ib_count = length & 0x3FFFFFFF;
775 765 iext->ib_offset = offset;
776 766 iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
777 767 iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
778 768 offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
779 769 (~udf_vfsp->udf_lbmask);
780 770
781 771 iext->ib_flags = (length >> 30) & IB_MASK;
782 772
783 773 ip->i_ext_used++;
784 774 iext++;
785 775 sad ++;
786 776 }
787 777 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
788 778 /* Long allocation desc */
789 779 struct long_ad *lad;
790 780
791 781 ip->i_ext_used = 0;
792 782 ip->i_ext_count = ndesc =
793 783 SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
794 784 ip->i_ext_count =
795 785 ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
796 786 ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
797 787 sizeof (struct icb_ext), KM_SLEEP);
798 788
799 789 ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct long_ad);
800 790 ip->i_cur_max_ext --;
801 791
802 792 if ((ip->i_astrat != STRAT_TYPE4) &&
803 793 (ip->i_astrat != STRAT_TYPE4096)) {
804 794 goto error_ret;
805 795 }
806 796
807 797 lad = (struct long_ad *)
808 798 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
809 799 iext = ip->i_ext;
810 800 while (ndesc --) {
811 801 length = SWAP_32(lad->lad_ext_len);
812 802 if ((length & 0x3FFFFFFF) == 0) {
813 803 break;
814 804 }
815 805 if (((length >> 30) & IB_MASK) == IB_CON) {
816 806 if (ip->i_con == NULL) {
817 807 ip->i_con_count = EXT_PER_MALLOC;
818 808 ip->i_con_used = 0;
819 809 ip->i_con_read = 0;
820 810 ip->i_con = kmem_zalloc(
821 811 ip->i_con_count *
822 812 sizeof (struct icb_ext),
823 813 KM_SLEEP);
824 814 }
825 815 con = &ip->i_con[ip->i_con_used];
826 816 con->ib_prn = SWAP_16(lad->lad_ext_prn);
827 817 con->ib_block = SWAP_32(lad->lad_ext_loc);
828 818 con->ib_count = length & 0x3FFFFFFF;
829 819 con->ib_flags = (length >> 30) & IB_MASK;
830 820 ip->i_con_used++;
831 821 lad ++;
832 822 break;
833 823 }
834 824 iext->ib_prn = SWAP_16(lad->lad_ext_prn);
835 825 iext->ib_block = SWAP_32(lad->lad_ext_loc);
836 826 iext->ib_count = length & 0x3FFFFFFF;
837 827 iext->ib_offset = offset;
838 828 iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
839 829 iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
840 830 offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
841 831 (~udf_vfsp->udf_lbmask);
842 832
843 833 iext->ib_flags = (length >> 30) & IB_MASK;
844 834
845 835 ip->i_ext_used++;
846 836 iext++;
847 837 lad ++;
848 838 }
849 839 } else if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
850 840 ASSERT(SWAP_32(fe->fe_len_ear) < udf_vfsp->udf_lbsize);
851 841
852 842 if (SWAP_32(fe->fe_len_ear) > udf_vfsp->udf_lbsize) {
853 843 goto error_ret;
854 844 }
855 845 } else {
856 846 /* Not to be used in UDF 1.50 */
857 847 cmn_err(CE_NOTE, "Invalid Allocation Descriptor type %x\n",
858 848 ip->i_desc_type);
859 849 goto error_ret;
860 850 }
861 851
862 852
863 853 if (icb_tag_flags & ICB_FLAG_SETUID) {
864 854 ip->i_char = ISUID;
865 855 } else {
866 856 ip->i_char = 0;
867 857 }
868 858 if (icb_tag_flags & ICB_FLAG_SETGID) {
869 859 ip->i_char |= ISGID;
870 860 }
871 861 if (icb_tag_flags & ICB_FLAG_STICKY) {
872 862 ip->i_char |= ISVTX;
873 863 }
874 864 switch (fe->fe_icb_tag.itag_ftype) {
875 865 case FTYPE_DIRECTORY :
876 866 ip->i_type = VDIR;
877 867 break;
878 868 case FTYPE_FILE :
879 869 ip->i_type = VREG;
880 870 break;
881 871 case FTYPE_BLOCK_DEV :
882 872 ip->i_type = VBLK;
883 873 break;
884 874 case FTYPE_CHAR_DEV :
885 875 ip->i_type = VCHR;
886 876 break;
887 877 case FTYPE_FIFO :
888 878 ip->i_type = VFIFO;
889 879 break;
890 880 case FTYPE_C_ISSOCK :
891 881 ip->i_type = VSOCK;
892 882 break;
893 883 case FTYPE_SYMLINK :
894 884 ip->i_type = VLNK;
895 885 break;
896 886 default :
897 887 ip->i_type = VNON;
898 888 break;
899 889 }
900 890
901 891 if (ip->i_type == VBLK || ip->i_type == VCHR) {
902 892 ip->i_rdev = makedevice(ip->i_major, ip->i_minor);
903 893 }
904 894
905 895 /*
906 896 * Fill in the rest. Don't bother with the vnode lock because nobody
907 897 * should be looking at this vnode. We have already invalidated the
908 898 * pages if it had any so pageout shouldn't be referencing this vnode
909 899 * and we are holding the write contents lock so a look up can't use
910 900 * the vnode.
911 901 */
912 902 vp->v_vfsp = vfsp;
913 903 vp->v_type = ip->i_type;
914 904 vp->v_rdev = ip->i_rdev;
915 905 if (ip->i_udf->udf_root_blkno == loc) {
916 906 vp->v_flag = VROOT;
917 907 } else {
918 908 vp->v_flag = 0;
919 909 }
920 910
921 911 brelse(bp);
922 912 *ipp = ip;
923 913 rw_exit(&ip->i_contents);
924 914 vn_exists(vp);
925 915 return (0);
926 916 }
927 917
928 918 void
929 919 ud_iinactive(struct ud_inode *ip, struct cred *cr)
930 920 {
931 921 int32_t busy = 0;
932 922 struct vnode *vp;
933 923 vtype_t type;
934 924 caddr_t addr, addr1;
935 925 size_t size, size1;
936 926
937 927
938 928 ud_printf("ud_iinactive\n");
939 929
940 930 /*
941 931 * Get exclusive access to inode data.
942 932 */
943 933 rw_enter(&ip->i_contents, RW_WRITER);
944 934
945 935 /*
946 936 * Make sure no one reclaimed the inode before we put
947 937 * it on the freelist or destroy it. We keep our 'hold'
948 938 * on the vnode from vn_rele until we are ready to
949 939 * do something with the inode (freelist/destroy).
950 940 *
951 941 * Pageout may put a VN_HOLD/VN_RELE at anytime during this
952 942 * operation via an async putpage, so we must make sure
953 943 * we don't free/destroy the inode more than once. ud_iget
954 944 * may also put a VN_HOLD on the inode before it grabs
955 945 * the i_contents lock. This is done so we don't kmem_free
956 946 * an inode that a thread is waiting on.
957 947 */
958 948 vp = ITOV(ip);
959 949
960 950 mutex_enter(&vp->v_lock);
961 951 if (vp->v_count < 1) {
962 952 cmn_err(CE_WARN, "ud_iinactive: v_count < 1\n");
963 953 return;
964 954 }
965 955 if ((vp->v_count > 1) || ((ip->i_flag & IREF) == 0)) {
966 956 VN_RELE_LOCKED(vp);
967 957 mutex_exit(&vp->v_lock);
968 958 rw_exit(&ip->i_contents);
969 959 return;
970 960 }
971 961 mutex_exit(&vp->v_lock);
972 962
973 963 /*
974 964 * For forced umount case: if i_udf is NULL, the contents of
975 965 * the inode and all the pages have already been pushed back
976 966 * to disk. It can be safely destroyed.
977 967 */
978 968 if (ip->i_udf == NULL) {
979 969 addr = (caddr_t)ip->i_ext;
980 970 size = sizeof (struct icb_ext) * ip->i_ext_count;
981 971 ip->i_ext = 0;
982 972 ip->i_ext_count = ip->i_ext_used = 0;
983 973 addr1 = (caddr_t)ip->i_con;
984 974 size1 = sizeof (struct icb_ext) * ip->i_con_count;
985 975 ip->i_con = 0;
986 976 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
987 977 rw_exit(&ip->i_contents);
988 978 vn_invalid(vp);
989 979
990 980 mutex_enter(&ud_nino_lock);
991 981 ud_cur_inodes--;
992 982 mutex_exit(&ud_nino_lock);
993 983
994 984 cv_destroy(&ip->i_wrcv); /* throttling */
995 985 rw_destroy(&ip->i_rwlock);
996 986 rw_exit(&ip->i_contents);
997 987 rw_destroy(&ip->i_contents);
998 988 kmem_free(addr, size);
999 989 kmem_free(addr1, size1);
1000 990 vn_free(vp);
1001 991 kmem_free(ip, sizeof (struct ud_inode));
1002 992 return;
1003 993 }
1004 994
1005 995 if ((ip->i_udf->udf_flags & UDF_FL_RDONLY) == 0) {
1006 996 if (ip->i_nlink <= 0) {
1007 997 ip->i_marker3 = (uint32_t)0xDDDD0000;
1008 998 ip->i_nlink = 1; /* prevent free-ing twice */
1009 999 (void) ud_itrunc(ip, 0, 0, cr);
1010 1000 type = ip->i_type;
1011 1001 ip->i_perm = 0;
1012 1002 ip->i_uid = 0;
1013 1003 ip->i_gid = 0;
1014 1004 ip->i_rdev = 0; /* Zero in core version of rdev */
1015 1005 mutex_enter(&ip->i_tlock);
1016 1006 ip->i_flag |= IUPD|ICHG;
1017 1007 mutex_exit(&ip->i_tlock);
1018 1008 ud_ifree(ip, type);
1019 1009 ip->i_icb_prn = 0xFFFF;
1020 1010 } else if (!IS_SWAPVP(vp)) {
1021 1011 /*
1022 1012 * Write the inode out if dirty. Pages are
1023 1013 * written back and put on the freelist.
1024 1014 */
1025 1015 (void) ud_syncip(ip, B_FREE | B_ASYNC, 0);
1026 1016 /*
1027 1017 * Do nothing if inode is now busy -- inode may
1028 1018 * have gone busy because ud_syncip
1029 1019 * releases/reacquires the i_contents lock
1030 1020 */
1031 1021 mutex_enter(&vp->v_lock);
1032 1022 if (vp->v_count > 1) {
1033 1023 VN_RELE_LOCKED(vp);
1034 1024 mutex_exit(&vp->v_lock);
1035 1025 rw_exit(&ip->i_contents);
1036 1026 return;
1037 1027 }
1038 1028 mutex_exit(&vp->v_lock);
1039 1029 } else {
1040 1030 ud_iupdat(ip, 0);
1041 1031 }
1042 1032 }
1043 1033
1044 1034
1045 1035 /*
1046 1036 * Put the inode on the end of the free list.
1047 1037 * Possibly in some cases it would be better to
1048 1038 * put the inode at the head of the free list,
1049 1039 * (e.g.: where i_perm == 0 || i_number == 0)
1050 1040 * but I will think about that later.
1051 1041 * (i_number is rarely 0 - only after an i/o error in ud_iget,
1052 1042 * where i_perm == 0, the inode will probably be wanted
1053 1043 * again soon for an ialloc, so possibly we should keep it)
1054 1044 */
1055 1045 /*
1056 1046 * If inode is invalid or there is no page associated with
1057 1047 * this inode, put the inode in the front of the free list.
1058 1048 * Since we have a VN_HOLD on the vnode, and checked that it
1059 1049 * wasn't already on the freelist when we entered, we can safely
1060 1050 * put it on the freelist even if another thread puts a VN_HOLD
1061 1051 * on it (pageout/ud_iget).
1062 1052 */
1063 1053 tryagain:
1064 1054 mutex_enter(&ud_nino_lock);
1065 1055 if (vn_has_cached_data(vp)) {
1066 1056 mutex_exit(&ud_nino_lock);
1067 1057 mutex_enter(&vp->v_lock);
1068 1058 VN_RELE_LOCKED(vp);
1069 1059 mutex_exit(&vp->v_lock);
1070 1060 mutex_enter(&ip->i_tlock);
1071 1061 mutex_enter(&udf_ifree_lock);
1072 1062 ud_add_to_free_list(ip, UD_END);
1073 1063 mutex_exit(&udf_ifree_lock);
1074 1064 ip->i_flag &= IMODTIME;
1075 1065 mutex_exit(&ip->i_tlock);
1076 1066 rw_exit(&ip->i_contents);
1077 1067 } else if (busy || ud_cur_inodes < ud_max_inodes) {
1078 1068 mutex_exit(&ud_nino_lock);
1079 1069 /*
1080 1070 * We're not over our high water mark, or it's
1081 1071 * not safe to kmem_free the inode, so put it
1082 1072 * on the freelist.
1083 1073 */
1084 1074 mutex_enter(&vp->v_lock);
1085 1075 if (vn_has_cached_data(vp)) {
1086 1076 cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
1087 1077 }
1088 1078 VN_RELE_LOCKED(vp);
1089 1079 mutex_exit(&vp->v_lock);
1090 1080
1091 1081 mutex_enter(&ip->i_tlock);
1092 1082 mutex_enter(&udf_ifree_lock);
1093 1083 ud_add_to_free_list(ip, UD_BEGIN);
1094 1084 mutex_exit(&udf_ifree_lock);
1095 1085 ip->i_flag &= IMODTIME;
1096 1086 mutex_exit(&ip->i_tlock);
1097 1087 rw_exit(&ip->i_contents);
1098 1088 } else {
1099 1089 mutex_exit(&ud_nino_lock);
1100 1090 if (vn_has_cached_data(vp)) {
1101 1091 cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
1102 1092 }
1103 1093 /*
1104 1094 * Try to free the inode. We must make sure
1105 1095 * it's o.k. to destroy this inode. We can't destroy
1106 1096 * if a thread is waiting for this inode. If we can't get the
1107 1097 * cache now, put it back on the freelist.
1108 1098 */
1109 1099 if (!mutex_tryenter(&ud_icache_lock)) {
1110 1100 busy = 1;
1111 1101 goto tryagain;
1112 1102 }
1113 1103 mutex_enter(&vp->v_lock);
1114 1104 if (vp->v_count > 1) {
1115 1105 /* inode is wanted in ud_iget */
1116 1106 busy = 1;
1117 1107 mutex_exit(&vp->v_lock);
1118 1108 mutex_exit(&ud_icache_lock);
1119 1109 goto tryagain;
1120 1110 }
1121 1111 mutex_exit(&vp->v_lock);
1122 1112 remque(ip);
1123 1113 ip->i_forw = ip;
1124 1114 ip->i_back = ip;
1125 1115 mutex_enter(&ud_nino_lock);
1126 1116 ud_cur_inodes--;
1127 1117 mutex_exit(&ud_nino_lock);
1128 1118 mutex_exit(&ud_icache_lock);
1129 1119 if (ip->i_icb_prn != 0xFFFF) {
1130 1120 ud_iupdat(ip, 0);
1131 1121 }
1132 1122 addr = (caddr_t)ip->i_ext;
1133 1123 size = sizeof (struct icb_ext) * ip->i_ext_count;
1134 1124 ip->i_ext = 0;
1135 1125 ip->i_ext_count = ip->i_ext_used = 0;
1136 1126 addr1 = (caddr_t)ip->i_con;
1137 1127 size1 = sizeof (struct icb_ext) * ip->i_con_count;
1138 1128 ip->i_con = 0;
1139 1129 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
1140 1130 cv_destroy(&ip->i_wrcv); /* throttling */
1141 1131 rw_destroy(&ip->i_rwlock);
1142 1132 rw_exit(&ip->i_contents);
1143 1133 rw_destroy(&ip->i_contents);
1144 1134 kmem_free(addr, size);
1145 1135 kmem_free(addr1, size1);
1146 1136 ip->i_marker3 = (uint32_t)0xDDDDDDDD;
1147 1137 vn_free(vp);
1148 1138 kmem_free(ip, sizeof (struct ud_inode));
1149 1139 }
1150 1140 }
1151 1141
1152 1142
1153 1143 void
1154 1144 ud_iupdat(struct ud_inode *ip, int32_t waitfor)
1155 1145 {
1156 1146 uint16_t flag, tag_flags;
1157 1147 int32_t error;
1158 1148 struct buf *bp;
1159 1149 struct udf_vfs *udf_vfsp;
1160 1150 struct file_entry *fe;
1161 1151 uint16_t crc_len = 0;
1162 1152
1163 1153 ASSERT(RW_WRITE_HELD(&ip->i_contents));
1164 1154
1165 1155 ud_printf("ud_iupdat\n");
1166 1156 /*
1167 1157 * Return if file system has been forcibly umounted.
1168 1158 */
1169 1159 if (ip->i_udf == NULL) {
1170 1160 return;
1171 1161 }
1172 1162
1173 1163 udf_vfsp = ip->i_udf;
1174 1164 flag = ip->i_flag; /* Atomic read */
1175 1165 if ((flag & (IUPD|IACC|ICHG|IMOD|IMODACC)) != 0) {
1176 1166 if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
1177 1167 ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC|IATTCHG);
1178 1168 return;
1179 1169 }
1180 1170
1181 1171 bp = ud_bread(ip->i_dev,
1182 1172 ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
1183 1173 ip->i_udf->udf_lbsize);
1184 1174 if (bp->b_flags & B_ERROR) {
1185 1175 brelse(bp);
1186 1176 return;
1187 1177 }
1188 1178 fe = (struct file_entry *)bp->b_un.b_addr;
1189 1179 if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
1190 1180 ip->i_icb_block,
1191 1181 1, ip->i_udf->udf_lbsize) != 0) {
1192 1182 brelse(bp);
1193 1183 return;
1194 1184 }
1195 1185
1196 1186 mutex_enter(&ip->i_tlock);
1197 1187 if (ip->i_flag & (IUPD|IACC|ICHG)) {
1198 1188 IMARK(ip);
1199 1189 }
1200 1190 ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC);
1201 1191 mutex_exit(&ip->i_tlock);
1202 1192
1203 1193 fe->fe_uid = SWAP_32(ip->i_uid);
1204 1194 fe->fe_gid = SWAP_32(ip->i_gid);
1205 1195
1206 1196 fe->fe_perms = SWAP_32(ip->i_perm);
1207 1197
1208 1198 fe->fe_lcount = SWAP_16(ip->i_nlink);
1209 1199 fe->fe_info_len = SWAP_64(ip->i_size);
1210 1200 fe->fe_lbr = SWAP_64(ip->i_lbr);
1211 1201
1212 1202 ud_utime2dtime(&ip->i_atime, &fe->fe_acc_time);
1213 1203 ud_utime2dtime(&ip->i_mtime, &fe->fe_mod_time);
1214 1204 ud_utime2dtime(&ip->i_ctime, &fe->fe_attr_time);
1215 1205
1216 1206 if (ip->i_char & ISUID) {
1217 1207 tag_flags = ICB_FLAG_SETUID;
1218 1208 } else {
1219 1209 tag_flags = 0;
1220 1210 }
1221 1211 if (ip->i_char & ISGID) {
1222 1212 tag_flags |= ICB_FLAG_SETGID;
1223 1213 }
1224 1214 if (ip->i_char & ISVTX) {
1225 1215 tag_flags |= ICB_FLAG_STICKY;
1226 1216 }
1227 1217 tag_flags |= ip->i_desc_type;
1228 1218
1229 1219 /*
1230 1220 		 * Remove the following; it is no longer contig
1231 1221 * if (ip->i_astrat == STRAT_TYPE4) {
1232 1222 * tag_flags |= ICB_FLAG_CONTIG;
1233 1223 * }
1234 1224 */
1235 1225
1236 1226 fe->fe_icb_tag.itag_flags &= ~SWAP_16((uint16_t)0x3C3);
1237 1227 fe->fe_icb_tag.itag_strategy = SWAP_16(ip->i_astrat);
1238 1228 fe->fe_icb_tag.itag_flags |= SWAP_16(tag_flags);
1239 1229
1240 1230 ud_update_regid(&fe->fe_impl_id);
1241 1231
1242 1232 crc_len = offsetof(struct file_entry, fe_spec) +
1243 1233 SWAP_32(fe->fe_len_ear);
1244 1234 if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
1245 1235 crc_len += ip->i_size;
1246 1236 fe->fe_len_adesc = SWAP_32(((uint32_t)ip->i_size));
1247 1237 } else if ((ip->i_size != 0) && (ip->i_ext != NULL) &&
1248 1238 (ip->i_ext_used != 0)) {
1249 1239
1250 1240 if ((error = ud_read_icb_till_off(ip,
1251 1241 ip->i_size)) == 0) {
1252 1242 if (ip->i_astrat == STRAT_TYPE4) {
1253 1243 error = ud_updat_ext4(ip, fe);
1254 1244 } else if (ip->i_astrat == STRAT_TYPE4096) {
1255 1245 error = ud_updat_ext4096(ip, fe);
1256 1246 }
1257 1247 if (error) {
1258 1248 udf_vfsp->udf_mark_bad = 1;
1259 1249 }
1260 1250 }
1261 1251 crc_len += SWAP_32(fe->fe_len_adesc);
1262 1252 } else {
1263 1253 fe->fe_len_adesc = 0;
1264 1254 }
1265 1255
1266 1256 /*
1267 1257 * Zero out the rest of the block
1268 1258 */
1269 1259 bzero(bp->b_un.b_addr + crc_len,
1270 1260 ip->i_udf->udf_lbsize - crc_len);
1271 1261
1272 1262 ud_make_tag(ip->i_udf, &fe->fe_tag,
1273 1263 UD_FILE_ENTRY, ip->i_icb_block, crc_len);
1274 1264
1275 1265
1276 1266 if (waitfor) {
1277 1267 BWRITE(bp);
1278 1268
1279 1269 /*
1280 1270 * Synchronous write has guaranteed that inode
1281 1271 * has been written on disk so clear the flag
1282 1272 */
1283 1273 ip->i_flag &= ~(IBDWRITE);
1284 1274 } else {
1285 1275 bdwrite(bp);
1286 1276
1287 1277 /*
1288 1278 * This write hasn't guaranteed that inode has been
1289 1279 * written on the disk.
1290 1280 * Since, all updat flags on indoe are cleared, we must
1291 1281 * remember the condition in case inode is to be updated
1292 1282 * synchronously later (e.g.- fsync()/fdatasync())
1293 1283 * and inode has not been modified yet.
1294 1284 */
1295 1285 ip->i_flag |= (IBDWRITE);
1296 1286 }
1297 1287 } else {
1298 1288 /*
1299 1289 * In case previous inode update was done asynchronously
1300 1290 * (IBDWRITE) and this inode update request wants guaranteed
1301 1291 * (synchronous) disk update, flush the inode.
1302 1292 */
1303 1293 if (waitfor && (flag & IBDWRITE)) {
1304 1294 blkflush(ip->i_dev,
1305 1295 (daddr_t)fsbtodb(udf_vfsp, ip->i_icb_lbano));
1306 1296 ip->i_flag &= ~(IBDWRITE);
1307 1297 }
1308 1298 }
1309 1299 }
1310 1300
1311 1301 int32_t
1312 1302 ud_updat_ext4(struct ud_inode *ip, struct file_entry *fe)
1313 1303 {
1314 1304 uint32_t dummy;
1315 1305 int32_t elen, ndent, index, count, con_index;
1316 1306 daddr_t bno;
1317 1307 struct buf *bp;
1318 1308 struct short_ad *sad;
1319 1309 struct long_ad *lad;
1320 1310 struct icb_ext *iext, *icon;
1321 1311
1322 1312
1323 1313 ASSERT(ip);
1324 1314 ASSERT(fe);
1325 1315 ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1326 1316 (ip->i_desc_type == ICB_FLAG_LONG_AD));
1327 1317
1328 1318 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1329 1319 elen = sizeof (struct short_ad);
1330 1320 sad = (struct short_ad *)
1331 1321 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1332 1322 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1333 1323 elen = sizeof (struct long_ad);
1334 1324 lad = (struct long_ad *)
1335 1325 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1336 1326 } else {
1337 1327 		/* This cannot happen; return */
1338 1328 return (EINVAL);
1339 1329 }
1340 1330
1341 1331 ndent = ip->i_max_emb / elen;
1342 1332
1343 1333 if (ip->i_ext_used < ndent) {
1344 1334
1345 1335 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1346 1336 ud_make_sad(ip->i_ext, sad, ip->i_ext_used);
1347 1337 } else {
1348 1338 ud_make_lad(ip->i_ext, lad, ip->i_ext_used);
1349 1339 }
1350 1340 fe->fe_len_adesc = SWAP_32(ip->i_ext_used * elen);
1351 1341 con_index = 0;
1352 1342 } else {
1353 1343
1354 1344 con_index = index = 0;
1355 1345
1356 1346 while (index < ip->i_ext_used) {
1357 1347 if (index == 0) {
1358 1348 /*
1359 1349 * bp is already read
1360 1350 * First few extents will go
1361 1351 * into the file_entry
1362 1352 */
1363 1353 count = ndent - 1;
1364 1354 fe->fe_len_adesc = SWAP_32(ndent * elen);
1365 1355 bp = NULL;
1366 1356
1367 1357 /*
1368 1358 * Last entry to be cont ext
1369 1359 */
1370 1360 icon = &ip->i_con[con_index];
1371 1361 } else {
1372 1362 /*
1373 1363 * Read the buffer
1374 1364 */
1375 1365 icon = &ip->i_con[con_index];
1376 1366
1377 1367 bno = ud_xlate_to_daddr(ip->i_udf,
1378 1368 icon->ib_prn, icon->ib_block,
1379 1369 icon->ib_count >> ip->i_udf->udf_l2d_shift,
1380 1370 &dummy);
1381 1371 bp = ud_bread(ip->i_dev,
1382 1372 bno << ip->i_udf->udf_l2d_shift,
1383 1373 ip->i_udf->udf_lbsize);
1384 1374 if (bp->b_flags & B_ERROR) {
1385 1375 brelse(bp);
1386 1376 return (EIO);
1387 1377 }
1388 1378
1389 1379 /*
1390 1380 				 * Figure out how many extents
1391 1381 				 * to handle this time
1392 1382 */
1393 1383 count = (bp->b_bcount -
1394 1384 sizeof (struct alloc_ext_desc)) / elen;
1395 1385 if (count > (ip->i_ext_used - index)) {
1396 1386 count = ip->i_ext_used - index;
1397 1387 } else {
1398 1388 count --;
1399 1389 }
1400 1390 con_index++;
1401 1391 if (con_index >= ip->i_con_used) {
1402 1392 icon = NULL;
1403 1393 } else {
1404 1394 icon = &ip->i_con[con_index];
1405 1395 }
1406 1396 }
1407 1397
1408 1398
1409 1399
1410 1400 /*
1411 1401 * convert to on disk form and
1412 1402 * update
1413 1403 */
1414 1404 iext = &ip->i_ext[index];
1415 1405 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1416 1406 if (index != 0) {
1417 1407 sad = (struct short_ad *)
1418 1408 (bp->b_un.b_addr +
1419 1409 sizeof (struct alloc_ext_desc));
1420 1410 }
1421 1411 ud_make_sad(iext, sad, count);
1422 1412 sad += count;
1423 1413 if (icon != NULL) {
1424 1414 ud_make_sad(icon, sad, 1);
1425 1415 }
1426 1416 } else {
1427 1417 if (index != 0) {
1428 1418 lad = (struct long_ad *)
1429 1419 (bp->b_un.b_addr +
1430 1420 sizeof (struct alloc_ext_desc));
1431 1421 }
1432 1422 ud_make_lad(iext, lad, count);
1433 1423 lad += count;
1434 1424 if (icon != NULL) {
1435 1425 ud_make_lad(icon, lad, 1);
1436 1426 }
1437 1427 }
1438 1428
1439 1429 if (con_index != 0) {
1440 1430 struct alloc_ext_desc *aed;
1441 1431 int32_t sz;
1442 1432 struct icb_ext *oicon;
1443 1433
1444 1434 oicon = &ip->i_con[con_index - 1];
1445 1435 sz = count * elen;
1446 1436 if (icon != NULL) {
1447 1437 sz += elen;
1448 1438 }
1449 1439 aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
1450 1440 aed->aed_len_aed = SWAP_32(sz);
1451 1441 if (con_index == 1) {
1452 1442 aed->aed_rev_ael =
1453 1443 SWAP_32(ip->i_icb_block);
1454 1444 } else {
1455 1445 aed->aed_rev_ael =
1456 1446 SWAP_32(oicon->ib_block);
1457 1447 }
1458 1448 sz += sizeof (struct alloc_ext_desc);
1459 1449 ud_make_tag(ip->i_udf, &aed->aed_tag,
1460 1450 UD_ALLOC_EXT_DESC, oicon->ib_block, sz);
1461 1451 }
1462 1452
1463 1453 /*
1464 1454 * Write back to disk
1465 1455 */
1466 1456 if (bp != NULL) {
1467 1457 BWRITE(bp);
1468 1458 }
1469 1459 index += count;
1470 1460 }
1471 1461
1472 1462 }
1473 1463
1474 1464 if (con_index != ip->i_con_used) {
1475 1465 int32_t lbmask, l2b, temp;
1476 1466
1477 1467 temp = con_index;
1478 1468 lbmask = ip->i_udf->udf_lbmask;
1479 1469 l2b = ip->i_udf->udf_l2b_shift;
1480 1470 /*
1481 1471 * Free unused continuation extents
1482 1472 */
1483 1473 for (; con_index < ip->i_con_used; con_index++) {
1484 1474 icon = &ip->i_con[con_index];
1485 1475 count = (icon->ib_count + lbmask) >> l2b;
1486 1476 ud_free_space(ip->i_udf->udf_vfs, icon->ib_prn,
1487 1477 icon->ib_block, count);
1488 1478 count = (count << l2b) - sizeof (struct alloc_ext_desc);
1489 1479 ip->i_cur_max_ext -= (count / elen) - 1;
1490 1480 }
1491 1481 ip->i_con_used = temp;
1492 1482 }
1493 1483 return (0);
1494 1484 }
1495 1485
1496 1486 /* ARGSUSED */
1497 1487 int32_t
1498 1488 ud_updat_ext4096(struct ud_inode *ip, struct file_entry *fe)
1499 1489 {
1500 1490 return (ENXIO);
1501 1491 }
1502 1492
1503 1493 void
1504 1494 ud_make_sad(struct icb_ext *iext, struct short_ad *sad, int32_t count)
1505 1495 {
1506 1496 int32_t index = 0, scount;
1507 1497
1508 1498 ASSERT(iext);
1509 1499 ASSERT(sad);
1510 1500
1511 1501 if (count != 0) {
1512 1502 ASSERT(count > 0);
1513 1503 while (index < count) {
1514 1504 scount = (iext->ib_count & 0x3FFFFFFF) |
1515 1505 (iext->ib_flags << 30);
1516 1506 sad->sad_ext_len = SWAP_32(scount);
1517 1507 sad->sad_ext_loc = SWAP_32(iext->ib_block);
1518 1508 sad++;
1519 1509 iext++;
1520 1510 index++;
1521 1511 }
1522 1512 }
1523 1513 }
1524 1514
1525 1515 void
1526 1516 ud_make_lad(struct icb_ext *iext, struct long_ad *lad, int32_t count)
1527 1517 {
1528 1518 int32_t index = 0, scount;
1529 1519
1530 1520 ASSERT(iext);
1531 1521 ASSERT(lad);
1532 1522
1533 1523 if (count != 0) {
1534 1524 ASSERT(count > 0);
1535 1525
1536 1526 while (index < count) {
1537 1527 lad->lad_ext_prn = SWAP_16(iext->ib_prn);
1538 1528 scount = (iext->ib_count & 0x3FFFFFFF) |
1539 1529 (iext->ib_flags << 30);
1540 1530 lad->lad_ext_len = SWAP_32(scount);
1541 1531 lad->lad_ext_loc = SWAP_32(iext->ib_block);
1542 1532 lad++;
1543 1533 iext++;
1544 1534 index++;
1545 1535 }
1546 1536 }
1547 1537 }
1548 1538
1549 1539 /*
1550 1540 * Truncate the inode ip to at most length size.
1551 1541 * Free affected disk blocks -- the blocks of the
1552 1542 * file are removed in reverse order.
1553 1543 */
1554 1544 /* ARGSUSED */
1555 1545 int
1556 1546 ud_itrunc(struct ud_inode *oip, u_offset_t length,
1557 1547 int32_t flags, struct cred *cr)
1558 1548 {
1559 1549 int32_t error, boff;
1560 1550 off_t bsize;
1561 1551 mode_t mode;
1562 1552 struct udf_vfs *udf_vfsp;
1563 1553
1564 1554 ud_printf("ud_itrunc\n");
1565 1555
1566 1556 ASSERT(RW_WRITE_HELD(&oip->i_contents));
1567 1557 udf_vfsp = oip->i_udf;
1568 1558 bsize = udf_vfsp->udf_lbsize;
1569 1559
1570 1560 /*
1571 1561 * We only allow truncation of regular files and directories
1572 1562 	 * to arbitrary lengths here. In addition, we allow symbolic
1573 1563 * links to be truncated only to zero length. Other inode
1574 1564 * types cannot have their length set here.
1575 1565 */
1576 1566 mode = oip->i_type;
1577 1567 if (mode == VFIFO) {
1578 1568 return (0);
1579 1569 }
1580 1570 if ((mode != VREG) && (mode != VDIR) &&
1581 1571 (!(mode == VLNK && length == 0))) {
1582 1572 return (EINVAL);
1583 1573 }
1584 1574 if (length == oip->i_size) {
1585 1575 /* update ctime and mtime to please POSIX tests */
1586 1576 mutex_enter(&oip->i_tlock);
1587 1577 oip->i_flag |= ICHG |IUPD;
1588 1578 mutex_exit(&oip->i_tlock);
1589 1579 return (0);
1590 1580 }
1591 1581
1592 1582 boff = blkoff(udf_vfsp, length);
1593 1583
1594 1584 if (length > oip->i_size) {
1595 1585 /*
1596 1586 		 * Truncate-up case. ud_bmap_write will ensure that the right blocks
1597 1587 * are allocated. This includes doing any work needed for
1598 1588 * allocating the last block.
1599 1589 */
1600 1590 if (boff == 0) {
1601 1591 error = ud_bmap_write(oip, length - 1,
1602 1592 (int)bsize, 0, cr);
1603 1593 } else {
1604 1594 error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1605 1595 }
1606 1596 if (error == 0) {
1607 1597 u_offset_t osize = oip->i_size;
1608 1598 oip->i_size = length;
1609 1599
1610 1600 /*
1611 1601 * Make sure we zero out the remaining bytes of
1612 1602 * the page in case a mmap scribbled on it. We
1613 1603 * can't prevent a mmap from writing beyond EOF
1614 1604 * on the last page of a file.
1615 1605 */
1616 1606 if ((boff = blkoff(udf_vfsp, osize)) != 0) {
1617 1607 pvn_vpzero(ITOV(oip), osize,
1618 1608 (uint32_t)(bsize - boff));
1619 1609 }
1620 1610 mutex_enter(&oip->i_tlock);
1621 1611 oip->i_flag |= ICHG;
1622 1612 ITIMES_NOLOCK(oip);
1623 1613 mutex_exit(&oip->i_tlock);
1624 1614 }
1625 1615 return (error);
1626 1616 }
1627 1617
1628 1618 /*
1629 1619 * Update the pages of the file. If the file is not being
1630 1620 * truncated to a block boundary, the contents of the
1631 1621 	 * pages following the end of the file must be zeroed
1632 1622 	 * in case it ever becomes accessible again because
1633 1623 * of subsequent file growth.
1634 1624 */
1635 1625 if (boff == 0) {
1636 1626 (void) pvn_vplist_dirty(ITOV(oip), length,
1637 1627 ud_putapage, B_INVAL | B_TRUNC, CRED());
1638 1628 } else {
1639 1629 /*
1640 1630 * Make sure that the last block is properly allocated.
1641 1631 * We only really have to do this if the last block is
1642 1632 * actually allocated. Just to be sure, we do it now
1643 1633 * independent of current allocation.
1644 1634 */
1645 1635 error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1646 1636 if (error) {
1647 1637 return (error);
1648 1638 }
1649 1639
1650 1640 pvn_vpzero(ITOV(oip), length, (uint32_t)(bsize - boff));
1651 1641
1652 1642 (void) pvn_vplist_dirty(ITOV(oip), length,
1653 1643 ud_putapage, B_INVAL | B_TRUNC, CRED());
1654 1644 }
1655 1645
1656 1646
1657 1647 /* Free the blocks */
1658 1648 if (oip->i_desc_type == ICB_FLAG_ONE_AD) {
1659 1649 if (length > oip->i_max_emb) {
1660 1650 return (EFBIG);
1661 1651 }
1662 1652 oip->i_size = length;
1663 1653 mutex_enter(&oip->i_tlock);
1664 1654 oip->i_flag |= ICHG|IUPD;
1665 1655 mutex_exit(&oip->i_tlock);
1666 1656 ud_iupdat(oip, 1);
1667 1657 } else {
1668 1658 if ((error = ud_read_icb_till_off(oip, oip->i_size)) != 0) {
1669 1659 return (error);
1670 1660 }
1671 1661
1672 1662 if (oip->i_astrat == STRAT_TYPE4) {
1673 1663 ud_trunc_ext4(oip, length);
1674 1664 } else if (oip->i_astrat == STRAT_TYPE4096) {
1675 1665 ud_trunc_ext4096(oip, length);
1676 1666 }
1677 1667 }
1678 1668
1679 1669 done:
1680 1670 return (0);
1681 1671 }
1682 1672
1683 1673 void
1684 1674 ud_trunc_ext4(struct ud_inode *ip, u_offset_t length)
1685 1675 {
1686 1676 int32_t index, l2b, count, ecount;
1687 1677 int32_t elen, ndent, nient;
1688 1678 u_offset_t ext_beg, ext_end;
1689 1679 struct icb_ext *iext, *icon;
1690 1680 int32_t lbmask, ext_used;
1691 1681 uint32_t loc;
1692 1682 struct icb_ext text;
1693 1683 uint32_t con_freed;
1694 1684
1695 1685 ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1696 1686 (ip->i_desc_type == ICB_FLAG_LONG_AD));
1697 1687
1698 1688 if (ip->i_ext_used == 0) {
1699 1689 return;
1700 1690 }
1701 1691
1702 1692 ext_used = ip->i_ext_used;
1703 1693
1704 1694 lbmask = ip->i_udf->udf_lbmask;
1705 1695 l2b = ip->i_udf->udf_l2b_shift;
1706 1696
1707 1697 ASSERT(ip->i_ext);
1708 1698
1709 1699 ip->i_lbr = 0;
1710 1700 for (index = 0; index < ext_used; index++) {
1711 1701 iext = &ip->i_ext[index];
1712 1702
1713 1703 /*
1714 1704 		 * Find the beginning and end
1715 1705 		 * of the current extent
1716 1706 */
1717 1707 ext_beg = iext->ib_offset;
1718 1708 ext_end = iext->ib_offset +
1719 1709 ((iext->ib_count + lbmask) & ~lbmask);
1720 1710
1721 1711 /*
1722 1712 		 * This is the extent that has offset "length".
1723 1713 		 * Make a copy of this extent and
1724 1714 * remember the index. We can use
1725 1715 * it to free blocks
1726 1716 */
1727 1717 if ((length <= ext_end) && (length >= ext_beg)) {
1728 1718 text = *iext;
1729 1719
1730 1720 iext->ib_count = length - ext_beg;
1731 1721 ip->i_ext_used = index + 1;
1732 1722 break;
1733 1723 }
1734 1724 if (iext->ib_flags != IB_UN_RE_AL) {
1735 1725 ip->i_lbr += iext->ib_count >> l2b;
1736 1726 }
1737 1727 }
1738 1728 if (ip->i_ext_used != index) {
1739 1729 if (iext->ib_flags != IB_UN_RE_AL) {
1740 1730 ip->i_lbr +=
1741 1731 ((iext->ib_count + lbmask) & ~lbmask) >> l2b;
1742 1732 }
1743 1733 }
1744 1734
1745 1735 ip->i_size = length;
1746 1736 mutex_enter(&ip->i_tlock);
1747 1737 ip->i_flag |= ICHG|IUPD;
1748 1738 mutex_exit(&ip->i_tlock);
1749 1739 ud_iupdat(ip, 1);
1750 1740
1751 1741 /*
1752 1742 * Free the unused space
1753 1743 */
1754 1744 if (text.ib_flags != IB_UN_RE_AL) {
1755 1745 count = (ext_end - length) >> l2b;
1756 1746 if (count) {
1757 1747 loc = text.ib_block +
1758 1748 (((length - text.ib_offset) + lbmask) >> l2b);
1759 1749 ud_free_space(ip->i_udf->udf_vfs, text.ib_prn,
1760 1750 loc, count);
1761 1751 }
1762 1752 }
1763 1753 for (index = ip->i_ext_used; index < ext_used; index++) {
1764 1754 iext = &ip->i_ext[index];
1765 1755 if (iext->ib_flags != IB_UN_RE_AL) {
1766 1756 count = (iext->ib_count + lbmask) >> l2b;
1767 1757 ud_free_space(ip->i_udf->udf_vfs, iext->ib_prn,
1768 1758 iext->ib_block, count);
1769 1759 }
1770 1760 bzero(iext, sizeof (struct icb_ext));
1771 1761 continue;
1772 1762 }
1773 1763
1774 1764 /*
1775 1765 * release any continuation blocks
1776 1766 */
1777 1767 if (ip->i_con) {
1778 1768
1779 1769 ASSERT(ip->i_con_count >= ip->i_con_used);
1780 1770
1781 1771 /*
1782 1772 * Find out how many indirect blocks
1783 1773 * are required and release the rest
1784 1774 */
1785 1775 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1786 1776 elen = sizeof (struct short_ad);
1787 1777 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1788 1778 elen = sizeof (struct long_ad);
1789 1779 }
1790 1780 ndent = ip->i_max_emb / elen;
1791 1781 if (ip->i_ext_used > ndent) {
1792 1782 ecount = ip->i_ext_used - ndent;
1793 1783 } else {
1794 1784 ecount = 0;
1795 1785 }
1796 1786 con_freed = 0;
1797 1787 for (index = 0; index < ip->i_con_used; index++) {
1798 1788 icon = &ip->i_con[index];
1799 1789 nient = icon->ib_count -
1800 1790 (sizeof (struct alloc_ext_desc) + elen);
1801 1791 /* Header + 1 indirect extent */
1802 1792 nient /= elen;
1803 1793 if (ecount) {
1804 1794 if (ecount > nient) {
1805 1795 ecount -= nient;
1806 1796 } else {
1807 1797 ecount = 0;
1808 1798 }
1809 1799 } else {
1810 1800 count = ((icon->ib_count + lbmask) &
1811 1801 ~lbmask) >> l2b;
1812 1802 ud_free_space(ip->i_udf->udf_vfs,
1813 1803 icon->ib_prn, icon->ib_block, count);
1814 1804 con_freed++;
1815 1805 ip->i_cur_max_ext -= nient;
1816 1806 }
1817 1807 }
1818 1808 /*
1819 1809 		 * set the continuation extents used (i_con_used) to the correct
1820 1810 * value. It is possible for i_con_used to be zero,
1821 1811 * if we free up all continuation extents. This happens
1822 1812 * when ecount is 0 before entering the for loop above.
1823 1813 */
1824 1814 ip->i_con_used -= con_freed;
1825 1815 if (ip->i_con_read > ip->i_con_used) {
1826 1816 ip->i_con_read = ip->i_con_used;
1827 1817 }
1828 1818 }
1829 1819 }
1830 1820
1831 1821 void
1832 1822 ud_trunc_ext4096(struct ud_inode *ip, u_offset_t length)
1833 1823 {
1834 1824 /*
1835 1825 	 * The truncate code is the same for
1836 1826 	 * both file types 4 and 4096
1837 1827 */
1838 1828 ud_trunc_ext4(ip, length);
1839 1829 }
1840 1830
1841 1831 /*
1842 1832 * Remove any inodes in the inode cache belonging to dev
1843 1833 *
1844 1834  * There should not be any active ones; return an error if any are found,
1845 1835  * but still invalidate the others (N.B.: a user error, not a system error).
1846 1836 *
1847 1837 * Also, count the references to dev by block devices - this really
1848 1838 * has nothing to do with the object of the procedure, but as we have
1849 1839 * to scan the inode table here anyway, we might as well get the
1850 1840 * extra benefit.
1851 1841 */
1852 1842 int32_t
1853 1843 ud_iflush(struct vfs *vfsp)
1854 1844 {
1855 1845 int32_t index, busy = 0;
1856 1846 union ihead *ih;
1857 1847 struct udf_vfs *udf_vfsp;
1858 1848 dev_t dev;
1859 1849 struct vnode *rvp, *vp;
1860 1850 struct ud_inode *ip, *next;
1861 1851
1862 1852 ud_printf("ud_iflush\n");
1863 1853 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
1864 1854 rvp = udf_vfsp->udf_root;
1865 1855 dev = vfsp->vfs_dev;
1866 1856
1867 1857 mutex_enter(&ud_icache_lock);
1868 1858 for (index = 0; index < UD_HASH_SZ; index++) {
1869 1859 ih = &ud_ihead[index];
1870 1860
1871 1861 next = ih->ih_chain[0];
1872 1862 while (next != (struct ud_inode *)ih) {
1873 1863 ip = next;
1874 1864 next = ip->i_forw;
1875 1865 if (ip->i_dev != dev) {
1876 1866 continue;
1877 1867 }
1878 1868 vp = ITOV(ip);
1879 1869 /*
1880 1870 * root inode is processed by the caller
1881 1871 */
1882 1872 if (vp == rvp) {
1883 1873 if (vp->v_count > 1) {
1884 1874 busy = -1;
1885 1875 }
1886 1876 continue;
1887 1877 }
1888 1878 if (ip->i_flag & IREF) {
1889 1879 /*
1890 1880 * Set error indicator for return value,
1891 1881 * but continue invalidating other
1892 1882 * inodes.
1893 1883 */
1894 1884 busy = -1;
1895 1885 continue;
1896 1886 }
1897 1887
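			/*
			 * Take the inode off its hash chain and leave it
			 * self-linked before flushing and releasing it.
			 */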
1898 1888 rw_enter(&ip->i_contents, RW_WRITER);
1899 1889 remque(ip);
1900 1890 ip->i_forw = ip;
1901 1891 ip->i_back = ip;
1902 1892 /*
1903 1893 			 * Hold the vnode since it's not done
1904 1894 * in VOP_PUTPAGE anymore.
1905 1895 */
1906 1896 VN_HOLD(vp);
1907 1897 /*
1908 1898 * XXX Synchronous write holding
1909 1899 * cache lock
1910 1900 */
1911 1901 (void) ud_syncip(ip, B_INVAL, I_SYNC);
1912 1902 rw_exit(&ip->i_contents);
1913 1903 VN_RELE(vp);
1914 1904 }
1915 1905 }
1916 1906 mutex_exit(&ud_icache_lock);
1917 1907
1918 1908 return (busy);
1919 1909 }
1920 1910
1921 1911
1922 1912 /*
1923 1913 * Check mode permission on inode. Mode is READ, WRITE or EXEC.
1924 1914 * In the case of WRITE, the read-only status of the file system
1925 1915 * is checked. The applicable mode bits are compared with the
1926 1916 * requested form of access. If bits are missing, the secpolicy
1927 1917 * function will check for privileges.
1928 1918 */
1929 1919 int
1930 1920 ud_iaccess(struct ud_inode *ip, int32_t mode, struct cred *cr, int dolock)
1931 1921 {
1932 1922 int shift = 0;
1933 1923 int ret = 0;
1934 1924
1935 1925 if (dolock)
1936 1926 rw_enter(&ip->i_contents, RW_READER);
1937 1927 ASSERT(RW_LOCK_HELD(&ip->i_contents));
1938 1928
1939 1929 ud_printf("ud_iaccess\n");
1940 1930 if (mode & IWRITE) {
1941 1931 /*
1942 1932 * Disallow write attempts on read-only
1943 1933 * file systems, unless the file is a block
1944 1934 * or character device or a FIFO.
1945 1935 */
1946 1936 if (ip->i_udf->udf_flags & UDF_FL_RDONLY) {
1947 1937 if ((ip->i_type != VCHR) &&
1948 1938 (ip->i_type != VBLK) &&
1949 1939 (ip->i_type != VFIFO)) {
1950 1940 ret = EROFS;
1951 1941 goto out;
1952 1942 }
1953 1943 }
1954 1944 }
1955 1945
1956 1946 /*
1957 1947 * Access check is based on only
1958 1948 * one of owner, group, public.
1959 1949 * If not owner, then check group.
1960 1950 * If not a member of the group, then
1961 1951 * check public access.
1962 1952 */
1963 1953 if (crgetuid(cr) != ip->i_uid) {
1964 1954 shift += 5;
1965 1955 if (!groupmember((uid_t)ip->i_gid, cr))
1966 1956 shift += 5;
1967 1957 }
1968 1958
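	/*
	 * UDF carries five permission bits per class (owner, group,
	 * other), hence the shift of 5 per step rather than the
	 * usual 3; UD2VA_PERM maps UDF bits to VFS permission bits.
	 */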
1969 1959 ret = secpolicy_vnode_access2(cr, ITOV(ip), ip->i_uid,
1970 1960 UD2VA_PERM(ip->i_perm << shift), UD2VA_PERM(mode));
1971 1961
1972 1962 out:
1973 1963 if (dolock)
1974 1964 rw_exit(&ip->i_contents);
1975 1965 return (ret);
1976 1966 }
1977 1967
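/*
 * Update the in-core timestamps for whichever of IACC, IUPD
 * and ICHG are set in i_flag, using the current time.
 */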
1978 1968 void
1979 1969 ud_imark(struct ud_inode *ip)
1980 1970 {
1981 1971 timestruc_t now;
1982 1972
1983 1973 gethrestime(&now);
1984 1974 ud_printf("ud_imark\n");
1985 1975 if (ip->i_flag & IACC) {
1986 1976 ip->i_atime.tv_sec = now.tv_sec;
1987 1977 ip->i_atime.tv_nsec = now.tv_nsec;
1988 1978 }
1989 1979 if (ip->i_flag & IUPD) {
1990 1980 ip->i_mtime.tv_sec = now.tv_sec;
1991 1981 ip->i_mtime.tv_nsec = now.tv_nsec;
1992 1982 ip->i_flag |= IMODTIME;
1993 1983 }
1994 1984 if (ip->i_flag & ICHG) {
1995 1985 ip->i_diroff = 0;
1996 1986 ip->i_ctime.tv_sec = now.tv_sec;
1997 1987 ip->i_ctime.tv_nsec = now.tv_nsec;
1998 1988 }
1999 1989 }
2000 1990
2001 1991
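/*
 * Apply any pending timestamp updates and mark the inode
 * modified; as the name says, no locks are taken here.
 */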
2002 1992 void
2003 1993 ud_itimes_nolock(struct ud_inode *ip)
2004 1994 {
2005 1995 ud_printf("ud_itimes_nolock\n");
2006 1996
2007 1997 if (ip->i_flag & (IUPD|IACC|ICHG)) {
2008 1998 if (ip->i_flag & ICHG) {
2009 1999 ip->i_flag |= IMOD;
2010 2000 } else {
2011 2001 ip->i_flag |= IMODACC;
2012 2002 }
2013 2003 ud_imark(ip);
2014 2004 ip->i_flag &= ~(IACC|IUPD|ICHG);
2015 2005 }
2016 2006 }
2017 2007
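/*
 * Remove the inode from the inode cache hash chain and
 * leave it self-linked.
 */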
2018 2008 void
2019 2009 ud_delcache(struct ud_inode *ip)
2020 2010 {
2021 2011 ud_printf("ud_delcache\n");
2022 2012
2023 2013 mutex_enter(&ud_icache_lock);
2024 2014 remque(ip);
2025 2015 ip->i_forw = ip;
2026 2016 ip->i_back = ip;
2027 2017 mutex_exit(&ud_icache_lock);
2028 2018 }
2029 2019
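/*
 * Drop a reference to the inode; when the last reference goes
 * away, put the inode on the free list (at the front if it has
 * no cached pages or is invalid, at the end otherwise).
 */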
2030 2020 void
2031 2021 ud_idrop(struct ud_inode *ip)
2032 2022 {
2033 2023 struct vnode *vp = ITOV(ip);
2034 2024
2035 2025 ASSERT(RW_WRITE_HELD(&ip->i_contents));
2036 2026
2037 2027 ud_printf("ud_idrop\n");
2038 2028
2039 2029 mutex_enter(&vp->v_lock);
2040 2030 VN_RELE_LOCKED(vp);
2041 2031 if (vp->v_count > 0) {
2042 2032 mutex_exit(&vp->v_lock);
2043 2033 return;
2044 2034 }
2045 2035 mutex_exit(&vp->v_lock);
2046 2036
2047 2037 /*
2048 2038 	 * If the inode is invalid or there are no pages associated
2049 2039 	 * with it, put the inode at the front of the free list.
2050 2040 */
2051 2041 mutex_enter(&ip->i_tlock);
2052 2042 mutex_enter(&udf_ifree_lock);
2053 2043 if (!vn_has_cached_data(vp) || ip->i_perm == 0) {
2054 2044 ud_add_to_free_list(ip, UD_BEGIN);
2055 2045 } else {
2056 2046 /*
2057 2047 * Otherwise, put the inode back on the end of the free list.
2058 2048 */
2059 2049 ud_add_to_free_list(ip, UD_END);
2060 2050 }
2061 2051 mutex_exit(&udf_ifree_lock);
2062 2052 ip->i_flag &= IMODTIME;
2063 2053 mutex_exit(&ip->i_tlock);
2064 2054 }
2065 2055
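/*
 * Insert the inode at the head (UD_BEGIN) or the tail (UD_END)
 * of the free list; the caller must hold udf_ifree_lock.
 */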
2066 2056 void
2067 2057 ud_add_to_free_list(struct ud_inode *ip, uint32_t at)
2068 2058 {
2069 2059 ASSERT(ip);
2070 2060 ASSERT(mutex_owned(&udf_ifree_lock));
2071 2061
2072 2062 #ifdef DEBUG
2073 2063 	/* Check whether the element is already on the list */
2074 2064 if (udf_ifreeh != NULL) {
2075 2065 struct ud_inode *iq;
2076 2066
2077 2067 iq = udf_ifreeh;
2078 2068 while (iq) {
2079 2069 if (iq == ip) {
2080 2070 cmn_err(CE_WARN, "Duplicate %p\n", (void *)ip);
2081 2071 }
2082 2072 iq = iq->i_freef;
2083 2073 }
2084 2074 }
2085 2075 #endif
2086 2076
2087 2077 ip->i_freef = NULL;
2088 2078 ip->i_freeb = NULL;
2089 2079 if (udf_ifreeh == NULL) {
2090 2080 /*
2091 2081 		 * Nothing on the list; just add it
2092 2082 */
2093 2083 udf_ifreeh = ip;
2094 2084 udf_ifreet = ip;
2095 2085 } else {
2096 2086 if (at == UD_BEGIN) {
2097 2087 /*
2098 2088 			 * Add at the beginning of the list
2099 2089 */
2100 2090 ip->i_freef = udf_ifreeh;
2101 2091 udf_ifreeh->i_freeb = ip;
2102 2092 udf_ifreeh = ip;
2103 2093 } else {
2104 2094 /*
2105 2095 * Add at the end of the list
2106 2096 */
2107 2097 ip->i_freeb = udf_ifreet;
2108 2098 udf_ifreet->i_freef = ip;
2109 2099 udf_ifreet = ip;
2110 2100 }
2111 2101 }
2112 2102 }
2113 2103
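/*
 * Unlink the inode from the free list, fixing up the list head
 * and tail as needed; the caller must hold udf_ifree_lock.
 */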
2114 2104 void
2115 2105 ud_remove_from_free_list(struct ud_inode *ip, uint32_t at)
2116 2106 {
2117 2107 ASSERT(ip);
2118 2108 ASSERT(mutex_owned(&udf_ifree_lock));
2119 2109
2120 2110 #ifdef DEBUG
2121 2111 {
2122 2112 struct ud_inode *iq;
2123 2113 uint32_t found = 0;
2124 2114
2125 2115 iq = udf_ifreeh;
2126 2116 while (iq) {
2127 2117 if (iq == ip) {
2128 2118 found++;
2129 2119 }
2130 2120 iq = iq->i_freef;
2131 2121 }
2132 2122 if (found != 1) {
2133 2123 cmn_err(CE_WARN, "ip %p is found %x times\n",
2134 2124 (void *)ip, found);
2135 2125 }
2136 2126 }
2137 2127 #endif
2138 2128
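	/*
	 * An inode with no neighbors is on the free list only if
	 * it is the sole element, i.e. the list head; otherwise
	 * there is nothing to unlink.
	 */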
2139 2129 if ((ip->i_freef == NULL) && (ip->i_freeb == NULL)) {
2140 2130 if (ip != udf_ifreeh) {
2141 2131 return;
2142 2132 }
2143 2133 }
2144 2134
2145 2135 if ((at == UD_BEGIN) || (ip == udf_ifreeh)) {
2146 2136 udf_ifreeh = ip->i_freef;
2147 2137 if (ip->i_freef == NULL) {
2148 2138 udf_ifreet = NULL;
2149 2139 } else {
2150 2140 udf_ifreeh->i_freeb = NULL;
2151 2141 }
2152 2142 } else {
2153 2143 ip->i_freeb->i_freef = ip->i_freef;
2154 2144 if (ip->i_freef) {
2155 2145 ip->i_freef->i_freeb = ip->i_freeb;
2156 2146 } else {
2157 2147 udf_ifreet = ip->i_freeb;
2158 2148 }
2159 2149 }
2160 2150 ip->i_freef = NULL;
2161 2151 ip->i_freeb = NULL;
2162 2152 }
2163 2153
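/*
 * One-time initialization of the inode hash chains, the free
 * list and the global locks; this runs before other threads
 * can use these structures, so no locking is needed here.
 */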
2164 2154 void
2165 2155 ud_init_inodes(void)
2166 2156 {
2167 2157 union ihead *ih = ud_ihead;
2168 2158 int index;
2169 2159
2170 -#ifndef __lint
2171 - _NOTE(NO_COMPETING_THREADS_NOW);
2172 -#endif
2173 2160 for (index = 0; index < UD_HASH_SZ; index++, ih++) {
2174 2161 ih->ih_head[0] = ih;
2175 2162 ih->ih_head[1] = ih;
2176 2163 }
2177 2164 mutex_init(&ud_icache_lock, NULL, MUTEX_DEFAULT, NULL);
2178 2165 mutex_init(&ud_nino_lock, NULL, MUTEX_DEFAULT, NULL);
2179 2166
2180 2167 udf_ifreeh = NULL;
2181 2168 udf_ifreet = NULL;
2182 2169 mutex_init(&udf_ifree_lock, NULL, MUTEX_DEFAULT, NULL);
2183 2170
2184 2171 mutex_init(&ud_sync_busy, NULL, MUTEX_DEFAULT, NULL);
2185 2172 udf_vfs_instances = NULL;
2186 2173 mutex_init(&udf_vfs_mutex, NULL, MUTEX_DEFAULT, NULL);
2187 -
2188 -#ifndef __lint
2189 - _NOTE(COMPETING_THREADS_NOW);
2190 -#endif
2191 2174 }