5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EA's for major/minor #
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
47 #define EXTENT_MERGE_SIZE 5
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
54 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
55 kernel_lb_addr, uint32_t, struct buffer_head *);
56 static void udf_split_extents(struct inode *, int *, int, int,
57 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58 static void udf_prealloc_extents(struct inode *, int, int,
59 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_merge_extents(struct inode *,
61 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_update_extents(struct inode *,
63 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64 kernel_lb_addr, uint32_t, struct buffer_head **);
65 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
71 * Clean-up before the specified inode is destroyed.
74 * This routine is called when the kernel destroys an inode structure
75 * ie. when iput() finds i_count == 0.
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
81 * Called at the last iput() if i_nlink is zero.
/*
 * udf_delete_inode - final teardown of an unlinked inode.
 *
 * Called at the last iput() when i_nlink is zero: drop the page cache,
 * then (for a valid inode) write it back and free its on-disk space.
 * NOTE(review): this excerpt omits intermediate source lines (braces,
 * locking) between the numbered statements.
 */
83 void udf_delete_inode(struct inode * inode)
/* Discard all cached pages before releasing the on-disk blocks. */
85 truncate_inode_pages(&inode->i_data, 0);
/* A bad inode has no valid on-disk state to update or free. */
87 if (is_bad_inode(inode))
/* Flush the in-core inode (synchronously if IS_SYNC), then free it. */
94 udf_update_inode(inode, IS_SYNC(inode));
95 udf_free_inode(inode);
/*
 * udf_clear_inode - release in-core state when an inode is evicted.
 *
 * On a writable mount, drop any preallocated extents first; then free
 * the private allocation-descriptor/EA buffer hanging off the inode.
 */
103 void udf_clear_inode(struct inode *inode)
/* Preallocation can only be discarded if the filesystem is writable. */
105 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
107 udf_discard_prealloc(inode);
/* Free the per-inode data area and clear the pointer to avoid reuse. */
111 kfree(UDF_I_DATA(inode));
112 UDF_I_DATA(inode) = NULL;
/* Write one page of a UDF file using the generic block-layer helper,
 * with udf_get_block doing the logical-to-physical block mapping. */
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
117 return block_write_full_page(page, udf_get_block, wbc);
/* Read one page of a UDF file via the generic block-layer helper. */
120 static int udf_readpage(struct file *file, struct page *page)
122 return block_read_full_page(page, udf_get_block);
/* Prepare the [from, to) range of a page for a write: map/allocate the
 * underlying blocks through udf_get_block. */
125 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
127 return block_prepare_write(page, from, to, udf_get_block);
/* FIBMAP support: translate a file block number to a device block via
 * the generic helper and udf_get_block. */
130 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
132 return generic_block_bmap(mapping,block,udf_get_block);
/* Address-space operations for regular UDF files whose data lives in
 * normal data blocks (not embedded in the ICB). */
135 const struct address_space_operations udf_aops = {
136 .readpage = udf_readpage,
137 .writepage = udf_writepage,
138 .sync_page = block_sync_page,
139 .prepare_write = udf_prepare_write,
140 .commit_write = generic_commit_write,
/*
 * udf_expand_file_adinicb - convert a file stored "allocation descriptors
 * in ICB" (data embedded in the inode block) into a normal block-mapped
 * file, so it can grow beyond the ICB.
 *
 * The embedded data is copied into page 0 of the mapping, the in-ICB
 * copy is zeroed, the allocation type is switched to short/long ADs
 * (per the mount option), and the page is written back.
 * NOTE(review): intermediate source lines are omitted in this excerpt.
 */
144 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
148 struct writeback_control udf_wbc = {
149 .sync_mode = WB_SYNC_NONE,
153 /* from now on we have normal address_space methods */
154 inode->i_data.a_ops = &udf_aops;
/* Empty file: just flip the allocation type, nothing to copy. */
156 if (!UDF_I_LENALLOC(inode))
158 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
161 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162 mark_inode_dirty(inode);
/* Pin page 0 of the file and populate it from the in-ICB data. */
166 page = grab_cache_page(inode->i_mapping, 0);
167 BUG_ON(!PageLocked(page));
169 if (!PageUptodate(page))
/* Zero the tail of the page past the embedded data length. */
172 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
/* Copy the embedded file data (it follows the extended attrs). */
174 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175 UDF_I_LENALLOC(inode));
176 flush_dcache_page(page);
177 SetPageUptodate(page);
/* Wipe the in-ICB copy now that the page cache holds the data. */
180 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181 UDF_I_LENALLOC(inode));
182 UDF_I_LENALLOC(inode) = 0;
183 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
186 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
/* Push the page out so the new data block gets allocated/written. */
188 inode->i_data.a_ops->writepage(page, &udf_wbc);
189 page_cache_release(page);
191 mark_inode_dirty(inode);
/*
 * udf_expand_dir_adinicb - convert a directory stored in-ICB into one
 * backed by a real data block.
 *
 * Allocates a new block, copies every file-identifier descriptor from
 * the embedded area into it (re-tagging each with the new block
 * location), clears the in-ICB area, and records the block as the
 * directory's single extent.
 *
 * @inode: directory being expanded
 * @block: out - logical block number allocated for the directory data
 * @err:   out - error code on failure
 * Returns the buffer_head of the new directory block (dbh), or NULL.
 * NOTE(review): intermediate source lines are omitted in this excerpt.
 */
194 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
197 struct buffer_head *sbh = NULL, *dbh = NULL;
198 kernel_lb_addr bloc, eloc;
199 uint32_t elen, extoffset;
202 struct udf_fileident_bh sfibh, dfibh;
/* f_pos/size are in 4-byte units (hence the >> 2). */
203 loff_t f_pos = udf_ext0_offset(inode) >> 2;
204 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
205 struct fileIdentDesc cfi, *sfi, *dfi;
/* Choose the AD format the rest of the fs is using. */
207 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
208 alloctype = ICBTAG_FLAG_AD_SHORT;
210 alloctype = ICBTAG_FLAG_AD_LONG;
214 UDF_I_ALLOCTYPE(inode) = alloctype;
215 mark_inode_dirty(inode);
219 /* alloc block, and copy data to it */
220 *block = udf_new_block(inode->i_sb, inode,
221 UDF_I_LOCATION(inode).partitionReferenceNum,
222 UDF_I_LOCATION(inode).logicalBlockNum, err);
226 newblock = udf_get_pblock(inode->i_sb, *block,
227 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
230 dbh = udf_tgetblk(inode->i_sb, newblock);
/* Fresh block: zero it and mark it valid/dirty for the inode. */
234 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
235 set_buffer_uptodate(dbh);
237 mark_buffer_dirty_inode(dbh, inode);
/* Source iterator starts inside the ICB; destination at offset 0. */
239 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
240 sbh = sfibh.sbh = sfibh.ebh = NULL;
241 dfibh.soffset = dfibh.eoffset = 0;
242 dfibh.sbh = dfibh.ebh = dbh;
/* Walk every directory entry in the embedded area and copy it over. */
243 while ( (f_pos < size) )
/* Temporarily read as in-ICB so udf_fileident_read finds the data. */
245 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
246 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
249 udf_release_data(dbh);
252 UDF_I_ALLOCTYPE(inode) = alloctype;
/* Each copied descriptor is re-tagged with its new block location. */
253 sfi->descTag.tagLocation = cpu_to_le32(*block);
254 dfibh.soffset = dfibh.eoffset;
255 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
256 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
257 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
258 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
/* Write failed: restore in-ICB type and bail out. */
260 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
261 udf_release_data(dbh);
265 mark_buffer_dirty_inode(dbh, inode);
/* Clear the old embedded directory data in the ICB. */
267 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
268 UDF_I_LENALLOC(inode) = 0;
/* Record the new block as the directory's single extent. */
269 bloc = UDF_I_LOCATION(inode);
270 eloc.logicalBlockNum = *block;
271 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
272 elen = inode->i_size;
273 UDF_I_LENEXTENTS(inode) = elen;
274 extoffset = udf_file_entry_alloc_offset(inode);
275 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
278 udf_release_data(sbh);
279 mark_inode_dirty(inode);
/*
 * udf_get_block - get_block_t callback for the generic block I/O paths.
 *
 * Maps logical file block @block to a physical block in @bh_result.
 * For reads (or already-mapped blocks) udf_block_map suffices; when
 * @create is set and the block is unmapped, inode_getblk allocates it.
 * NOTE(review): intermediate source lines are omitted in this excerpt.
 */
283 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
286 struct buffer_head *bh;
/* Fast path: translate an existing mapping without allocating. */
291 phys = udf_block_map(inode, block);
293 map_bh(bh_result, inode->i_sb, phys);
/* Sequential-write heuristic: advance the next-alloc hint in step. */
306 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
308 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
309 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
/* Allocate/map the block; phys and new are filled in on success. */
314 bh = inode_getblk(inode, block, &err, &phys, &new);
321 set_buffer_new(bh_result);
322 map_bh(bh_result, inode->i_sb, phys);
328 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * udf_getblk - map (and optionally allocate) one file block and return
 * its buffer_head.
 *
 * Uses a dummy buffer_head so udf_get_block can report the mapping,
 * then takes a real buffer from the cache; a freshly allocated block
 * is zero-filled and marked up to date.
 */
332 static struct buffer_head *
333 udf_getblk(struct inode *inode, long block, int create, int *err)
335 struct buffer_head dummy;
/* Impossible block number, so a real mapping is detectable. */
338 dummy.b_blocknr = -1000;
339 *err = udf_get_block(inode, block, &dummy, create);
340 if (!*err && buffer_mapped(&dummy))
342 struct buffer_head *bh;
343 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
/* A newly allocated block must be zeroed before it is exposed. */
344 if (buffer_new(&dummy))
347 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
348 set_buffer_uptodate(bh);
350 mark_buffer_dirty_inode(bh, inode);
/*
 * inode_getblk - core block-allocation routine.
 *
 * Finds the extent containing logical file block @block, allocating a
 * new block when necessary, then splits/preallocates/merges extents and
 * writes the updated extent list back.
 *
 * @inode: file being mapped
 * @block: logical file block to map
 * @err:   out - error code
 * @phys:  out - physical block number
 * @new:   out - set when a block was freshly allocated
 * NOTE(review): many intermediate source lines (braces, else branches,
 * goto labels) are omitted in this excerpt; read alongside the full
 * fs/udf/inode.c before modifying.
 */
357 static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
358 int *err, long *phys, int *new)
/* prev/cur/next extent-walk state: buffers, offsets and locations. */
360 struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
361 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
362 uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
363 int count = 0, startnum = 0, endnum = 0;
365 kernel_lb_addr eloc, pbloc, cbloc, nbloc;
367 loff_t lbcount = 0, b_off = 0;
368 uint32_t newblocknum, newblock;
/* Default allocation goal: near the inode's own ICB. */
371 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
374 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
/* Target byte offset of the requested block. */
375 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
376 pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
378 /* find the extent which contains the block we are looking for.
379 alternate between laarr[0] and laarr[1] for locations of the
380 current extent, and the previous extent */
/* Rotate prev <- cur <- next buffer references each iteration. */
385 udf_release_data(pbh);
386 atomic_inc(&cbh->b_count);
391 udf_release_data(cbh);
392 atomic_inc(&nbh->b_count);
401 pextoffset = cextoffset;
402 cextoffset = nextoffset;
/* -1 means the extent list ended before reaching b_off. */
404 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
409 laarr[c].extLength = (etype << 30) | elen;
410 laarr[c].extLocation = eloc;
/* Track a goal block just past the last real extent. */
412 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
413 pgoal = eloc.logicalBlockNum +
414 ((elen + inode->i_sb->s_blocksize - 1) >>
415 inode->i_sb->s_blocksize_bits);
418 } while (lbcount + elen <= b_off);
/* Block offset of the target within the found extent. */
421 offset = b_off >> inode->i_sb->s_blocksize_bits;
423 /* if the extent is allocated and recorded, return the block
424 if the extent is not a multiple of the blocksize, round up */
426 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
/* Round a short final extent up to a whole block and rewrite it. */
428 if (elen & (inode->i_sb->s_blocksize - 1))
430 elen = EXT_RECORDED_ALLOCATED |
431 ((elen + inode->i_sb->s_blocksize - 1) &
432 ~(inode->i_sb->s_blocksize - 1));
433 etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
435 udf_release_data(pbh);
436 udf_release_data(cbh);
437 udf_release_data(nbh);
438 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
/* Past EOF: extend the file out to the requested block. */
445 endnum = startnum = ((count > 1) ? 1 : count);
/* Round the previous (partial) extent up to a block boundary. */
446 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
449 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
450 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
451 inode->i_sb->s_blocksize - 1) &
452 ~(inode->i_sb->s_blocksize - 1));
453 UDF_I_LENEXTENTS(inode) =
454 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
455 ~(inode->i_sb->s_blocksize - 1);
/* Insert an unrecorded/unallocated hole covering the gap. */
458 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
459 ((offset + 1) << inode->i_sb->s_blocksize_bits);
460 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
466 endnum = startnum = ((count > 2) ? 2 : count);
468 /* if the current extent is in position 0, swap it with the previous */
469 if (!c && count != 1)
477 /* if the current block is located in an extent, read the next extent */
480 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
482 laarr[c+1].extLength = (etype << 30) | elen;
483 laarr[c+1].extLocation = eloc;
491 udf_release_data(cbh);
492 udf_release_data(nbh);
494 /* if the current extent is not recorded but allocated, get the
495 block in the extent corresponding to the requested block */
496 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
497 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
498 else /* otherwise, allocate a new block */
/* Prefer the saved goal for sequential writes, else near the ICB. */
500 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
501 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
506 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
509 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
510 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
512 udf_release_data(pbh);
516 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
519 /* if the extent the requested block is located in contains multiple blocks,
520 split the extent into at most three extents. blocks prior to requested
521 block, requested block, and blocks after requested block */
522 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
524 #ifdef UDF_PREALLOCATE
525 /* preallocate blocks */
526 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
529 /* merge any continuous blocks in laarr */
530 udf_merge_extents(inode, laarr, &endnum);
532 /* write back the new extents, inserting new extents if the new number
533 of extents is greater than the old number, and deleting extents if
534 the new number of extents is less than the old number */
535 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
537 udf_release_data(pbh);
539 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
540 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Remember where we allocated, to guide the next sequential write. */
547 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
548 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
549 inode->i_ctime = current_fs_time(inode->i_sb);
552 udf_sync_inode(inode);
554 mark_inode_dirty(inode);
/*
 * udf_split_extents - carve the extent containing the target block into
 * up to three extents: blocks before the target, the target block
 * itself (now recorded+allocated at @newblocknum), and blocks after.
 *
 * @c:        in/out - index of the current extent in laarr
 * @offset:   block offset of the target within that extent
 * @endnum:   in/out - number of valid entries in laarr
 * NOTE(review): intermediate source lines are omitted in this excerpt.
 */
558 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
559 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
/* Only unrecorded (allocated or not) extents need splitting. */
561 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
562 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
565 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
566 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
567 int8_t etype = (laarr[curr].extLength >> 30);
/* Target at either end: split into two; otherwise into three. */
571 else if (!offset || blen == offset + 1)
573 laarr[curr+2] = laarr[curr+1];
574 laarr[curr+1] = laarr[curr];
578 laarr[curr+3] = laarr[curr+1];
579 laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Leading part: free its blocks if it was allocated, and keep it as
 * an unrecorded extent of 'offset' blocks. */
584 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
586 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
587 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
588 (offset << inode->i_sb->s_blocksize_bits);
589 laarr[curr].extLocation.logicalBlockNum = 0;
590 laarr[curr].extLocation.partitionReferenceNum = 0;
593 laarr[curr].extLength = (etype << 30) |
594 (offset << inode->i_sb->s_blocksize_bits);
/* Middle part: the target block becomes recorded+allocated. */
600 laarr[curr].extLocation.logicalBlockNum = newblocknum;
601 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
602 laarr[curr].extLocation.partitionReferenceNum =
603 UDF_I_LOCATION(inode).partitionReferenceNum;
604 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
605 inode->i_sb->s_blocksize;
/* Trailing part: whatever follows the target keeps the old type. */
608 if (blen != offset + 1)
610 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
611 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
612 laarr[curr].extLength = (etype << 30) |
613 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * udf_prealloc_extents - preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS
 * blocks after the just-allocated block, to keep future writes
 * contiguous. Newly preallocated space is inserted into laarr as a
 * not-recorded-but-allocated extent, and following hole extents are
 * shrunk or removed to compensate.
 * NOTE(review): intermediate source lines are omitted in this excerpt.
 */
620 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
621 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
623 int start, length = 0, currlength = 0, i;
625 if (*endnum >= (c+1))
/* Existing preallocation right after the target counts toward the
 * budget. */
634 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
637 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
638 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Count how many hole blocks follow, limiting how much to prealloc. */
644 for (i=start+1; i<=*endnum; i++)
649 length += UDF_DEFAULT_PREALLOC_BLOCKS;
651 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
652 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
653 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Ask the allocator for contiguous blocks right after 'start'. */
660 int next = laarr[start].extLocation.logicalBlockNum +
661 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
662 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
663 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
664 laarr[start].extLocation.partitionReferenceNum,
665 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
666 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Either grow the existing preallocated extent... */
671 laarr[start].extLength +=
672 (numalloc << inode->i_sb->s_blocksize_bits);
/* ...or insert a fresh not-recorded-allocated extent after c. */
675 memmove(&laarr[c+2], &laarr[c+1],
676 sizeof(long_ad) * (*endnum - (c+1)));
678 laarr[c+1].extLocation.logicalBlockNum = next;
679 laarr[c+1].extLocation.partitionReferenceNum =
680 laarr[c].extLocation.partitionReferenceNum;
681 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
682 (numalloc << inode->i_sb->s_blocksize_bits);
/* Shrink/remove following holes to account for the new allocation. */
686 for (i=start+1; numalloc && i<*endnum; i++)
688 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
689 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
693 laarr[i].extLength -=
694 (numalloc << inode->i_sb->s_blocksize_bits);
701 memmove(&laarr[i], &laarr[i+1],
702 sizeof(long_ad) * (*endnum - (i+1)));
707 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * udf_merge_extents - coalesce adjacent laarr entries.
 *
 * Merges neighbouring extents of the same type when they are logically
 * contiguous (or both unallocated holes), splitting at the
 * UDF_EXTENT_LENGTH_MASK limit when the combined length would overflow
 * a single descriptor. Also converts not-recorded-allocated extents
 * adjacent to holes back into holes, freeing their blocks.
 * NOTE(review): intermediate source lines (braces, else branches) are
 * omitted in this excerpt.
 */
712 static void udf_merge_extents(struct inode *inode,
713 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
717 for (i=0; i<(*endnum-1); i++)
/* Case 1: same extent type and (for allocated extents) physically
 * contiguous on disk. */
719 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
721 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
722 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
723 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
724 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Combined length too big for one descriptor: fill extent i to the
 * maximum and push the remainder into i+1. */
726 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
727 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
728 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
730 laarr[i+1].extLength = (laarr[i+1].extLength -
731 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
732 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
733 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
734 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
735 laarr[i+1].extLocation.logicalBlockNum =
736 laarr[i].extLocation.logicalBlockNum +
737 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
738 inode->i_sb->s_blocksize_bits);
/* Otherwise merge i+1 into i and shift the array down. */
742 laarr[i].extLength = laarr[i+1].extLength +
743 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
744 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
746 memmove(&laarr[i+1], &laarr[i+2],
747 sizeof(long_ad) * (*endnum - (i+2)));
/* Case 2: allocated-but-unrecorded followed by a hole - free the
 * allocated blocks and merge as one hole. */
753 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
754 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
756 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
757 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
758 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
759 laarr[i].extLocation.logicalBlockNum = 0;
760 laarr[i].extLocation.partitionReferenceNum = 0;
762 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
763 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
764 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
766 laarr[i+1].extLength = (laarr[i+1].extLength -
767 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
768 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
769 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
770 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
774 laarr[i].extLength = laarr[i+1].extLength +
775 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
776 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
778 memmove(&laarr[i+1], &laarr[i+2],
779 sizeof(long_ad) * (*endnum - (i+2)));
/* Case 3: lone allocated-but-unrecorded extent - free its blocks and
 * demote it to a hole in place. */
784 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
786 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
787 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
788 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
789 laarr[i].extLocation.logicalBlockNum = 0;
790 laarr[i].extLocation.partitionReferenceNum = 0;
791 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
792 EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * udf_update_extents - write the modified laarr back to the on-disk
 * extent list starting at (pbloc, pextoffset).
 *
 * If the extent count shrank, delete the surplus descriptors; if it
 * grew, insert new ones; then overwrite each slot in order.
 */
797 static void udf_update_extents(struct inode *inode,
798 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
799 kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
802 kernel_lb_addr tmploc;
/* Fewer extents than before: remove the extras from disk. */
805 if (startnum > endnum)
807 for (i=0; i<(startnum-endnum); i++)
809 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
810 laarr[i].extLength, *pbh);
/* More extents than before: insert slots, advancing past each. */
813 else if (startnum < endnum)
815 for (i=0; i<(endnum-startnum); i++)
817 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
818 laarr[i].extLength, *pbh);
819 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
820 &laarr[i].extLength, pbh, 1);
/* Overwrite the remaining descriptors in place. */
825 for (i=start; i<endnum; i++)
827 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
828 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
829 laarr[i].extLength, *pbh, 1);
/*
 * udf_bread - get a buffer for file block @block and make sure its
 * contents are read in, allocating the block first when @create is set.
 * Returns NULL (with *err set) on failure.
 */
833 struct buffer_head * udf_bread(struct inode * inode, int block,
834 int create, int * err)
836 struct buffer_head * bh = NULL;
838 bh = udf_getblk(inode, block, create, err);
/* Already valid (e.g. freshly allocated and zeroed): no I/O needed. */
842 if (buffer_uptodate(bh))
/* Otherwise issue a synchronous read and re-check validity. */
844 ll_rw_block(READ, 1, &bh);
846 if (buffer_uptodate(bh))
/*
 * udf_truncate - shrink or grow a file to inode->i_size.
 *
 * Only meaningful for regular files, directories and symlinks, and
 * refused for append-only/immutable inodes. In-ICB files either stay
 * embedded (zeroing the dropped tail) or are first expanded to normal
 * block mapping; block-mapped files go through the generic page
 * truncate plus extent truncation.
 * NOTE(review): intermediate source lines are omitted in this excerpt.
 */
853 void udf_truncate(struct inode * inode)
858 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
859 S_ISLNK(inode->i_mode)))
861 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
865 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* New size no longer fits in the ICB: convert to block mapping. */
867 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
870 udf_expand_file_adinicb(inode, inode->i_size, &err);
/* Expansion failed (still in-ICB): clamp i_size to what fits. */
871 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
873 inode->i_size = UDF_I_LENALLOC(inode);
878 udf_truncate_extents(inode);
/* Still embedded: zero the data beyond the new size. */
882 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
883 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
884 UDF_I_LENALLOC(inode) = inode->i_size;
/* Normal file: zero the partial last page, then drop extents. */
889 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
890 udf_truncate_extents(inode);
893 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
895 udf_sync_inode (inode);
897 mark_inode_dirty(inode);
/*
 * __udf_read_inode - read the on-disk (extended) file entry for an
 * inode and populate the in-core structure via udf_fill_inode.
 *
 * Handles strategy 4096 ICB hierarchies by following the indirect
 * entry to the most recent direct entry (recursing with the new
 * location). Any read/identification failure marks the inode bad.
 * NOTE(review): intermediate source lines are omitted in this excerpt.
 */
902 __udf_read_inode(struct inode *inode)
904 struct buffer_head *bh = NULL;
905 struct fileEntry *fe;
909 * Set defaults, but the inode is still incomplete!
910 * Note: get_new_inode() sets the following on a new inode:
913 * i_flags = sb->s_flags
915 * clean_inode(): zero fills and sets
/* Read and tag-check the block the inode's ICB lives in. */
920 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
924 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
926 make_bad_inode(inode);
/* Only FE, EFE and USE descriptors are acceptable here. */
930 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
931 ident != TAG_IDENT_USE)
933 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
934 inode->i_ino, ident);
935 udf_release_data(bh);
936 make_bad_inode(inode);
940 fe = (struct fileEntry *)bh->b_data;
/* Strategy 4096: the ICB at slot 1 is an indirect entry pointing to
 * the latest direct entry; follow it and restart from there. */
942 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
944 struct buffer_head *ibh = NULL, *nbh = NULL;
945 struct indirectEntry *ie;
947 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
948 if (ident == TAG_IDENT_IE)
953 ie = (struct indirectEntry *)ibh->b_data;
955 loc = lelb_to_cpu(ie->indirectICB.extLocation);
957 if (ie->indirectICB.extLength &&
958 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
960 if (ident == TAG_IDENT_FE ||
961 ident == TAG_IDENT_EFE)
/* Adopt the indirect location and recurse to load it. */
963 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
964 udf_release_data(bh);
965 udf_release_data(ibh);
966 udf_release_data(nbh);
967 __udf_read_inode(inode);
972 udf_release_data(nbh);
973 udf_release_data(ibh);
977 udf_release_data(ibh);
981 udf_release_data(ibh);
/* Only strategy types 4 and 4096 are supported. */
983 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
985 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
986 le16_to_cpu(fe->icbTag.strategyType));
987 udf_release_data(bh);
988 make_bad_inode(inode);
991 udf_fill_inode(inode, bh);
992 udf_release_data(bh);
/*
 * udf_fill_inode - decode an on-disk fileEntry / extendedFileEntry /
 * unallocSpaceEntry (in @bh) into the in-core inode.
 *
 * Copies the descriptor tail (extended attrs + allocation descriptors)
 * into the per-inode data buffer, converts ownership, permissions,
 * link count, sizes and timestamps from little-endian/UDF formats, and
 * wires up the inode/file/address-space operations by file type.
 * NOTE(review): intermediate source lines are omitted in this excerpt.
 */
995 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
997 struct fileEntry *fe;
998 struct extendedFileEntry *efe;
1003 fe = (struct fileEntry *)bh->b_data;
1004 efe = (struct extendedFileEntry *)bh->b_data;
1006 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1007 UDF_I_STRAT4096(inode) = 0;
1008 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1009 UDF_I_STRAT4096(inode) = 1;
1011 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1012 UDF_I_UNIQUE(inode) = 0;
1013 UDF_I_LENEATTR(inode) = 0;
1014 UDF_I_LENEXTENTS(inode) = 0;
1015 UDF_I_LENALLOC(inode) = 0;
1016 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1017 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/* Copy the descriptor tail; its start depends on the descriptor type.
 * NOTE(review): these kmalloc results are not checked here - confirm
 * against the full source before relying on this path. */
1018 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1020 UDF_I_EFE(inode) = 1;
1021 UDF_I_USE(inode) = 0;
1022 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1023 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1025 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1027 UDF_I_EFE(inode) = 0;
1028 UDF_I_USE(inode) = 0;
1029 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1030 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1032 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1034 UDF_I_EFE(inode) = 0;
1035 UDF_I_USE(inode) = 1;
1036 UDF_I_LENALLOC(inode) =
1038 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1039 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1040 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
/* Ownership: -1 on disk (or the ignore mount option) falls back to
 * the mount-wide default uid/gid. */
1044 inode->i_uid = le32_to_cpu(fe->uid);
1045 if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1046 UDF_FLAG_UID_IGNORE))
1047 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1049 inode->i_gid = le32_to_cpu(fe->gid);
1050 if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1051 UDF_FLAG_GID_IGNORE))
1052 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1054 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1055 if (!inode->i_nlink)
1058 inode->i_size = le64_to_cpu(fe->informationLength);
1059 UDF_I_LENEXTENTS(inode) = inode->i_size;
1061 inode->i_mode = udf_convert_permissions(fe);
1062 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
/* Plain FE: timestamps and lengths at the fileEntry offsets. */
1064 if (UDF_I_EFE(inode) == 0)
1066 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1067 (inode->i_sb->s_blocksize_bits - 9);
1069 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1070 lets_to_cpu(fe->accessTime)) )
1072 inode->i_atime.tv_sec = convtime;
1073 inode->i_atime.tv_nsec = convtime_usec * 1000;
/* Unparseable stamp: fall back to the volume recording time. */
1077 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1080 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1081 lets_to_cpu(fe->modificationTime)) )
1083 inode->i_mtime.tv_sec = convtime;
1084 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1088 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1091 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1092 lets_to_cpu(fe->attrTime)) )
1094 inode->i_ctime.tv_sec = convtime;
1095 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1099 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1102 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1103 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1104 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1105 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
/* Extended FE: same fields, at the extendedFileEntry offsets, plus a
 * creation time. */
1109 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1110 (inode->i_sb->s_blocksize_bits - 9);
1112 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1113 lets_to_cpu(efe->accessTime)) )
1115 inode->i_atime.tv_sec = convtime;
1116 inode->i_atime.tv_nsec = convtime_usec * 1000;
1120 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1123 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1124 lets_to_cpu(efe->modificationTime)) )
1126 inode->i_mtime.tv_sec = convtime;
1127 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1131 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1134 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1135 lets_to_cpu(efe->createTime)) )
1137 UDF_I_CRTIME(inode).tv_sec = convtime;
1138 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1142 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1145 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1146 lets_to_cpu(efe->attrTime)) )
1148 inode->i_ctime.tv_sec = convtime;
1149 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1153 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1156 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1157 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1158 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1159 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
/* Dispatch on the ICB file type to choose ops and mode bits. */
1162 switch (fe->icbTag.fileType)
1164 case ICBTAG_FILE_TYPE_DIRECTORY:
1166 inode->i_op = &udf_dir_inode_operations;
1167 inode->i_fop = &udf_dir_operations;
1168 inode->i_mode |= S_IFDIR;
1172 case ICBTAG_FILE_TYPE_REALTIME:
1173 case ICBTAG_FILE_TYPE_REGULAR:
1174 case ICBTAG_FILE_TYPE_UNDEF:
/* In-ICB files need the embedded-data address space ops. */
1176 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1177 inode->i_data.a_ops = &udf_adinicb_aops;
1179 inode->i_data.a_ops = &udf_aops;
1180 inode->i_op = &udf_file_inode_operations;
1181 inode->i_fop = &udf_file_operations;
1182 inode->i_mode |= S_IFREG;
1185 case ICBTAG_FILE_TYPE_BLOCK:
1187 inode->i_mode |= S_IFBLK;
1190 case ICBTAG_FILE_TYPE_CHAR:
1192 inode->i_mode |= S_IFCHR;
1195 case ICBTAG_FILE_TYPE_FIFO:
1197 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1200 case ICBTAG_FILE_TYPE_SOCKET:
1202 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1205 case ICBTAG_FILE_TYPE_SYMLINK:
1207 inode->i_data.a_ops = &udf_symlink_aops;
1208 inode->i_op = &page_symlink_inode_operations;
1209 inode->i_mode = S_IFLNK|S_IRWXUGO;
1214 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1215 inode->i_ino, fe->icbTag.fileType);
1216 make_bad_inode(inode);
/* Device nodes: major/minor come from the Device Spec extended
 * attribute (EA type 12). */
1220 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1222 struct deviceSpec *dsea =
1223 (struct deviceSpec *)
1224 udf_get_extendedattr(inode, 12, 1);
1228 init_special_inode(inode, inode->i_mode, MKDEV(
1229 le32_to_cpu(dsea->majorDeviceIdent),
1230 le32_to_cpu(dsea->minorDeviceIdent)));
1231 /* Developer ID ??? */
1235 make_bad_inode(inode);
/*
 * udf_convert_permissions - translate UDF on-disk permission bits and
 * ICB flags into a POSIX mode_t.
 *
 * UDF stores five bits per class at spaced offsets; the shifts pack
 * the rwx triplets into the standard S_IRWXU/G/O positions, and the
 * ICB setuid/setgid/sticky flags map to their POSIX equivalents.
 */
1241 udf_convert_permissions(struct fileEntry *fe)
1244 uint32_t permissions;
1247 permissions = le32_to_cpu(fe->permissions);
1248 flags = le16_to_cpu(fe->icbTag.flags);
1250 mode = (( permissions ) & S_IRWXO) |
1251 (( permissions >> 2 ) & S_IRWXG) |
1252 (( permissions >> 4 ) & S_IRWXU) |
1253 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1254 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1255 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1264 * Write out the specified inode.
1267 * This routine is called whenever an inode is synced.
1268 * Currently this routine is just a placeholder.
1271 * July 1, 1997 - Andrew E. Mileski
1272 * Written, tested, and released.
/*
 * udf_write_inode - VFS write_inode hook: flush the in-core inode to
 * its on-disk descriptor (synchronously when @sync is set).
 */
1275 int udf_write_inode(struct inode * inode, int sync)
1279 ret = udf_update_inode(inode, sync);
/* Synchronously write the inode's on-disk descriptor. */
1284 int udf_sync_inode(struct inode * inode)
1286 return udf_update_inode(inode, 1);
/*
 * udf_update_inode
 *
 * Write the in-core inode back to its on-disk (extended) file entry.
 * Reads the ICB block, rebuilds the descriptor fields from the inode,
 * recomputes the descriptor CRC and tag checksum, and marks the buffer
 * dirty; when do_sync is set the buffer is written out synchronously
 * and I/O errors are reported.
 * NOTE(review): many guard/brace lines are elided from this excerpt
 * (e.g. the error paths after udf_tread and the 'if (i != 4)' inside
 * the checksum loops); comments below hedge where behavior depends on
 * elided code.
 */
1290 udf_update_inode(struct inode *inode, int do_sync)
1292 struct buffer_head *bh = NULL;
1293 struct fileEntry *fe;
1294 struct extendedFileEntry *efe;
1299 kernel_timestamp cpu_time;
/* Read the inode's ICB block from its recorded logical-block address. */
1302 bh = udf_tread(inode->i_sb,
1303 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1307 udf_debug("bread failure\n");
/* Rebuild the descriptor from scratch: clear the whole block first. */
1311 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
/* fe and efe alias the same buffer; which layout is valid depends on
 * UDF_I_EFE() below. */
1313 fe = (struct fileEntry *)bh->b_data;
1314 efe = (struct extendedFileEntry *)bh->b_data;
/*
 * Special case: an Unallocated Space Entry ICB.  Only the allocation
 * descriptors and tag are written, then we are done with this inode.
 */
1316 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1318 struct unallocSpaceEntry *use =
1319 (struct unallocSpaceEntry *)bh->b_data;
1321 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode))
1322 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1323 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1325 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1326 use->descTag.descCRCLength = cpu_to_le16(crclen);
/* CRC covers everything after the 16-byte tag itself. */
1327 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
/* Tag checksum: byte-sum of the tag.  NOTE(review): the guard that
 * skips the checksum byte itself (offset 4) sits on an elided line. */
1329 use->descTag.tagChecksum = 0;
1330 for (i=0; i<16; i++)
1332 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1334 mark_buffer_dirty(bh);
1335 udf_release_data(bh);
/* Mount options may force the on-disk uid/gid to -1 ("forget"). */
1339 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1340 fe->uid = cpu_to_le32(-1);
1341 else fe->uid = cpu_to_le32(inode->i_uid);
1343 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1344 fe->gid = cpu_to_le32(-1);
1345 else fe->gid = cpu_to_le32(inode->i_gid);
/* Inverse of udf_convert_permissions(): spread Unix rwx groups back
 * out by shifting left 0/2/4. */
1347 udfperms = ((inode->i_mode & S_IRWXO) ) |
1348 ((inode->i_mode & S_IRWXG) << 2) |
1349 ((inode->i_mode & S_IRWXU) << 4);
/* Preserve the UDF-only delete/chattr bits already on disk. */
1351 udfperms |= (le32_to_cpu(fe->permissions) &
1352 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1353 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1354 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1355 fe->permissions = cpu_to_le32(udfperms);
/* Directories record nlink - 1 — presumably '.' is not counted on
 * disk; TODO confirm against the UDF spec. */
1357 if (S_ISDIR(inode->i_mode))
1358 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1360 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1362 fe->informationLength = cpu_to_le64(inode->i_size);
/*
 * Char/block devices: record major/minor in a Device Specification
 * extended attribute (attribute type 12, subtype 1), creating it if
 * absent.  NOTE(review): the NULL-check on the lookup result is on an
 * elided line.
 */
1364 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1367 struct deviceSpec *dsea =
1368 (struct deviceSpec *)
1369 udf_get_extendedattr(inode, 12, 1);
1373 dsea = (struct deviceSpec *)
1374 udf_add_extendedattr(inode,
1375 sizeof(struct deviceSpec) +
1376 sizeof(regid), 12, 0x3);
1377 dsea->attrType = cpu_to_le32(12);
1378 dsea->attrSubtype = 1;
1379 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1381 dsea->impUseLength = cpu_to_le32(sizeof(regid));
/* Stamp the implementation-use regid with our developer identity. */
1383 eid = (regid *)dsea->impUse;
1384 memset(eid, 0, sizeof(regid));
1385 strcpy(eid->ident, UDF_ID_DEVELOPER);
1386 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1387 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1388 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1389 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
/* Plain File Entry layout (as opposed to Extended File Entry). */
1392 if (UDF_I_EFE(inode) == 0)
1394 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
/* i_blocks is in 512-byte sectors; round up to fs blocks. */
1395 fe->logicalBlocksRecorded = cpu_to_le64(
1396 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1397 (inode->i_sb->s_blocksize_bits - 9));
/* Timestamps only written when conversion to a UDF stamp succeeds. */
1399 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1400 fe->accessTime = cpu_to_lets(cpu_time);
1401 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1402 fe->modificationTime = cpu_to_lets(cpu_time);
1403 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1404 fe->attrTime = cpu_to_lets(cpu_time);
1405 memset(&(fe->impIdent), 0, sizeof(regid));
1406 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1407 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1408 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1409 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1410 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1411 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1412 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1413 crclen = sizeof(struct fileEntry);
/* Extended File Entry layout: adds objectSize and a create time. */
1417 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1418 efe->objectSize = cpu_to_le64(inode->i_size);
1419 efe->logicalBlocksRecorded = cpu_to_le64(
1420 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1421 (inode->i_sb->s_blocksize_bits - 9));
/*
 * Clamp the cached creation time so it never postdates atime, mtime
 * or ctime (a later "create" time would be inconsistent on disk).
 */
1423 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1424 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1425 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1427 UDF_I_CRTIME(inode) = inode->i_atime;
1429 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1430 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1431 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1433 UDF_I_CRTIME(inode) = inode->i_mtime;
1435 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1436 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1437 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1439 UDF_I_CRTIME(inode) = inode->i_ctime;
1442 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1443 efe->accessTime = cpu_to_lets(cpu_time);
1444 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1445 efe->modificationTime = cpu_to_lets(cpu_time);
1446 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1447 efe->createTime = cpu_to_lets(cpu_time);
1448 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1449 efe->attrTime = cpu_to_lets(cpu_time);
1451 memset(&(efe->impIdent), 0, sizeof(regid));
1452 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1453 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1454 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1455 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1456 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1457 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1458 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1459 crclen = sizeof(struct extendedFileEntry);
/* ICB strategy: 4096 (two entries) when flagged, else standard 4. */
1461 if (UDF_I_STRAT4096(inode))
1463 fe->icbTag.strategyType = cpu_to_le16(4096);
1464 fe->icbTag.strategyParameter = cpu_to_le16(1);
1465 fe->icbTag.numEntries = cpu_to_le16(2);
1469 fe->icbTag.strategyType = cpu_to_le16(4);
1470 fe->icbTag.numEntries = cpu_to_le16(1);
/* Map the Unix file type onto the ICB file-type byte. */
1473 if (S_ISDIR(inode->i_mode))
1474 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1475 else if (S_ISREG(inode->i_mode))
1476 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1477 else if (S_ISLNK(inode->i_mode))
1478 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1479 else if (S_ISBLK(inode->i_mode))
1480 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1481 else if (S_ISCHR(inode->i_mode))
1482 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1483 else if (S_ISFIFO(inode->i_mode))
1484 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1485 else if (S_ISSOCK(inode->i_mode))
1486 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
/*
 * Rebuild the ICB flags word: allocation type plus setuid/setgid/
 * sticky from i_mode, keeping any other flag bits already on disk.
 */
1488 icbflags = UDF_I_ALLOCTYPE(inode) |
1489 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1490 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1491 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1492 (le16_to_cpu(fe->icbTag.flags) &
1493 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1494 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1496 fe->icbTag.flags = cpu_to_le16(icbflags);
/* Descriptor version 3 for UDF revisions >= 2.00, else version 2. */
1497 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1498 fe->descTag.descVersion = cpu_to_le16(3);
1500 fe->descTag.descVersion = cpu_to_le16(2);
1501 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1502 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
/* CRC spans the entry body plus EAs and allocation descriptors,
 * excluding the 16-byte tag. */
1503 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1504 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1505 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
/* Tag checksum.  NOTE(review): the 'skip byte 4' guard is on an
 * elided line, as in the USE branch above. */
1507 fe->descTag.tagChecksum = 0;
1508 for (i=0; i<16; i++)
1510 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1512 /* write the data blocks */
1513 mark_buffer_dirty(bh);
/* Synchronous path: force the write out and report I/O failure. */
1516 sync_dirty_buffer(bh);
1517 if (buffer_req(bh) && !buffer_uptodate(bh))
1519 printk("IO error syncing udf inode [%s:%08lx]\n",
1520 inode->i_sb->s_id, inode->i_ino);
1524 udf_release_data(bh);
/*
 * udf_iget
 *
 * Look up (or read from disk) the inode stored at the given UDF
 * logical-block address.  iget_locked() keyed on the physical block
 * ensures each on-disk inode is instantiated only once; a fresh
 * (I_NEW) inode has its location recorded and is filled by
 * __udf_read_inode() before being unlocked.
 * NOTE(review): the return statements and error-path braces are on
 * lines elided from this excerpt.
 */
1529 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1531 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1532 struct inode *inode = iget_locked(sb, block);
1537 if (inode->i_state & I_NEW) {
1538 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1539 __udf_read_inode(inode);
1540 unlock_new_inode(inode);
1543 if (is_bad_inode(inode))
/* Sanity check: the block must lie inside its partition. */
1546 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1547 udf_debug("block=%d, partition=%d out of range\n",
1548 ino.logicalBlockNum, ino.partitionReferenceNum);
1549 make_bad_inode(inode);
/*
 * udf_add_aext
 *
 * Append the extent (eloc, elen) at *extoffset.  The descriptor is
 * written either into the in-ICB allocation area (when *bh is NULL)
 * or into an external allocation-extent block.  If there is no room
 * for two more descriptors in the current block, a new block is
 * allocated, chained from the old position with an
 * EXT_NEXT_EXTENT_ALLOCDECS descriptor, and *bloc/*extoffset/*bh are
 * updated to point into it.  Finally the extent itself is written via
 * udf_write_aext() and the containing descriptor's length bookkeeping
 * and tag are updated.
 * NOTE(review): several braces, error returns and else-arms are on
 * elided lines; comments below describe only the visible logic.
 */
1560 int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1561 kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
1564 short_ad *sad = NULL;
1565 long_ad *lad = NULL;
1566 struct allocExtDesc *aed;
/* ptr: write position — inside the in-core ICB data when no external
 * buffer is in use, otherwise inside the allocation-extent block. */
1571 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1573 ptr = (*bh)->b_data + *extoffset;
/* Descriptor size depends on the inode's allocation type. */
1575 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1576 adsize = sizeof(short_ad);
1577 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1578 adsize = sizeof(long_ad);
/*
 * Overflow handling: room must remain for this descriptor AND a
 * terminating/next-extent descriptor, hence the 2 * adsize check.
 */
1582 if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1585 struct buffer_head *nbh;
1587 kernel_lb_addr obloc = *bloc;
/* Allocate the new allocation-extent block near the old one. */
1589 if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1590 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1594 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1600 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1601 set_buffer_uptodate(nbh);
1603 mark_buffer_dirty_inode(nbh, inode);
1605 aed = (struct allocExtDesc *)(nbh->b_data);
1606 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1607 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
/*
 * If even one more descriptor cannot fit, move the last descriptor
 * from the old location into the new block and overwrite its old
 * slot (sptr) with the chain pointer below.
 */
1608 if (*extoffset + adsize > inode->i_sb->s_blocksize)
1610 loffset = *extoffset;
1611 aed->lengthAllocDescs = cpu_to_le32(adsize);
1612 sptr = ptr - adsize;
1613 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1614 memcpy(dptr, sptr, adsize);
1615 *extoffset = sizeof(struct allocExtDesc) + adsize;
/* Otherwise the chain pointer goes into the current free slot. */
1619 loffset = *extoffset + adsize;
1620 aed->lengthAllocDescs = cpu_to_le32(0);
1622 *extoffset = sizeof(struct allocExtDesc);
/* Account the chain descriptor in the old container's length. */
1626 aed = (struct allocExtDesc *)(*bh)->b_data;
1627 aed->lengthAllocDescs =
1628 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1632 UDF_I_LENALLOC(inode) += adsize;
1633 mark_inode_dirty(inode);
/* Tag the new AED block; descriptor version per UDF revision. */
1636 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1637 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1638 bloc->logicalBlockNum, sizeof(tag));
1640 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1641 bloc->logicalBlockNum, sizeof(tag));
/* Write the EXT_NEXT_EXTENT_ALLOCDECS pointer at sptr. */
1642 switch (UDF_I_ALLOCTYPE(inode))
1644 case ICBTAG_FLAG_AD_SHORT:
1646 sad = (short_ad *)sptr;
1647 sad->extLength = cpu_to_le32(
1648 EXT_NEXT_EXTENT_ALLOCDECS |
1649 inode->i_sb->s_blocksize);
1650 sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1653 case ICBTAG_FLAG_AD_LONG:
1655 lad = (long_ad *)sptr;
1656 lad->extLength = cpu_to_le32(
1657 EXT_NEXT_EXTENT_ALLOCDECS |
1658 inode->i_sb->s_blocksize);
1659 lad->extLocation = cpu_to_lelb(*bloc);
1660 memset(lad->impUse, 0x00, sizeof(lad->impUse));
/* Refresh the old container's tag, release it, switch to new block. */
1666 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1667 udf_update_tag((*bh)->b_data, loffset);
1669 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1670 mark_buffer_dirty_inode(*bh, inode);
1671 udf_release_data(*bh);
1674 mark_inode_dirty(inode);
/* Now write the actual extent at the (possibly relocated) position. */
1678 etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
/* In-ICB case: grow the inode's allocation-descriptor length. */
1682 UDF_I_LENALLOC(inode) += adsize;
1683 mark_inode_dirty(inode);
/* External AED case: grow its length field and refresh the tag. */
1687 aed = (struct allocExtDesc *)(*bh)->b_data;
1688 aed->lengthAllocDescs =
1689 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1690 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1691 udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1693 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1694 mark_buffer_dirty_inode(*bh, inode);
/*
 * udf_write_aext
 *
 * Overwrite the allocation descriptor at *extoffset with (eloc, elen),
 * in short_ad or long_ad form depending on the inode's allocation
 * type.  Writes either into the in-ICB data area (bh == NULL) or into
 * the external allocation-extent block bh.  When 'inc' is set the
 * offset is advanced past the descriptor.  Returns the extent type,
 * i.e. the top two bits of elen.
 */
1700 int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
1701 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
/* In-ICB write position (no external buffer). */
1707 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
/* External block: take a reference on bh, released below. */
1710 ptr = bh->b_data + *extoffset;
1711 atomic_inc(&bh->b_count);
1714 switch (UDF_I_ALLOCTYPE(inode))
1716 case ICBTAG_FLAG_AD_SHORT:
1718 short_ad *sad = (short_ad *)ptr;
1719 sad->extLength = cpu_to_le32(elen);
1720 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1721 adsize = sizeof(short_ad);
1724 case ICBTAG_FLAG_AD_LONG:
1726 long_ad *lad = (long_ad *)ptr;
1727 lad->extLength = cpu_to_le32(elen);
1728 lad->extLocation = cpu_to_lelb(eloc);
1729 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1730 adsize = sizeof(long_ad);
/* External AED block: keep its tag (CRC/checksum) up to date. */
1739 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1741 struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1742 udf_update_tag((bh)->b_data,
1743 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1745 mark_buffer_dirty_inode(bh, inode);
1746 udf_release_data(bh);
/* In-ICB write: just dirty the inode itself. */
1749 mark_inode_dirty(inode);
1752 *extoffset += adsize;
/* Extent type lives in the top two bits of the length word. */
1753 return (elen >> 30);
/*
 * udf_next_aext
 *
 * Fetch the next allocation extent, transparently following
 * EXT_NEXT_EXTENT_ALLOCDECS chain descriptors: whenever
 * udf_current_aext() returns a "next extent" descriptor, release the
 * current block, read the chained allocation-extent block and restart
 * past its allocExtDesc header.  Returns the extent type of the first
 * real extent (the failure return on read error is on an elided line).
 */
1756 int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1757 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1761 while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1762 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1765 *extoffset = sizeof(struct allocExtDesc);
1766 udf_release_data(*bh);
1767 if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1769 udf_debug("reading block %d failed!\n",
1770 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
/*
 * udf_current_aext
 *
 * Decode the allocation descriptor at *extoffset into (*eloc, *elen)
 * and return its extent type (top two bits of the on-disk length).
 * Handles both in-ICB descriptors (*bh == NULL) and descriptors inside
 * an external allocation-extent block; 'alen' bounds the scan.
 * NOTE(review): the end-of-descriptors returns after the
 * udf_get_file{short,long}ad failures are on elided lines.
 */
1778 int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1779 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
/* In-ICB case: descriptors live in UDF_I_DATA past the EA area. */
1788 *extoffset = udf_file_entry_alloc_offset(inode);
1789 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1790 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
/* External AED block: descriptors follow the allocExtDesc header. */
1795 *extoffset = sizeof(struct allocExtDesc);
1796 ptr = (*bh)->b_data + *extoffset;
1797 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
1800 switch (UDF_I_ALLOCTYPE(inode))
1802 case ICBTAG_FLAG_AD_SHORT:
1806 if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
1809 etype = le32_to_cpu(sad->extLength) >> 30;
1810 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
/* short_ads carry no partition: inherit the inode's partition. */
1811 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1812 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1815 case ICBTAG_FLAG_AD_LONG:
1819 if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
1822 etype = le32_to_cpu(lad->extLength) >> 30;
1823 *eloc = lelb_to_cpu(lad->extLocation);
1824 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1829 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
/*
 * udf_insert_aext
 *
 * Insert extent (neloc, nelen) at the given position, sliding every
 * subsequent extent down one slot: each existing extent is read, the
 * pending one is written over it, and the displaced extent becomes the
 * pending one for the next iteration.  The final displaced extent is
 * appended with udf_add_aext().  Returns the inserted-extent type.
 */
1838 udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
1839 kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
1841 kernel_lb_addr oeloc;
/* Extra reference so the udf_release_data at the end is balanced. */
1846 atomic_inc(&bh->b_count);
1848 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1850 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
/* Re-pack the displaced extent's type back into its length word. */
1853 nelen = (etype << 30) | oelen;
1855 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1856 udf_release_data(bh);
1857 return (nelen >> 30);
/*
 * udf_delete_aext
 *
 * Delete the extent at (nbloc, nextoffset) by shifting every following
 * extent back one slot, then erasing the vacated trailing slot(s) and
 * shrinking the allocation-descriptor length accounting (in-ICB or in
 * the containing allocation-extent descriptor).  If the walk crossed
 * into a new AED block that is left empty, that block is freed and two
 * slots are cleared in the previous block (the chain pointer plus the
 * moved slot), hence the doubled udf_write_aext() call.
 * NOTE(review): several braces, the adsize error return and condition
 * lines are elided from this excerpt.
 */
1860 int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
1861 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
1863 struct buffer_head *obh;
1864 kernel_lb_addr obloc;
1865 int oextoffset, adsize;
1867 struct allocExtDesc *aed;
/* Two extra references: one each for the o* (write) and n* (read)
 * cursors, both released at the end. */
1871 atomic_inc(&nbh->b_count);
1872 atomic_inc(&nbh->b_count);
1875 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1876 adsize = sizeof(short_ad);
1877 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1878 adsize = sizeof(long_ad);
1884 oextoffset = nextoffset;
/* Skip the extent being deleted; bail if there is none. */
1886 if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
/* Copy each following extent back into the previous slot. */
1889 while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
1891 udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
/* Read cursor moved into a new AED block: advance write cursor too. */
1895 udf_release_data(obh);
1896 atomic_inc(&nbh->b_count);
1898 oextoffset = nextoffset - adsize;
/* Zero extent used to blank the now-unused trailing slot(s). */
1901 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
/* Case: the last AED block is now empty — free it and clear the two
 * slots (chain descriptor + moved descriptor) in the previous block. */
1906 udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
1907 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1908 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
/* In-ICB container: shrink by both cleared descriptors. */
1911 UDF_I_LENALLOC(inode) -= (adsize * 2);
1912 mark_inode_dirty(inode);
/* External container: shrink its AED length and refresh its tag. */
1916 aed = (struct allocExtDesc *)(obh)->b_data;
1917 aed->lengthAllocDescs =
1918 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1919 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1920 udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
1922 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1923 mark_buffer_dirty_inode(obh, inode);
/* Common case: blank just the single vacated slot. */
1928 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1931 UDF_I_LENALLOC(inode) -= adsize;
1932 mark_inode_dirty(inode);
1936 aed = (struct allocExtDesc *)(obh)->b_data;
1937 aed->lengthAllocDescs =
1938 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
1939 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1940 udf_update_tag((obh)->b_data, oextoffset - adsize);
1942 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1943 mark_buffer_dirty_inode(obh, inode);
1947 udf_release_data(nbh);
1948 udf_release_data(obh);
1949 return (elen >> 30);
/*
 * inode_bmap
 *
 * Walk the inode's extent list to find the extent containing file
 * block 'block'.  Accumulates extent lengths (lbcount) until they
 * cover the block's byte offset (bcount); on success *eloc/*elen hold
 * the covering extent and *offset the block index within it.  If the
 * list ends first, *offset is set to the blocks still missing and the
 * inode's known extent length is recorded (the -1 return is on an
 * elided line).
 */
1952 int8_t inode_bmap(struct inode *inode, sector_t block, kernel_lb_addr *bloc, uint32_t *extoffset,
1953 kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset, struct buffer_head **bh)
1955 loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
1960 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
/* Start the walk at the inode's own ICB location. */
1966 *bloc = UDF_I_LOCATION(inode);
1970 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
/* Ran out of extents before reaching bcount. */
1972 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
1973 UDF_I_LENEXTENTS(inode) = lbcount;
1977 } while (lbcount <= bcount);
/* lbcount now includes *elen, so back it out to get the in-extent
 * block offset. */
1979 *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
1984 long udf_block_map(struct inode *inode, sector_t block)
1986 kernel_lb_addr eloc, bloc;
1987 uint32_t extoffset, elen;
1989 struct buffer_head *bh = NULL;
1994 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
1995 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2000 udf_release_data(bh);
2002 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2003 return udf_fixed_to_variable(ret);