2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
8 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
12 #include <linux/ufs_fs.h>
13 #include <linux/stat.h>
14 #include <linux/time.h>
15 #include <linux/string.h>
16 #include <linux/quotaops.h>
17 #include <linux/buffer_head.h>
18 #include <linux/capability.h>
19 #include <linux/sched.h>
20 #include <linux/bitops.h>
21 #include <asm/byteorder.h>
/*
 * File-local allocator helpers.  INVBLOCK is the "no block found"
 * sentinel the search routines return (compared against at their
 * call sites, e.g. "if (result == INVBLOCK)").
 */
26 #define INVBLOCK ((u64)-1L)
28 static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
29 static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
30 static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
31 static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
/* Lookup tables defined at the bottom of the file; chosen by s_fpb in ufs_bitmap_search. */
32 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
33 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * ufs_free_fragments -- return 'count' fragments starting at fragment
 * number 'fragment' to the free map of their cylinder group, update the
 * per-cg / per-fs free counters, and try to reassemble the enclosing
 * block into a whole free block.
 *
 * NOTE(review): this excerpt is a numbered listing with gaps (embedded
 * line numbers jump, e.g. 56->60, 123->133); braces, early returns and
 * the lock/unlock calls are not visible here.  Do not edit without the
 * complete original file.
 */
36 * Free 'count' fragments from fragment number 'fragment'
38 void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
40 struct super_block * sb;
41 struct ufs_sb_private_info * uspi;
42 struct ufs_super_block_first * usb1;
43 struct ufs_cg_private_info * ucpi;
44 struct ufs_cylinder_group * ucg;
45 unsigned cgno, bit, end_bit, bbase, blkmap, i;
49 uspi = UFS_SB(sb)->s_uspi;
50 usb1 = ubh_get_usb_first(uspi);
52 UFSD("ENTER, fragment %llu, count %u\n",
53 (unsigned long long)fragment, count);
/* Sanity: the run must not cross a cylinder-group boundary. */
55 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
56 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Locate the cylinder group (cgno) and the bit offset inside it. */
60 cgno = ufs_dtog(uspi, fragment);
61 bit = ufs_dtogd(uspi, fragment);
62 if (cgno >= uspi->s_ncg) {
63 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
67 ucpi = ufs_load_cylinder (sb, cgno);
70 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
71 if (!ufs_cg_chkmagic(sb, ucg)) {
72 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
/*
 * Withdraw the old fragment-size accounting for the block containing
 * the run (ufs_fragacct with -1), set each freed bit, then re-account
 * the new map state (ufs_fragacct with +1).
 */
76 end_bit = bit + count;
77 bbase = ufs_blknum (bit);
78 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
79 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
80 for (i = bit; i < end_bit; i++) {
81 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
82 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
/* Double-free of a fragment is reported, not silently ignored. */
84 ufs_error (sb, "ufs_free_fragments",
85 "bit already cleared for fragment %u", i);
88 DQUOT_FREE_BLOCK (inode, count);
91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
92 uspi->cs_total.cs_nffree += count;
93 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
94 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
95 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
98 * Trying to reassemble free fragments into block
/*
 * If the whole block is now free, convert s_fpb fragment credits into
 * one block credit (nffree -= s_fpb, nbfree += 1) and, on UFS1, update
 * the cylinder/rotational-position summaries.
 */
100 blkno = ufs_fragstoblks (bbase);
101 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
102 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
103 uspi->cs_total.cs_nffree -= uspi->s_fpb;
104 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
105 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
106 ufs_clusteracct (sb, ucpi, blkno, 1);
107 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
108 uspi->cs_total.cs_nbfree++;
109 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
110 if (uspi->fs_magic != UFS2_MAGIC) {
111 unsigned cylno = ufs_cbtocylno (bbase);
113 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
114 ufs_cbtorpos(bbase)), 1);
115 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Push the modified superblock/cg buffers out; write synchronously if mounted sync. */
119 ubh_mark_buffer_dirty (USPI_UBH(uspi));
120 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
121 if (sb->s_flags & MS_SYNCHRONOUS) {
122 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
123 ubh_wait_on_buffer (UCPI_UBH(ucpi));
133 UFSD("EXIT (FAILED)\n");
/*
 * ufs_free_blocks -- free whole blocks: 'count' fragments starting at
 * 'fragment', where both must be block-aligned (checked against
 * s_fpbmask below).  Each block's bitmap bits are set in one go with
 * ubh_setblock and only block counters (cs_nbfree) are credited.
 *
 * NOTE(review): numbered listing with gaps -- the cg-overflow loop that
 * consumes 'overflow' (set at original line 176) and the return paths
 * are not visible in this excerpt.
 */
138 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
140 void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
142 struct super_block * sb;
143 struct ufs_sb_private_info * uspi;
144 struct ufs_super_block_first * usb1;
145 struct ufs_cg_private_info * ucpi;
146 struct ufs_cylinder_group * ucg;
147 unsigned overflow, cgno, bit, end_bit, i;
151 uspi = UFS_SB(sb)->s_uspi;
152 usb1 = ubh_get_usb_first(uspi);
154 UFSD("ENTER, fragment %llu, count %u\n",
155 (unsigned long long)fragment, count);
/* Both start and length must be multiples of fragments-per-block. */
157 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
158 ufs_error (sb, "ufs_free_blocks", "internal error, "
159 "fragment %llu, count %u\n",
160 (unsigned long long)fragment, count);
168 cgno = ufs_dtog(uspi, fragment);
169 bit = ufs_dtogd(uspi, fragment);
170 if (cgno >= uspi->s_ncg) {
171 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* A run crossing the cg boundary is split; 'overflow' carries the remainder. */
174 end_bit = bit + count;
175 if (end_bit > uspi->s_fpg) {
176 overflow = bit + count - uspi->s_fpg;
181 ucpi = ufs_load_cylinder (sb, cgno);
184 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
185 if (!ufs_cg_chkmagic(sb, ucg)) {
186 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* One iteration per block: flag double-frees, set the block's bits, credit counters. */
190 for (i = bit; i < end_bit; i += uspi->s_fpb) {
191 blkno = ufs_fragstoblks(i);
192 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
193 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
195 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
196 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
197 ufs_clusteracct (sb, ucpi, blkno, 1);
198 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
200 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
201 uspi->cs_total.cs_nbfree++;
202 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* UFS1 only: maintain cylinder / rotational-position summary tables. */
204 if (uspi->fs_magic != UFS2_MAGIC) {
205 unsigned cylno = ufs_cbtocylno(i);
207 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
208 ufs_cbtorpos(i)), 1);
209 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
213 ubh_mark_buffer_dirty (USPI_UBH(uspi));
214 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
215 if (sb->s_flags & MS_SYNCHRONOUS) {
216 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
217 ubh_wait_on_buffer (UCPI_UBH(ucpi));
234 UFSD("EXIT (FAILED)\n");
/*
 * ufs_change_blocknr -- retarget the page-cache buffers of blocks
 * [oldb, oldb+count) to the new on-disk locations [newb, newb+count),
 * walking the affected pages and rewriting each mapped bh->b_blocknr.
 * 'locked_page' is already locked by the caller (writepage /
 * prepare_write path); all other pages are taken via
 * ufs_get_locked_page / ufs_put_locked_page.
 *
 * NOTE(review): numbered listing with gaps -- the do/while opener, the
 * 'pos' initialisation before the skip loop, and the continue/unlock
 * lines are not visible here.
 */
239 * Modify inode page cache in such way:
240 * have - blocks with b_blocknr equal to oldb...oldb+count-1
241 * get - blocks with b_blocknr equal to newb...newb+count-1
242 * also we suppose that oldb...oldb+count-1 blocks
243 * situated at the end of file.
245 * We can come here from ufs_writepage or ufs_prepare_write,
246 * locked_page is argument of these functions, so we already lock it.
248 static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
249 unsigned int count, unsigned int oldb,
250 unsigned int newb, struct page *locked_page)
/* mask = blocks-per-page - 1; used to advance 'beg' to the next page boundary. */
252 const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
253 struct address_space * const mapping = inode->i_mapping;
254 pgoff_t index, cur_index;
255 unsigned end, pos, j;
257 struct buffer_head *head, *bh;
259 UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
260 inode->i_ino, count, oldb, newb);
262 BUG_ON(!locked_page);
263 BUG_ON(!PageLocked(locked_page));
265 cur_index = locked_page->index;
/* One iteration per page: (beg | mask) + 1 jumps to the first block of the next page. */
267 for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
268 index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
/* The caller's page is already locked; lock every other page ourselves. */
270 if (likely(cur_index != index)) {
271 page = ufs_get_locked_page(mapping, index);
272 if (!page || IS_ERR(page)) /* it was truncated or EIO */
277 head = page_buffers(page);
/* Skip to the buffer corresponding to block 'beg' within this page. */
280 for (j = 0; j < pos; ++j)
281 bh = bh->b_this_page;
284 if (buffer_mapped(bh)) {
285 pos = bh->b_blocknr - oldb;
287 UFSD(" change from %llu to %llu\n",
288 (unsigned long long)pos + oldb,
289 (unsigned long long)pos + newb);
290 bh->b_blocknr = newb + pos;
/* Drop any stale alias of the new location from the block device's cache. */
291 unmap_underlying_metadata(bh->b_bdev,
293 mark_buffer_dirty(bh);
298 bh = bh->b_this_page;
299 } while (bh != head);
302 set_page_dirty(page);
304 if (likely(cur_index != index))
305 ufs_put_locked_page(page);
/*
 * ufs_clear_frags -- zero-fill 'n' freshly allocated fragments starting
 * at device block 'beg' through the buffer cache: each block is fetched
 * with sb_getblk, memset to zero, marked uptodate+dirty, and written
 * out immediately when the inode is sync or 'sync' is requested.
 *
 * NOTE(review): numbered listing with gaps -- the 'sync' parameter
 * declaration and the brelse/put of 'bh' are not visible here.
 */
310 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
313 struct buffer_head *bh;
314 sector_t end = beg + n;
316 for (; beg < end; ++beg) {
317 bh = sb_getblk(inode->i_sb, beg);
319 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
320 set_buffer_uptodate(bh);
321 mark_buffer_dirty(bh);
323 if (IS_SYNC(inode) || sync)
324 sync_dirty_buffer(bh);
/*
 * ufs_new_fragments -- central allocation entry point.  Given a block
 * pointer *p (on-disk format), a file fragment number, a 'goal' hint
 * and a fragment count, it either (a) allocates a brand-new run,
 * (b) extends the existing run in place via ufs_add_fragments, or
 * (c) allocates a replacement block, migrates the cached buffers with
 * ufs_change_blocknr and frees the old fragments.  Returns the new
 * fragment number, 0 on failure (*err carries the reason).
 *
 * NOTE(review): numbered listing with gaps -- lock/unlock, several
 * returns, the switch case labels (UFS_OPTSPACE/UFS_OPTTIME,
 * presumably) and parts of the i_lastfrag update are not visible.
 */
329 u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
330 u64 goal, unsigned count, int *err,
331 struct page *locked_page)
333 struct super_block * sb;
334 struct ufs_sb_private_info * uspi;
335 struct ufs_super_block_first * usb1;
336 unsigned cgno, oldcount, newcount;
337 u64 tmp, request, result;
339 UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
340 inode->i_ino, (unsigned long long)fragment,
341 (unsigned long long)goal, count);
344 uspi = UFS_SB(sb)->s_uspi;
345 usb1 = ubh_get_usb_first(uspi);
/* tmp = current on-disk value of the pointer being (re)allocated. */
349 tmp = ufs_data_ptr_to_cpu(sb, p);
/* Clamp a request that would spill past the block boundary. */
351 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
352 ufs_warning(sb, "ufs_new_fragments", "internal warning"
353 " fragment %llu, count %u",
354 (unsigned long long)fragment, count);
355 count = uspi->s_fpb - ufs_fragnum(fragment);
357 oldcount = ufs_fragnum (fragment);
358 newcount = oldcount + count;
361 * Somebody else has just allocated our fragments
365 ufs_error(sb, "ufs_new_fragments", "internal error, "
366 "fragment %llu, tmp %llu\n",
367 (unsigned long long)fragment,
368 (unsigned long long)tmp);
372 if (fragment < UFS_I(inode)->i_lastfrag) {
373 UFSD("EXIT (ALREADY ALLOCATED)\n");
380 UFSD("EXIT (ALREADY ALLOCATED)\n");
387 * There is not enough space for user on the device
/* Non-privileged writers may not dip into the reserved minfree space. */
389 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
391 UFSD("EXIT (FAILED)\n");
/* Pick the target cylinder group: inode's cg when no usable goal, else the goal's cg. */
395 if (goal >= uspi->s_size)
398 cgno = ufs_inotocg (inode->i_ino);
400 cgno = ufs_dtog(uspi, goal);
403 * allocate new fragment
/* Case (a): the pointer was empty -- plain new allocation. */
406 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
408 ufs_cpu_to_data_ptr(sb, p, result);
410 UFS_I(inode)->i_lastfrag =
411 max_t(u32, UFS_I(inode)->i_lastfrag,
413 ufs_clear_frags(inode, result + oldcount,
414 newcount - oldcount, locked_page != NULL);
417 UFSD("EXIT, result %llu\n", (unsigned long long)result);
/* Case (b): try to grow the existing run in place. */
424 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
427 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
428 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
429 locked_page != NULL);
431 UFSD("EXIT, result %llu\n", (unsigned long long)result);
436 * allocate new block and move data
/*
 * Case (c): in-place growth failed.  Choose the request size from the
 * fs_optim policy (space vs. time), flipping the superblock policy
 * when free-fragment levels cross the minfree-derived thresholds.
 */
438 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
441 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
442 > uspi->s_dsize * uspi->s_minfree / (2 * 100))
444 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
447 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
450 request = uspi->s_fpb;
451 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
452 (uspi->s_minfree - 2) / 100)
454 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
457 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
459 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
460 locked_page != NULL);
/* Repoint cached buffers from the old location, then publish the new pointer. */
461 ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
462 result, locked_page);
463 ufs_cpu_to_data_ptr(sb, p, result);
465 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Give back the over-allocation tail and the old fragments. */
467 if (newcount < request)
468 ufs_free_fragments (inode, result + newcount, request - newcount);
469 ufs_free_fragments (inode, tmp, oldcount);
470 UFSD("EXIT, result %llu\n", (unsigned long long)result);
475 UFSD("EXIT (FAILED)\n");
/*
 * ufs_add_fragments -- try to extend an existing run of 'oldcount'
 * fragments at 'fragment' to 'newcount' fragments *in place*: the
 * fragments immediately following the run inside the same block must
 * all be free.  On success the extra bits are claimed, the frsum
 * (free-fragment-size summary) is rebalanced, and counters/quota are
 * charged.  Returns the fragment number on success.
 *
 * NOTE(review): numbered listing with gaps -- failure returns (e.g.
 * after the cs_nffree / s_fpb precondition checks) are not visible.
 */
479 static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
480 unsigned oldcount, unsigned newcount, int *err)
482 struct super_block * sb;
483 struct ufs_sb_private_info * uspi;
484 struct ufs_super_block_first * usb1;
485 struct ufs_cg_private_info * ucpi;
486 struct ufs_cylinder_group * ucg;
487 unsigned cgno, fragno, fragoff, count, fragsize, i;
489 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
490 (unsigned long long)fragment, oldcount, newcount);
493 uspi = UFS_SB(sb)->s_uspi;
494 usb1 = ubh_get_usb_first (uspi);
495 count = newcount - oldcount;
/* Quick rejects: not enough free fragments in the cg, or run would cross the block. */
497 cgno = ufs_dtog(uspi, fragment);
498 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
500 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
502 ucpi = ufs_load_cylinder (sb, cgno);
505 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
506 if (!ufs_cg_chkmagic(sb, ucg)) {
507 ufs_panic (sb, "ufs_add_fragments",
508 "internal error, bad magic number on cg %u", cgno);
/* The fragments we want to grow into must all be free. */
512 fragno = ufs_dtogd(uspi, fragment);
513 fragoff = ufs_fragnum (fragno);
514 for (i = oldcount; i < newcount; i++)
515 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
518 * Block can be extended
520 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the free run following the old fragments to find its frsum bucket. */
521 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
522 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
524 fragsize = i - oldcount;
525 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
526 ufs_panic (sb, "ufs_add_fragments",
527 "internal error or corrupted bitmap on cg %u", cgno);
/* Move the run from bucket 'fragsize' to 'fragsize - count' (if any remains). */
528 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
529 if (fragsize != count)
530 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
531 for (i = oldcount; i < newcount; i++)
532 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
/* Charge quota for the newly claimed fragments. */
533 if(DQUOT_ALLOC_BLOCK(inode, count)) {
538 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
539 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
540 uspi->cs_total.cs_nffree -= count;
542 ubh_mark_buffer_dirty (USPI_UBH(uspi));
543 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
544 if (sb->s_flags & MS_SYNCHRONOUS) {
545 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
546 ubh_wait_on_buffer (UCPI_UBH(ucpi));
550 UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
/*
 * UFS_TEST_FREE_SPACE_CG -- used only by ufs_alloc_fragments: checks
 * whether cylinder group 'cgno' has either a whole free block
 * (cs_nbfree) or a free-fragment run of length >= count (cg_frsum[k]).
 * NOTE(review): the listing has gaps here -- the branch taken on
 * success (presumably a goto into the allocation path) is not visible.
 * Relies on sb, uspi, cgno, count, k, ucg being in scope at expansion.
 */
555 #define UFS_TEST_FREE_SPACE_CG \
556 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
557 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
559 for (k = count; k < uspi->s_fpb; k++) \
560 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * ufs_alloc_fragments -- allocate 'count' fragments near 'goal',
 * searching cylinder groups in the classic FFS order: (1) preferred
 * cg, (2) quadratic rehash, (3) brute-force scan.  Within the chosen
 * cg, either a whole block is taken (splitting off the unused tail
 * back into fragments) or a suitable free run is found by
 * ufs_bitmap_search.  Returns a fs-wide fragment number, with
 * failure/INVBLOCK paths not fully visible in this excerpt.
 *
 * NOTE(review): numbered listing with gaps -- the cg_found label,
 * several returns and the result offset for the block case are
 * missing from view.
 */
563 static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
564 u64 goal, unsigned count, int *err)
566 struct super_block * sb;
567 struct ufs_sb_private_info * uspi;
568 struct ufs_super_block_first * usb1;
569 struct ufs_cg_private_info * ucpi;
570 struct ufs_cylinder_group * ucg;
571 unsigned oldcg, i, j, k, allocsize;
574 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
575 inode->i_ino, cgno, (unsigned long long)goal, count);
578 uspi = UFS_SB(sb)->s_uspi;
579 usb1 = ubh_get_usb_first(uspi);
583 * 1. searching on preferred cylinder group
585 UFS_TEST_FREE_SPACE_CG
588 * 2. quadratic rehash
590 for (j = 1; j < uspi->s_ncg; j *= 2) {
592 if (cgno >= uspi->s_ncg)
594 UFS_TEST_FREE_SPACE_CG
598 * 3. brute force search
599 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
601 cgno = (oldcg + 1) % uspi->s_ncg;
602 for (j = 2; j < uspi->s_ncg; j++) {
604 if (cgno >= uspi->s_ncg)
606 UFS_TEST_FREE_SPACE_CG
609 UFSD("EXIT (FAILED)\n");
/* A cg with space was found: load it and stamp its modification time. */
613 ucpi = ufs_load_cylinder (sb, cgno);
616 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
617 if (!ufs_cg_chkmagic(sb, ucg))
618 ufs_panic (sb, "ufs_alloc_fragments",
619 "internal error, bad magic number on cg %u", cgno);
620 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Exact whole-block request: take a block directly. */
622 if (count == uspi->s_fpb) {
623 result = ufs_alloccg_block (inode, ucpi, goal, err);
624 if (result == INVBLOCK)
/* Find the smallest available free-run size >= count via the frsum summary. */
629 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
630 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/*
 * No suitable fragment run: carve a whole block, keep 'count'
 * fragments, and return the tail (i = s_fpb - count) to the free map,
 * crediting quota and the frsum bucket for the remainder.
 */
633 if (allocsize == uspi->s_fpb) {
634 result = ufs_alloccg_block (inode, ucpi, goal, err);
635 if (result == INVBLOCK)
637 goal = ufs_dtogd(uspi, result);
638 for (i = count; i < uspi->s_fpb; i++)
639 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
640 i = uspi->s_fpb - count;
641 DQUOT_FREE_BLOCK(inode, i);
643 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
644 uspi->cs_total.cs_nffree += i;
645 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
646 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* Fragment-run case: locate a run of 'allocsize' free fragments near goal. */
650 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
651 if (result == INVBLOCK)
653 if(DQUOT_ALLOC_BLOCK(inode, count)) {
657 for (i = 0; i < count; i++)
658 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
660 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
661 uspi->cs_total.cs_nffree -= count;
662 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* The run shrinks from bucket 'allocsize' to 'allocsize - count'. */
663 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
665 if (count != allocsize)
666 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
669 ubh_mark_buffer_dirty (USPI_UBH(uspi));
670 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
671 if (sb->s_flags & MS_SYNCHRONOUS) {
672 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
673 ubh_wait_on_buffer (UCPI_UBH(ucpi));
/* Convert the cg-relative fragment number to a fs-wide one. */
677 result += cgno * uspi->s_fpg;
678 UFSD("EXIT3, result %llu\n", (unsigned long long)result);
/*
 * ufs_alloccg_block -- allocate one whole block inside the given
 * cylinder group, preferring 'goal' (falling back to the cg rotor when
 * no goal, and to ufs_bitmap_search when the goal block is taken).
 * Clears the block's bit, charges quota, decrements block counters and
 * (UFS1) the cylinder summaries.  Returns a cg-relative fragment
 * number; INVBLOCK propagation on search failure is implied by the
 * check at original line 716 (the return itself is not visible).
 *
 * NOTE(review): numbered listing with gaps -- the 'goal' parameter
 * line, the norot/gotit labels and several returns are missing.
 */
682 static u64 ufs_alloccg_block(struct inode *inode,
683 struct ufs_cg_private_info *ucpi,
686 struct super_block * sb;
687 struct ufs_sb_private_info * uspi;
688 struct ufs_super_block_first * usb1;
689 struct ufs_cylinder_group * ucg;
692 UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
695 uspi = UFS_SB(sb)->s_uspi;
696 usb1 = ubh_get_usb_first(uspi);
697 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* No usable goal: start from this cg's last-allocation rotor. */
700 goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, cg-relative. */
703 goal = ufs_blknum (goal);
704 goal = ufs_dtogd(uspi, goal);
707 * If the requested block is available, use it.
709 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Goal taken: scan the bitmap for any run of s_fpb free fragments. */
715 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
716 if (result == INVBLOCK)
718 ucpi->c_rotor = result;
720 blkno = ufs_fragstoblks(result);
721 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
722 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
723 ufs_clusteracct (sb, ucpi, blkno, -1);
724 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
729 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
730 uspi->cs_total.cs_nbfree--;
731 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
/* UFS1 only: maintain cylinder / rotational-position summary tables. */
733 if (uspi->fs_magic != UFS2_MAGIC) {
734 unsigned cylno = ufs_cbtocylno((unsigned)result);
736 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
737 ufs_cbtorpos((unsigned)result)), 1);
738 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
741 UFSD("EXIT, result %llu\n", (unsigned long long)result);
/*
 * ubh_scanc -- scan 'size' bytes of a (possibly multi-buffer) ufs
 * buffer head starting at byte 'begin', stopping at the first byte b
 * for which (table[b] & mask) != 0.  Returns the number of bytes NOT
 * scanned (size + rest on the visible path), i.e. 0 means no match --
 * mirroring the BSD scanc() convention used by ufs_bitmap_search.
 *
 * NOTE(review): numbered listing with gaps -- the outer loop over
 * buffers, 'cp' declaration and the size/rest bookkeeping between
 * iterations are not visible here.
 */
746 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
747 struct ufs_buffer_head *ubh,
748 unsigned begin, unsigned size,
749 unsigned char *table, unsigned char mask)
751 unsigned rest, offset;
/* Split 'begin' into a buffer index (begin >> s_fshift) and byte offset within it. */
755 offset = begin & ~uspi->s_fmask;
756 begin >>= uspi->s_fshift;
/* Scan either the remainder of the request or the remainder of this buffer. */
758 if ((offset + size) < uspi->s_fsize)
761 rest = uspi->s_fsize - offset;
763 cp = ubh->bh[begin]->b_data + offset;
764 while ((table[*cp++] & mask) == 0 && --rest)
771 return (size + rest);
/*
 * ufs_bitmap_search -- two-level search for a run of 'count' free
 * fragments in a cylinder group: first a byte-granular scan of the
 * free bitmap via ubh_scanc with a fragtable/mask that flags bytes
 * containing a long-enough run, then a bit-granular confirmation of
 * the exact position using mask_arr/want_arr sliding over the block's
 * bitmap.  Returns a cg-relative fragment number, INVBLOCK-style
 * failure paths implied by callers (returns not visible here).
 *
 * NOTE(review): numbered listing with gaps -- the wrap-around second
 * scan setup, the mask/want shift inside the inner loop and the
 * return statements are missing from view.
 */
775 * Find a block of the specified size in the specified cylinder group.
776 * @sp: pointer to super block
777 * @ucpi: pointer to cylinder group info
778 * @goal: near which block we want find new one
779 * @count: specified size
781 static u64 ufs_bitmap_search(struct super_block *sb,
782 struct ufs_cg_private_info *ucpi,
783 u64 goal, unsigned count)
786 * Bit patterns for identifying fragments in the block map
787 * used as ((map & mask_arr) == want_arr)
789 static const int mask_arr[9] = {
790 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
792 static const int want_arr[9] = {
793 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
795 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
796 struct ufs_super_block_first *usb1;
797 struct ufs_cylinder_group *ucg;
798 unsigned start, length, loc;
799 unsigned pos, want, blockmap, mask, end;
802 UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
803 (unsigned long long)goal, count);
805 usb1 = ubh_get_usb_first (uspi);
806 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* Start the byte scan at the goal's byte, or at the cg fragment rotor. */
809 start = ufs_dtogd(uspi, goal) >> 3;
811 start = ucpi->c_frotor >> 3;
813 length = ((uspi->s_fpg + 7) >> 3) - start;
814 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
815 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
816 1 << (count - 1 + (uspi->s_fpb & 7)));
/* First scan found nothing: retry from the beginning of the bitmap. */
819 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
820 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
822 1 << (count - 1 + (uspi->s_fpb & 7)));
824 ufs_error(sb, "ufs_bitmap_search",
825 "bitmap corrupted on cg %u, start %u,"
826 " length %u, count %u, freeoff %u\n",
827 ucpi->c_cgx, start, length, count,
/* Convert the scanc residue back into a fragment index; remember it in the rotor. */
833 result = (start + length - loc) << 3;
834 ucpi->c_frotor = result;
837 * found the byte in the map
/* Confirm bit-exact position: slide the count-sized mask across the byte's blocks. */
840 for (end = result + 8; result < end; result += uspi->s_fpb) {
841 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
843 mask = mask_arr[count];
844 want = want_arr[count];
845 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
846 if ((blockmap & mask) == want) {
847 UFSD("EXIT, result %llu\n",
848 (unsigned long long)result);
856 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
858 UFSD("EXIT (FAILED)\n");
/*
 * ufs_clusteracct -- maintain the 44BSD contiguous-cluster summary
 * when a block is freed (cnt > 0 path sets the cluster bit) or
 * allocated (clears it).  It measures the free cluster extending
 * forward and backward from 'blkno' (capped at s_contigsumsize) and
 * adjusts the cluster-size histogram at c_clustersumoff: the merged
 * cluster's bucket gains 'cnt', the old forward/back buckets lose it.
 *
 * NOTE(review): numbered listing with gaps -- the cnt>0 branch
 * structure, the forw/back computations from 'i', and the merge of
 * back + forw + 1 into 'i' are not visible here.
 */
862 static void ufs_clusteracct(struct super_block * sb,
863 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
865 struct ufs_sb_private_info * uspi;
866 int i, start, end, forw, back;
868 uspi = UFS_SB(sb)->s_uspi;
/* Clustering disabled on this fs: nothing to account. */
869 if (uspi->s_contigsumsize <= 0)
873 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
875 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
878 * Find the size of the cluster going forward.
881 end = start + uspi->s_contigsumsize;
882 if ( end >= ucpi->c_nclusterblks)
883 end = ucpi->c_nclusterblks;
884 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
890 * Find the size of the cluster going backward.
893 end = start - uspi->s_contigsumsize;
896 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
902 * Account for old cluster and the possibly new forward and
/* Cap the merged cluster length at the summary table size. */
906 if (i > uspi->s_contigsumsize)
907 i = uspi->s_contigsumsize;
908 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
910 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
912 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Per-byte lookup table for filesystems with 8 fragments per block,
 * consumed by ubh_scanc from ufs_bitmap_search: for each possible
 * free-bitmap byte value, the entry encodes (as a bitmask tested
 * against 1 << (count - 1)) which free-run lengths that byte contains.
 * Presumably inherited from BSD FFS -- TODO confirm values against the
 * complete original file; the closing brace is not visible in this
 * excerpt.
 */
916 static unsigned char ufs_fragtable_8fpb[] = {
917 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
918 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
919 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
920 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
921 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
922 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
923 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
924 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
925 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
926 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
927 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
928 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
929 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
930 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
931 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
932 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
/*
 * Companion lookup table for filesystems where s_fpb != 8, selected by
 * the (uspi->s_fpb == 8) ternary in ufs_bitmap_search; the scan mask
 * there additionally shifts by (s_fpb & 7) to index into these wider
 * per-byte run encodings.  Presumably inherited from BSD FFS -- TODO
 * confirm values against the complete original file; the closing brace
 * is not visible in this excerpt.
 */
935 static unsigned char ufs_fragtable_other[] = {
936 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
937 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
938 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
939 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
940 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
941 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
942 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
943 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
944 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
945 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
946 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
947 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
948 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
949 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
950 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
951 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,