/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"
/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Amount of summary space to reserve, or JFFS2_SUMMARY_NOSUM_SIZE
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
 *	error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	mutex_lock(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);
	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If a block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
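			/* Worked example with made-up numbers: on a 64KiB-sector
			 * flash with dirty_size = 0x30000, one block mid-erase
			 * (erasing_size = 0x10000, nr_erasing_blocks = 1) and
			 * unchecked_size = 0x4000, this gives
			 *   dirty = 0x30000 + 0x10000 - 1*0x10000 + 0x4000 = 0x34000
			 * so the erasing block is counted exactly once.
			 */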
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}
			/* Calc possibly available space. Possibly available means that we
			 * don't know, if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC, if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem, which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}
			mutex_unlock(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}
/* Classify nextblock (clean, dirty or verydirty) and force the selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
		  jeb->offset));
		return;
	}
	/* Check, if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}
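/*
 * Illustration with made-up numbers: merging wasted into dirty above
 * turns e.g. wasted_size = 0x100, dirty_size = 0x300 into
 * dirty_size = 0x400. Whether that lands the block on dirty_list or
 * very_dirty_list depends on the VERYDIRTY() threshold from nodelist.h
 * (roughly half a sector under the usual definition).
 */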
/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
			       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
			       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;		/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information: disable summary for this jeb and free the
				   collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}
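/*
 * Worked example for the arithmetic above (illustrative numbers): with
 * sector_size = 0x10000, a nextblock at offset 0x40000 with
 * free_size = 0x7000 and reserved_size = 0x100 yields
 *   *len = 0x7000 - 0x100 = 0x6f00
 * and the returned space starts at 0x40000 + (0x10000 - 0x7000) = 0x49000.
 */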
/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: physical offset of the new node, with REF_* flag bits in the low two bits
 *	@len: length of this physical node
 *	@ic: inode cache the node belongs to, or NULL
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));

	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
		if (c->nextblock)
			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
		else
			printk(KERN_WARNING "No nextblock");
		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	mutex_unlock(&c->alloc_sem);
}
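/*
 * A minimal sketch of the caller-side pattern for the functions above,
 * modelled on the real callers in write.c. Illustrative only: the
 * example_* name is hypothetical and error handling is reduced to the
 * essentials.
 */
static int __maybe_unused example_write_one_node(struct jffs2_sb_info *c,
						 struct jffs2_inode_cache *ic,
						 const unsigned char *buf,
						 uint32_t size)
{
	struct jffs2_raw_node_ref *ref;
	uint32_t len, ofs;
	size_t retlen;
	int ret;

	/* On success this also takes c->alloc_sem for us */
	ret = jffs2_reserve_space(c, size, &len, ALLOC_NORMAL,
				  JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret)
		return ret;

	/* The space handed out starts at the current end of c->nextblock */
	ofs = c->nextblock->offset + (c->sector_size - c->nextblock->free_size);

	ret = jffs2_flash_write(c, ofs, size, &retlen, buf);
	if (!ret && retlen == size) {
		/* Report the node so accounting and per-inode lists see it */
		ref = jffs2_add_physical_node_ref(c, ofs | REF_NORMAL,
						  PAD(size), ic);
		if (IS_ERR(ref))
			ret = PTR_ERR(ref);
	}

	/* Always drop the alloc_sem via jffs2_complete_reservation() */
	jffs2_complete_reservation(c);
	return ret;
}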
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}
	/* Take care that the wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}
	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}
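	/* Refiling summary (descriptive, not exhaustive): a jeb whose
	 * dirty_size has just crossed ISDIRTY() moves clean_list ->
	 * dirty_list; one that has just crossed VERYDIRTY() moves
	 * dirty_list -> very_dirty_list; a fully dirtied jeb heads for
	 * erasure. The addedsize bookkeeping above exists so these
	 * "crossed the threshold just now" comparisons still work when
	 * wasted space was converted to dirty in the same pass. */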
	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If a block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));

	return ret;
}
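/*
 * Illustration with made-up numbers: with resv_blocks_gctrigger = 6,
 * a flash holding 4 free + 1 erasing blocks only wakes the GC thread
 * if the adjusted dirty total also exceeds nospc_dirty_size - there is
 * little point in running GC when too little dirty space exists to reclaim.
 */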