/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */

#include "nodelist.h"
#include "debug.h"
/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Amount of summary space to reserve, or
 *	JFFS2_SUMMARY_NOSUM_SIZE if no summary is to be written
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
 *	error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
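/*
 * Typical usage (a sketch, not copied verbatim from any caller; 'ri' and
 * 'datalen' stand in for a raw inode structure and its data length, as in
 * the write path): reserve space, write the node into the free space of
 * c->nextblock, then release the reservation.
 *
 *	uint32_t alloclen;
 *	int ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloclen,
 *				      ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most 'alloclen' bytes to flash ...
 *	jffs2_complete_reservation(c);	(* ups c->alloc_sem *)
 */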
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
					uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);
	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;
			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
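			/* A worked example (illustrative numbers only): with
			 * 64KiB sectors, dirty_size 0x30000, erasing_size
			 * 0x20000 for nr_erasing_blocks == 2, and
			 * unchecked_size 0x1000, this yields
			 * 0x30000 + 0x20000 - 2*0x10000 + 0x1000 = 0x31000:
			 * the two blocks already being erased no longer count
			 * towards reclaimable dirty space. */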
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}
			/* Calc possibly available space. Possibly available means that we
			 * don't know if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
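			/* Illustrative check (hypothetical numbers): with
			 * blocksneeded == 5 and 64KiB sectors, an avail of
			 * 0x50000 gives 0x50000 / 0x10000 == 5 <= 5, so even
			 * a perfect GC could only recover the reserved blocks
			 * themselves, and we return -ENOSPC below. */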
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);
			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}
/* Like jffs2_reserve_space(), but for use from within garbage collection;
   the caller already holds c->alloc_sem, so it is not taken here. */
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}
/* Classify nextblock (clean, dirty or verydirty) and force the caller to select another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
		  jeb->offset));
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}
/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
			       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
			       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;		/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}
		/* Is there enough space for writing out the current node, or do we
		   have to write out summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; it disabled summary for this jeb and freed
				   the collected information. Retry without a summary. */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}
/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: flash offset of the node, with the REF_* state in the low two bits
 *	@len: length of this physical node
 *	@ic: inode cache the node belongs to, if any
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));

	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		return ERR_PTR(-EINVAL);
	}
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_del(&jeb->list);
		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}
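/*
 * Typical usage (a sketch; 'f' and 'totlen' are illustrative, not taken
 * from a specific caller): after reserving space, the caller writes the
 * node at the current write point of c->nextblock, then reports it here
 * so the accounting and the per-inode node lists stay in sync. The write
 * point is the same expression the wrong-place check above uses.
 *
 *	uint32_t flash_ofs = c->nextblock->offset +
 *			(c->sector_size - c->nextblock->free_size);
 *	... write the node to flash at flash_ofs ...
 *	jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL, PAD(totlen),
 *				    f->inocache);
 */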
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];
	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);
	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}
	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}
	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}
	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	up(&c->erase_free_sem);
}
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}
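/*
 * How this predicate is consumed (a sketch of the garbage-collector
 * thread's wait loop, paraphrased rather than copied from background.c):
 * the thread sleeps until there is GC work to do, then runs a pass.
 *
 *	while (!jffs2_thread_should_wake(c)) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		schedule();	(* woken via jffs2_garbage_collect_trigger() *)
 *	}
 *	jffs2_garbage_collect_pass(c);
 */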