fs/jffs2/nodemgmt.c
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *      jffs2_reserve_space - request physical space to write nodes to flash
 *      @c: superblock info
 *      @minsize: Minimum acceptable size of allocation
 *      @len: Returned value of allocation length
 *      @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *      @sumsize: Summary space to reserve, or JFFS2_SUMMARY_NOSUM_SIZE to
 *      suppress summary generation
 *
 *      Requests a block of physical space on the flash. Returns zero for success
 *      and puts the length of the allocation into @len, or returns -ENOSPC or
 *      another error if appropriate.
 *
 *      If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *      allocation semaphore, to prevent more than one allocation from being
 *      active at any time. The semaphore is later released by
 *      jffs2_complete_reservation().
 *
 *      jffs2_reserve_space() may trigger garbage collection in order to make room
 *      for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                                  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                        uint32_t *len, int prio, uint32_t sumsize)
{
        int ret = -EAGAIN;
        int blocksneeded = c->resv_blocks_write;
        /* align it */
        minsize = PAD(minsize);

        D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
        down(&c->alloc_sem);

        D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

        spin_lock(&c->erase_completion_lock);

        /* this needs a little more thought (true <tglx> :)) */
        while(ret == -EAGAIN) {
                while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
                        int ret;
                        uint32_t dirty, avail;

                        /* calculate real dirty size
                         * dirty_size contains blocks on erase_pending_list
                         * those blocks are counted in c->nr_erasing_blocks.
                         * If one block is actually erased, it is no longer counted as dirty_space
                         * but it is still counted in c->nr_erasing_blocks, so we add c->erasing_size
                         * and subtract c->nr_erasing_blocks * c->sector_size again.
                         * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
                         * This helps us to force gc and eventually pick a clean block to spread the load.
                         * We add unchecked_size here, as we hopefully will find some space to use.
                         * This will affect the sum only once, as gc first finishes the checking
                         * of nodes.
                         */
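                        /* Worked example, with hypothetical numbers: assume a
                         * 64KiB sector_size, one fully-dirty block on
                         * erase_pending_list (64KiB in dirty_size, 1 in
                         * nr_erasing_blocks), one block under erase (64KiB in
                         * erasing_size, 1 in nr_erasing_blocks), 16KiB of other
                         * dirty space and 8KiB unchecked:
                         *
                         *   dirty = (64+16)KiB + 64KiB - 2*64KiB + 8KiB = 24KiB
                         *
                         * Blocks already queued for (or under) erase cancel out;
                         * only reclaimable-but-unqueued space is counted.
                         */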
                        dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
                        if (dirty < c->nospc_dirty_size) {
                                if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
                                        D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
                                        break;
                                }
                                D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
                                          dirty, c->unchecked_size, c->nospc_dirty_size));

                                spin_unlock(&c->erase_completion_lock);
                                up(&c->alloc_sem);
                                return -ENOSPC;
                        }

                        /* Calculate the possibly available space. 'Possibly available' means
                         * that we don't know whether the unchecked size contains obsoleted
                         * nodes, which could give us some more usable space. This will affect
                         * the sum only once, as gc first finishes the checking of nodes.
                         * Return -ENOSPC if the maximum possibly available space is less than
                         * or equal to blocksneeded * sector_size.
                         * This prevents endless gc looping on a filesystem which is nearly
                         * full, even if the check above passes.
                         */
                        avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
                        if ((avail / c->sector_size) <= blocksneeded) {
                                if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
                                        D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
                                        break;
                                }

                                D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
                                          avail, blocksneeded * c->sector_size));
                                spin_unlock(&c->erase_completion_lock);
                                up(&c->alloc_sem);
                                return -ENOSPC;
                        }

                        up(&c->alloc_sem);

                        D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
                                  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
                                  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
                        spin_unlock(&c->erase_completion_lock);

                        ret = jffs2_garbage_collect_pass(c);
                        if (ret)
                                return ret;

                        cond_resched();

                        if (signal_pending(current))
                                return -EINTR;

                        down(&c->alloc_sem);
                        spin_lock(&c->erase_completion_lock);
                }

                ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
                if (ret) {
                        D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
                }
        }
        spin_unlock(&c->erase_completion_lock);
        if (!ret)
                ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
        if (ret)
                up(&c->alloc_sem);
        return ret;
}
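
/*
 * Caller-side protocol, as a minimal sketch (illustrative only -- the real
 * callers live in write.c and gc.c, and 'ofs', 'write_size', 'node_len' and
 * 'ic' below are hypothetical placeholders for the node being written):
 *
 *      uint32_t len;
 *      int ret = jffs2_reserve_space(c, write_size, &len, ALLOC_NORMAL,
 *                                    JFFS2_SUMMARY_INODE_SIZE);
 *      if (ret)
 *              return ret;             // e.g. -ENOSPC or -EINTR
 *      // ... write at most 'len' bytes to flash at the current write point,
 *      // then record the new node and release the allocation semaphore:
 *      jffs2_add_physical_node_ref(c, ofs | REF_NORMAL, node_len, ic);
 *      jffs2_complete_reservation(c);
 */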

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
                           uint32_t *len, uint32_t sumsize)
{
        int ret = -EAGAIN;
        minsize = PAD(minsize);

        D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

        spin_lock(&c->erase_completion_lock);
        while(ret == -EAGAIN) {
                ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
                if (ret) {
                        D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
                }
        }
        spin_unlock(&c->erase_completion_lock);
        if (!ret)
                ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

        return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

        if (c->nextblock == NULL) {
                D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
                  jeb->offset));
                return;
        }
        /* Check if we now have a dirty block, or if it was dirty already */
        if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
                c->dirty_size += jeb->wasted_size;
                c->wasted_size -= jeb->wasted_size;
                jeb->dirty_size += jeb->wasted_size;
                jeb->wasted_size = 0;
                if (VERYDIRTY(c, jeb->dirty_size)) {
                        D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
                          jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
                        list_add_tail(&jeb->list, &c->very_dirty_list);
                } else {
                        D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
                          jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
                        list_add_tail(&jeb->list, &c->dirty_list);
                }
        } else {
                D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
                  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
                list_add_tail(&jeb->list, &c->clean_list);
        }
        c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
        struct list_head *next;

        /* Take the next block off the 'free' list */

        if (list_empty(&c->free_list)) {

                if (!c->nr_erasing_blocks &&
                        !list_empty(&c->erasable_list)) {
                        struct jffs2_eraseblock *ejeb;

                        ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
                        list_move_tail(&ejeb->list, &c->erase_pending_list);
                        c->nr_erasing_blocks++;
                        jffs2_erase_pending_trigger(c);
                        D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
                                  ejeb->offset));
                }

                if (!c->nr_erasing_blocks &&
                        !list_empty(&c->erasable_pending_wbuf_list)) {
                        D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
                        /* c->nextblock is NULL, no update to c->nextblock allowed */
                        spin_unlock(&c->erase_completion_lock);
                        jffs2_flush_wbuf_pad(c);
                        spin_lock(&c->erase_completion_lock);
                        /* Have another go. It'll be on the erasable_list now */
                        return -EAGAIN;
                }

                if (!c->nr_erasing_blocks) {
                        /* Ouch. We're in GC, or we wouldn't have got here.
                           And there's no space left. At all. */
                        printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
                                   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
                                   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
                        return -ENOSPC;
                }

                spin_unlock(&c->erase_completion_lock);
                /* Don't wait for it; just erase one right now */
                jffs2_erase_pending_blocks(c, 1);
                spin_lock(&c->erase_completion_lock);

                /* An erase may have failed, decreasing the
                   amount of free space available. So we must
                   restart from the beginning */
                return -EAGAIN;
        }

        next = c->free_list.next;
        list_del(next);
        c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
        c->nr_free_blocks--;

        jffs2_sum_reset_collected(c->summary); /* reset collected summary */

        D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

        return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
                                  uint32_t *len, uint32_t sumsize)
{
        struct jffs2_eraseblock *jeb = c->nextblock;
        uint32_t reserved_size;                         /* for summary information at the end of the jeb */
        int ret;

 restart:
        reserved_size = 0;

        if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
                                                        /* NOSUM_SIZE means not to generate summary */

                if (jeb) {
                        reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
                        dbg_summary("minsize=%d , jeb->free=%d ,"
                                                "summary->size=%d , sumsize=%d\n",
                                                minsize, jeb->free_size,
                                                c->summary->sum_size, sumsize);
                }

                /* Is there enough space for writing out the current node, or do we
                   have to write out the summary information now, close this jeb and
                   select a new nextblock? */
                if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
                                        JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

                        /* Has summary been disabled for this jeb? */
                        if (jffs2_sum_is_disabled(c->summary)) {
                                sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
                                goto restart;
                        }

                        /* Writing out the collected summary information */
                        dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
                        ret = jffs2_sum_write_sumnode(c);

                        if (ret)
                                return ret;

                        if (jffs2_sum_is_disabled(c->summary)) {
                                /* jffs2_sum_write_sumnode() couldn't write out the summary
                                   information, so summary has been disabled for this jeb and
                                   the collected information freed */
                                sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
                                goto restart;
                        }

                        jffs2_close_nextblock(c, jeb);
                        jeb = NULL;
                        /* always keep a valid value in reserved_size */
                        reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
                }
        } else {
                if (jeb && minsize > jeb->free_size) {
                        uint32_t waste;

                        /* Skip the end of this block and file it as having some dirty space */
                        /* If there's a pending write to it, flush now */

                        if (jffs2_wbuf_dirty(c)) {
                                spin_unlock(&c->erase_completion_lock);
                                D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
                                jffs2_flush_wbuf_pad(c);
                                spin_lock(&c->erase_completion_lock);
                                jeb = c->nextblock;
                                goto restart;
                        }

                        spin_unlock(&c->erase_completion_lock);

                        ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
                        if (ret)
                                return ret;
                        /* Just lock it again and continue. Nothing much can change because
                           we hold c->alloc_sem anyway. In fact, it's not entirely clear why
                           we hold c->erase_completion_lock in the majority of this function...
                           but that's a question for another (more caffeine-rich) day. */
                        spin_lock(&c->erase_completion_lock);

                        waste = jeb->free_size;
                        jffs2_link_node_ref(c, jeb,
                                            (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
                                            waste, NULL);
                        /* FIXME: that made it count as dirty. Convert to wasted */
                        jeb->dirty_size -= waste;
                        c->dirty_size -= waste;
                        jeb->wasted_size += waste;
                        c->wasted_size += waste;

                        jffs2_close_nextblock(c, jeb);
                        jeb = NULL;
                }
        }

        if (!jeb) {

                ret = jffs2_find_nextblock(c);
                if (ret)
                        return ret;

                jeb = c->nextblock;

                if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
                        printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
                        goto restart;
                }
        }
        /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
           enough space */
        *len = jeb->free_size - reserved_size;

        if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
            !jeb->first_node->next_in_ino) {
                /* Only node in it beforehand was a CLEANMARKER node (we think).
                   So mark it obsolete now that there's going to be another node
                   in the block. This will reduce used_size to zero, but we've
                   already set c->nextblock so that jffs2_mark_node_obsolete()
                   won't try to refile it to the dirty_list.
                */
                spin_unlock(&c->erase_completion_lock);
                jffs2_mark_node_obsolete(c, jeb->first_node);
                spin_lock(&c->erase_completion_lock);
        }

        D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
                  *len, jeb->offset + (c->sector_size - jeb->free_size)));
        return 0;
}
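
/*
 * Illustrative arithmetic for the summary reservation above, with
 * hypothetical numbers: if the next summary record needs sumsize = 28
 * bytes, c->summary->sum_size has accumulated 400 bytes, and the summary
 * frame overhead is 36 bytes, then
 *
 *      reserved_size = PAD(28 + 400 + 36) = 464
 *
 * and the caller is offered *len = jeb->free_size - 464, so the tail of
 * the eraseblock stays free for the summary node itself.
 */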

/**
 *      jffs2_add_physical_node_ref - add a physical node reference to the list
 *      @c: superblock info
 *      @ofs: flash offset of the node, with the REF_* state in its two
 *      least significant bits
 *      @len: length of this physical node
 *      @ic: inode cache the node belongs to, if any
 *
 *      Should only be used to report nodes for which space has been allocated
 *      by jffs2_reserve_space.
 *
 *      Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
                                                       uint32_t ofs, uint32_t len,
                                                       struct jffs2_inode_cache *ic)
{
        struct jffs2_eraseblock *jeb;
        struct jffs2_raw_node_ref *new;

        jeb = &c->blocks[ofs / c->sector_size];

        D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
                  ofs & ~3, ofs & 3, len));
#if 1
        /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
           if c->nextblock is set. Note that wbuf.c will file obsolete nodes
           even after refiling c->nextblock */
        if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
            && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
                printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
                if (c->nextblock)
                        printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
                else
                        printk(KERN_WARNING "No nextblock");
                printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
                return ERR_PTR(-EINVAL);
        }
#endif
        spin_lock(&c->erase_completion_lock);

        new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

        if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
                /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
                D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
                          jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
                if (jffs2_wbuf_dirty(c)) {
                        /* Flush the last write in the block if it's outstanding */
                        spin_unlock(&c->erase_completion_lock);
                        jffs2_flush_wbuf_pad(c);
                        spin_lock(&c->erase_completion_lock);
                }

                list_add_tail(&jeb->list, &c->clean_list);
                c->nextblock = NULL;
        }
        jffs2_dbg_acct_sanity_check_nolock(c, jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

        spin_unlock(&c->erase_completion_lock);

        return new;
}
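
/*
 * Note on the 'ofs' encoding above: nodes are word-aligned, so the two
 * least significant bits of a flash offset are reused to carry the REF_*
 * state (REF_UNCHECKED, REF_OBSOLETE, REF_PRISTINE or REF_NORMAL). That
 * is why the sanity check masks with '& ~3' to recover the address and
 * '& 3' to recover the flags, and why a (hypothetical) caller passes
 * e.g. 'flash_ofs | REF_NORMAL' rather than a bare offset.
 */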


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
        D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
        jffs2_garbage_collect_trigger(c);
        up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
        struct list_head *this;

        list_for_each(this, head) {
                if (this == obj) {
                        D1(printk("%p is on list at %p\n", obj, head));
                        return 1;

                }
        }
        return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
        struct jffs2_eraseblock *jeb;
        int blocknr;
        struct jffs2_unknown_node n;
        int ret, addedsize;
        size_t retlen;
        uint32_t freed_len;

        if(unlikely(!ref)) {
                printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
                return;
        }
        if (ref_obsolete(ref)) {
                D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
                return;
        }
        blocknr = ref->flash_offset / c->sector_size;
        if (blocknr >= c->nr_blocks) {
                printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
                BUG();
        }
        jeb = &c->blocks[blocknr];

        if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
            !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
                /* Hm. This may confuse static lock analysis. If any of the above
                   three conditions is false, we're going to return from this
                   function without actually obliterating any nodes or freeing
                   any jffs2_raw_node_refs. So we don't need to stop erases from
                   happening, or protect against people holding an obsolete
                   jffs2_raw_node_ref without the erase_completion_lock. */
                down(&c->erase_free_sem);
        }

        spin_lock(&c->erase_completion_lock);

        freed_len = ref_totlen(c, jeb, ref);

        if (ref_flags(ref) == REF_UNCHECKED) {
                D1(if (unlikely(jeb->unchecked_size < freed_len)) {
                        printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
                               freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
                        BUG();
                })
                D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
                jeb->unchecked_size -= freed_len;
                c->unchecked_size -= freed_len;
        } else {
                D1(if (unlikely(jeb->used_size < freed_len)) {
                        printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
                               freed_len, blocknr, ref->flash_offset, jeb->used_size);
                        BUG();
                })
                D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
                jeb->used_size -= freed_len;
                c->used_size -= freed_len;
        }

        /* Take care that wasted size is taken into account */
        if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
                D1(printk("Dirtying\n"));
                addedsize = freed_len;
                jeb->dirty_size += freed_len;
                c->dirty_size += freed_len;

                /* Convert wasted space to dirty, if not a bad block */
                if (jeb->wasted_size) {
                        if (on_list(&jeb->list, &c->bad_used_list)) {
                                D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
                                          jeb->offset));
                                addedsize = 0; /* To fool the refiling code later */
                        } else {
                                D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
                                          jeb->wasted_size, jeb->offset));
                                addedsize += jeb->wasted_size;
                                jeb->dirty_size += jeb->wasted_size;
                                c->dirty_size += jeb->wasted_size;
                                c->wasted_size -= jeb->wasted_size;
                                jeb->wasted_size = 0;
                        }
                }
        } else {
                D1(printk("Wasting\n"));
                addedsize = 0;
                jeb->wasted_size += freed_len;
                c->wasted_size += freed_len;
        }
        ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

        jffs2_dbg_acct_sanity_check_nolock(c, jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

        if (c->flags & JFFS2_SB_FLAG_SCANNING) {
                /* Flash scanning is in progress. Don't muck about with the block
                   lists because they're not ready yet, and don't actually
                   obliterate nodes that look obsolete. If they weren't
                   marked obsolete on the flash at the time they _became_
                   obsolete, there was probably a reason for that. */
                spin_unlock(&c->erase_completion_lock);
                /* We didn't lock the erase_free_sem */
                return;
        }

        if (jeb == c->nextblock) {
                D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
        } else if (!jeb->used_size && !jeb->unchecked_size) {
                if (jeb == c->gcblock) {
                        D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
                        c->gcblock = NULL;
                } else {
                        D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
                        list_del(&jeb->list);
                }
                if (jffs2_wbuf_dirty(c)) {
                        D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
                        list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
                } else {
                        if (jiffies & 127) {
                                /* Most of the time, we just erase it immediately. Otherwise we
                                   spend ages scanning it on mount, etc. */
                                D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
                                list_add_tail(&jeb->list, &c->erase_pending_list);
                                c->nr_erasing_blocks++;
                                jffs2_erase_pending_trigger(c);
                        } else {
                                /* Sometimes, however, we leave it elsewhere so it doesn't get
                                   immediately reused, and we spread the load a bit. */
                                D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
                                list_add_tail(&jeb->list, &c->erasable_list);
                        }
                }
                D1(printk(KERN_DEBUG "Done OK\n"));
        } else if (jeb == c->gcblock) {
                D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
        } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
                D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
                list_del(&jeb->list);
                D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
                list_add_tail(&jeb->list, &c->dirty_list);
        } else if (VERYDIRTY(c, jeb->dirty_size) &&
                   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
                D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
                list_del(&jeb->list);
                D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
                list_add_tail(&jeb->list, &c->very_dirty_list);
        } else {
                D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
                          jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
        }

        spin_unlock(&c->erase_completion_lock);

        if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
                (c->flags & JFFS2_SB_FLAG_BUILDING)) {
                /* We didn't lock the erase_free_sem */
                return;
        }

        /* The erase_free_sem is locked, and has been since before we marked the node obsolete
           and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
           the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
           by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

        D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
        ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
        if (ret) {
                printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
                goto out_erase_sem;
        }
        if (retlen != sizeof(n)) {
                printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
                goto out_erase_sem;
        }
        if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
                printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
                goto out_erase_sem;
        }
        if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
                D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
                goto out_erase_sem;
        }
        /* XXX FIXME: This is ugly now */
        n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
        ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
        if (ret) {
                printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
                goto out_erase_sem;
        }
        if (retlen != sizeof(n)) {
                printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
                goto out_erase_sem;
        }

        /* Nodes which have been marked obsolete no longer need to be
           associated with any inode. Remove them from the per-inode list.

           Note we can't do this for NAND at the moment because we need
           obsolete dirent nodes to stay on the lists, because of the
           horridness in jffs2_garbage_collect_deletion_dirent(). Also
           because we delete the inocache, and on NAND we need that to
           stay around until all the nodes are actually erased, in order
           to stop us from giving the same inode number to another newly
           created inode. */
        if (ref->next_in_ino) {
                struct jffs2_inode_cache *ic;
                struct jffs2_raw_node_ref **p;

                spin_lock(&c->erase_completion_lock);

                ic = jffs2_raw_ref_to_ic(ref);
                for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
                        ;

                *p = ref->next_in_ino;
                ref->next_in_ino = NULL;

                switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
                        case RAWNODE_CLASS_XATTR_DATUM:
                                jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
                                break;
                        case RAWNODE_CLASS_XATTR_REF:
                                jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
                                break;
#endif
                        default:
                                if (ic->nodes == (void *)ic && ic->nlink == 0)
                                        jffs2_del_ino_cache(c, ic);
                                break;
                }
                spin_unlock(&c->erase_completion_lock);
        }

 out_erase_sem:
        up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
        int ret = 0;
        uint32_t dirty;
        int nr_very_dirty = 0;
        struct jffs2_eraseblock *jeb;

        if (c->unchecked_size) {
                D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
                          c->unchecked_size, c->checked_ino));
                return 1;
        }

        /* dirty_size contains blocks on erase_pending_list
         * those blocks are counted in c->nr_erasing_blocks.
         * If one block is actually erased, it is no longer counted as dirty_space
         * but it is still counted in c->nr_erasing_blocks, so we add c->erasing_size
         * and subtract c->nr_erasing_blocks * c->sector_size again.
         * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
         * This helps us to force gc and eventually pick a clean block to spread the load.
         */
        dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

        if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
                        (dirty > c->nospc_dirty_size))
                ret = 1;

        list_for_each_entry(jeb, &c->very_dirty_list, list) {
                nr_very_dirty++;
                if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
                        ret = 1;
                        /* In debug mode, actually go through and count them all */
                        D1(continue);
                        break;
                }
        }

        D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
                  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));

        return ret;
}
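
/*
 * Consumption sketch (illustrative only; the real loop lives in the
 * garbage-collect thread in background.c, whose details differ):
 *
 *      for (;;) {
 *              if (!jffs2_thread_should_wake(c)) {
 *                      ... sleep until woken, e.g. via
 *                          jffs2_garbage_collect_trigger() ...
 *                      continue;
 *              }
 *              if (jffs2_garbage_collect_pass(c))
 *                      break;          // e.g. -ENOSPC: give up
 *      }
 */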