X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fmtd%2Fmtdoops.c;h=5a680e1e61f14dbc2ba684482792f2f40aabbee9;hb=771999b65f79264acde4b855e5d35696eca5e80c;hp=72c434c61b0a24f0aa43bd41755d66ce121abaa8;hpb=6ce0a856c10c8ab8568764436864616efa88e908;p=linux-2.6

diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 72c434c61b..5a680e1e61 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -28,11 +28,14 @@
 #include <linux/workqueue.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
 #include <linux/mtd/mtd.h>
 
 #define OOPS_PAGE_SIZE 4096
 
-struct mtdoops_context {
+static struct mtdoops_context {
 	int mtd_index;
 	struct work_struct work_erase;
 	struct work_struct work_write;
@@ -42,6 +45,9 @@ struct mtdoops_context {
 	int nextcount;
 
 	void *oops_buf;
+
+	/* writecount and disabling ready are spin lock protected */
+	spinlock_t writecount_lock;
 	int ready;
 	int writecount;
 } oops_cxt;
@@ -63,10 +69,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
 	erase.mtd = mtd;
 	erase.callback = mtdoops_erase_callback;
 	erase.addr = offset;
-	if (mtd->erasesize < OOPS_PAGE_SIZE)
-		erase.len = OOPS_PAGE_SIZE;
-	else
-		erase.len = mtd->erasesize;
+	erase.len = mtd->erasesize;
 	erase.priv = (u_long)&wait_q;
 
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -182,10 +185,8 @@ badblock:
 		goto badblock;
 }
 
-static void mtdoops_workfunc_write(struct work_struct *work)
+static void mtdoops_write(struct mtdoops_context *cxt, int panic)
 {
-	struct mtdoops_context *cxt =
-			container_of(work, struct mtdoops_context, work_write);
 	struct mtd_info *mtd = cxt->mtd;
 	size_t retlen;
 	int ret;
@@ -194,7 +195,11 @@ static void mtdoops_workfunc_write(struct work_struct *work)
 		memset(cxt->oops_buf + cxt->writecount, 0xff,
 				OOPS_PAGE_SIZE - cxt->writecount);
 
-	ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+	if (panic)
+		ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+			OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+	else
+		ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
 			OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
 
 	cxt->writecount = 0;
@@ -204,6 +209,15 @@
 			cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
 
 	mtdoops_inc_counter(cxt);
+}
+
+
+static void mtdoops_workfunc_write(struct work_struct *work)
+{
+	struct mtdoops_context *cxt =
+			container_of(work, struct mtdoops_context, work_write);
+
+	mtdoops_write(cxt, 0);
 }
 
 static void find_next_position(struct mtdoops_context *cxt)
@@ -267,12 +281,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 		return;
 	}
 
+	if (mtd->erasesize < OOPS_PAGE_SIZE) {
+		printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
+				mtd->index);
+		return;
+	}
+
 	cxt->mtd = mtd;
 	cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
 
 	find_next_position(cxt);
 
-	printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
+	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
 }
 
 static void mtdoops_notify_remove(struct mtd_info *mtd)
@@ -290,13 +310,28 @@ static void mtdoops_console_sync(void)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
 	struct mtd_info *mtd = cxt->mtd;
+	unsigned long flags;
 
 	if (!cxt->ready || !mtd || cxt->writecount == 0)
 		return;
 
+	/*
+	 * Once ready is 0 and we've held the lock no further writes to the
+	 * buffer will happen
+	 */
+	spin_lock_irqsave(&cxt->writecount_lock, flags);
+	if (!cxt->ready) {
+		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+		return;
+	}
 	cxt->ready = 0;
+	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
 
-	schedule_work(&cxt->work_write);
+	if (mtd->panic_write && in_interrupt())
+		/* Interrupt context, we're going to panic so try and log */
+		mtdoops_write(cxt, 1);
+	else
+		schedule_work(&cxt->work_write);
 }
 
 static void
@@ -304,6 +339,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 {
 	struct mtdoops_context *cxt = co->data;
 	struct mtd_info *mtd = cxt->mtd;
+	unsigned long flags;
 
 	if (!oops_in_progress) {
 		mtdoops_console_sync();
@@ -313,6 +349,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 	if (!cxt->ready || !mtd)
 		return;
 
+	/* Locking on writecount ensures sequential writes to the buffer */
+	spin_lock_irqsave(&cxt->writecount_lock, flags);
+
+	/* Check ready status didn't change whilst waiting for the lock */
+	if (!cxt->ready)
+		return;
+
 	if (cxt->writecount == 0) {
 		u32 *stamp = cxt->oops_buf;
 		*stamp = cxt->nextcount;
@@ -324,6 +367,11 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 
 	memcpy(cxt->oops_buf + cxt->writecount, s, count);
 	cxt->writecount += count;
+
+	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+
+	if (cxt->writecount == OOPS_PAGE_SIZE)
+		mtdoops_console_sync();
 }
 
 static int __init mtdoops_console_setup(struct console *co, char *options)
@@ -349,7 +397,6 @@ static struct console mtdoops_console = {
 	.write		= mtdoops_console_write,
 	.setup		= mtdoops_console_setup,
 	.unblank	= mtdoops_console_sync,
-	.flags		= CON_PRINTBUFFER,
 	.index		= -1,
 	.data		= &oops_cxt,
 };
@@ -362,7 +409,7 @@ static int __init mtdoops_console_init(void)
 	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
 
 	if (!cxt->oops_buf) {
-		printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
+		printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
 		return -ENOMEM;
 	}