/*
 * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>
static LIST_HEAD(blktrans_majors);

extern struct mutex mtd_table_mutex;
extern struct mtd_info *mtd_table[];
struct mtd_blkcore_priv {
        struct completion thread_dead;
        int exiting;
        wait_queue_head_t thread_wq;
        struct request_queue *rq;
        spinlock_t queue_lock;
};
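
/*
 * Service a single block-layer request by turning it into readsect()
 * or writesect() calls on the translation layer.  Returns 1 if the
 * whole request was handled, 0 on error or for non-fs requests.
 */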
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        block = req->sector << 9 >> tr->blkshift;
        nsect = req->current_nr_sectors << 9 >> tr->blkshift;

        buf = req->buffer;

        if (!blk_fs_request(req))
                return 0;

        if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
                return 0;

        switch(rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
                                return 0;
                return 1;
        case WRITE:
                if (!tr->writesect)
                        return 0;
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return 0;
                return 1;
        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return 0;
        }
}
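
/*
 * Worker thread: pulls requests off the queue and services them via
 * do_blktrans_request() under dev->lock, since MTD I/O may sleep.
 */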
static int mtd_blktrans_thread(void *arg)
{
        struct mtd_blktrans_ops *tr = arg;
        struct request_queue *rq = tr->blkcore_priv->rq;

        /* we might get involved when memory gets low, so use PF_MEMALLOC */
        current->flags |= PF_MEMALLOC | PF_NOFREEZE;

        spin_lock_irq(rq->queue_lock);
        while (!tr->blkcore_priv->exiting) {
                struct request *req;
                struct mtd_blktrans_dev *dev;
                int res = 0;
                DECLARE_WAITQUEUE(wait, current);

                req = elv_next_request(rq);

                if (!req) {
                        /* Queue is empty: sleep until mtd_blktrans_request() wakes us */
                        add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
                        set_current_state(TASK_INTERRUPTIBLE);

                        spin_unlock_irq(rq->queue_lock);

                        schedule();
                        remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

                        spin_lock_irq(rq->queue_lock);
                        continue;
                }

                dev = req->rq_disk->private_data;
                tr = dev->tr;

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                end_request(req, res);
        }
        spin_unlock_irq(rq->queue_lock);

        complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
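
/*
 * Block-layer request function.  We cannot do MTD I/O from this
 * (atomic) context, so just kick the worker thread.
 */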
static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_ops *tr = rq->queuedata;

        wake_up(&tr->blkcore_priv->thread_wq);
}
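
/*
 * Opening the block device pins both the translation-layer module and
 * the underlying MTD driver, and bumps the MTD use count; release
 * undoes all three.
 */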
static int blktrans_open(struct inode *i, struct file *f)
{
        struct mtd_blktrans_dev *dev;
        struct mtd_blktrans_ops *tr;
        int ret = -ENODEV;

        dev = i->i_bdev->bd_disk->private_data;
        tr = dev->tr;

        if (!try_module_get(dev->mtd->owner))
                goto out;

        if (!try_module_get(tr->owner))
                goto out_tr;

        /* FIXME: Locking. A hot pluggable device can go away
           (del_mtd_device can be called for it) without its module
           being unloaded. */
        dev->mtd->usecount++;

        ret = 0;
        if (tr->open && (ret = tr->open(dev))) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
        out_tr:
                module_put(tr->owner);
        }
 out:
        return ret;
}
static int blktrans_release(struct inode *i, struct file *f)
{
        struct mtd_blktrans_dev *dev;
        struct mtd_blktrans_ops *tr;
        int ret = 0;

        dev = i->i_bdev->bd_disk->private_data;
        tr = dev->tr;

        if (tr->release)
                ret = tr->release(dev);

        if (!ret) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
                module_put(tr->owner);
        }

        return ret;
}
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

        if (dev->tr->getgeo)
                return dev->tr->getgeo(dev, geo);
        return -ENOTTY;
}
static int blktrans_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
        struct mtd_blktrans_ops *tr = dev->tr;

        switch (cmd) {
        case BLKFLSBUF:
                if (tr->flush)
                        return tr->flush(dev);
                /* The core code did the work, we had nothing to do. */
                return 0;
        default:
                return -ENOTTY;
        }
}
struct block_device_operations mtd_blktrans_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};
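
/*
 * Add one translation-layer device: pick (or validate) a device number,
 * allocate and populate its gendisk, and register it with the block
 * layer.  Must be called with mtd_table_mutex already held.
 */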
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct list_head *this;
        int last_devnum = -1;
        struct gendisk *gd;

        if (!!mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        list_for_each(this, &tr->devs) {
                struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);

                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        if ((new->devnum << tr->part_bits) > 256)
                return -EBUSY;

        mutex_init(&new->lock);
        list_add_tail(&new->list, &tr->devs);
 added:
        if (!tr->writesect)
                new->readonly = 1;

        gd = alloc_disk(1 << tr->part_bits);
        if (!gd) {
                list_del(&new->list);
                return -ENOMEM;
        }
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_blktrans_ops;

        /* Letter-based names (ftla, ftlb, ...) for partitionable devices,
           number-based names (mtdblock0, ...) otherwise */
        if (tr->part_bits) {
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        } else {
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);
        }

        /* 2.5 has capacity in units of 512 bytes while still
           having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
        set_capacity(gd, (new->size * tr->blksize) >> 9);

        gd->private_data = new;
        new->blkcore_priv = gd;
        gd->queue = tr->blkcore_priv->rq;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);

        return 0;
}
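
/*
 * Remove one translation-layer device and tear down its gendisk.
 * Like add_mtd_blktrans_dev(), this expects mtd_table_mutex to be held.
 */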
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        if (!!mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        list_del(&old->list);

        del_gendisk(old->blkcore_priv);
        put_disk(old->blkcore_priv);

        return 0;
}
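
/*
 * MTD notifier hooks: when a raw MTD device appears or disappears,
 * offer it to (or remove it from) every registered translation layer.
 */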
static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct list_head *this, *this2, *next;

        list_for_each(this, &blktrans_majors) {
                struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

                list_for_each_safe(this2, next, &tr->devs) {
                        struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
                }
        }
}
static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct list_head *this;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each(this, &blktrans_majors) {
                struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

                tr->add_mtd(tr, mtd);
        }
}
static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};
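
/*
 * register_mtd_blktrans() is how a translation layer (mtdblock, NFTL,
 * FTL, ...) plugs into this core.  The sketch below is illustrative
 * only: the myflash_* names and the major number are hypothetical, and
 * a real driver would add error handling and a writesect() callback
 * (without one, the core marks the disk read-only).
 *
 *      static int myflash_readsect(struct mtd_blktrans_dev *dev,
 *                                  unsigned long block, char *buf)
 *      {
 *              size_t retlen;
 *
 *              if (dev->mtd->read(dev->mtd, block << 9, 512, &retlen, buf))
 *                      return 1;
 *              return 0;
 *      }
 *
 *      static void myflash_add_mtd(struct mtd_blktrans_ops *tr,
 *                                  struct mtd_info *mtd)
 *      {
 *              struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *              if (!dev)
 *                      return;
 *              dev->mtd = mtd;
 *              dev->devnum = mtd->index;
 *              dev->size = mtd->size >> 9;     // in units of blksize (512)
 *              dev->tr = tr;
 *              if (add_mtd_blktrans_dev(dev))
 *                      kfree(dev);
 *      }
 *
 *      static void myflash_remove_dev(struct mtd_blktrans_dev *dev)
 *      {
 *              del_mtd_blktrans_dev(dev);
 *              kfree(dev);
 *      }
 *
 *      static struct mtd_blktrans_ops myflash_tr = {
 *              .name           = "myflash",
 *              .major          = 240,          // hypothetical major
 *              .part_bits      = 0,
 *              .blksize        = 512,
 *              .readsect       = myflash_readsect,
 *              .add_mtd        = myflash_add_mtd,
 *              .remove_dev     = myflash_remove_dev,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      // from the driver's module init function:
 *      register_mtd_blktrans(&myflash_tr);
 */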
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct task_struct *task;
        int ret, i;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from fucking
           us over. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
        if (!tr->blkcore_priv)
                return -ENOMEM;

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }
        spin_lock_init(&tr->blkcore_priv->queue_lock);
        init_completion(&tr->blkcore_priv->thread_dead);
        init_waitqueue_head(&tr->blkcore_priv->thread_wq);

        tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
        if (!tr->blkcore_priv->rq) {
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return -ENOMEM;
        }

        tr->blkcore_priv->rq->queuedata = tr;
        blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
        tr->blkshift = ffs(tr->blksize) - 1;

        task = kthread_run(mtd_blktrans_thread, tr, "%sd", tr->name);
        if (IS_ERR(task)) {
                blk_cleanup_queue(tr->blkcore_priv->rq);
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return PTR_ERR(task);
        }

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        /* Offer every MTD device that already exists to the new layer */
        for (i = 0; i < MAX_MTD_DEVICES; i++) {
                if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd_table[i]);
        }

        mutex_unlock(&mtd_table_mutex);

        return 0;
}
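
/*
 * Tear-down mirrors registration: stop the worker thread, remove every
 * device this translation layer still owns, then release the queue and
 * the block-device major.
 */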
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct list_head *this, *next;

        mutex_lock(&mtd_table_mutex);

        /* Clean up the kernel thread */
        tr->blkcore_priv->exiting = 1;
        wake_up(&tr->blkcore_priv->thread_wq);
        wait_for_completion(&tr->blkcore_priv->thread_dead);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_safe(this, next, &tr->devs) {
                struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);

                tr->remove_dev(dev);
        }

        blk_cleanup_queue(tr->blkcore_priv->rq);
        unregister_blkdev(tr->major, tr->name);

        mutex_unlock(&mtd_table_mutex);

        kfree(tr->blkcore_priv);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}
static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}
module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");