diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e54ff372d711b53e652220e8616ddf3d07995f5c..fea966d66f9838c140176a7dbd0ed8223e908f44 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,7 +7,6 @@
 
 #include "dm.h"
 #include "dm-path-selector.h"
-#include "dm-hw-handler.h"
 #include "dm-bio-list.h"
 #include "dm-bio-record.h"
 #include "dm-uevent.h"
@@ -63,6 +62,7 @@ struct multipath {
        spinlock_t lock;
 
        const char *hw_handler_name;
+       struct work_struct activate_path;
        unsigned nr_priority_groups;
        struct list_head priority_groups;
        unsigned pg_init_required;      /* pg_init needs calling? */
@@ -107,10 +107,10 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 
 static struct kmem_cache *_mpio_cache;
 
-static struct workqueue_struct *kmultipathd;
+static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
-static void pg_init_done(struct dm_path *, int);
+static void activate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -180,6 +180,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                m->queue_io = 1;
                INIT_WORK(&m->process_queued_ios, process_queued_ios);
                INIT_WORK(&m->trigger_event, trigger_event);
+               INIT_WORK(&m->activate_path, activate_path);
                m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
@@ -432,11 +433,8 @@ static void process_queued_ios(struct work_struct *work)
 out:
        spin_unlock_irqrestore(&m->lock, flags);
 
-       if (init_required) {
-               struct dm_path *path = &pgpath->path;
-               int ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
-               pg_init_done(path, ret);
-       }
+       if (init_required)
+               queue_work(kmpath_handlerd, &m->activate_path);
 
        if (!must_queue)
                dispatch_queued_ios(m);
@@ -527,8 +525,10 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
        }
 
        r = read_param(_params, shift(as), &ps_argc, &ti->error);
-       if (r)
+       if (r) {
+               dm_put_path_selector(pst);
                return -EINVAL;
+       }
 
        r = pst->create(&pg->ps, ps_argc, as->argv);
        if (r) {
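
Reviewer note (not part of the patch): this hunk plugs a reference leak in
parse_path_selector(). dm_get_path_selector(), called just above this hunk,
takes a reference on the path-selector type, and the early return on a
read_param() failure previously skipped the matching dm_put_path_selector().
A minimal sketch of the get/put discipline being restored, with hypothetical
names:

	#include <linux/errno.h>

	struct pst_like;				/* opaque, refcounted type */
	extern struct pst_like *pst_get(const char *name);	/* takes a ref */
	extern void pst_put(struct pst_like *pst);		/* drops it */
	extern int parse_args(struct pst_like *pst);

	static int parse_selector_sketch(const char *name)
	{
		struct pst_like *pst = pst_get(name);

		if (!pst)
			return -EINVAL;

		if (parse_args(pst)) {
			pst_put(pst);	/* balance the get on every error path */
			return -EINVAL;
		}
		return 0;
	}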
@@ -625,8 +625,10 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
                struct pgpath *pgpath;
                struct arg_set path_args;
 
-               if (as->argc < nr_params)
+               if (as->argc < nr_params) {
+                       ti->error = "not enough path parameters";
                        goto bad;
+               }
 
                path_args.argc = nr_params;
                path_args.argv = as->argv;
@@ -666,6 +668,8 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
        request_module("scsi_dh_%s", m->hw_handler_name);
        if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
                ti->error = "unknown hardware handler type";
+               kfree(m->hw_handler_name);
+               m->hw_handler_name = NULL;
                return -EINVAL;
        }
        consume(as, hw_argc - 1);
@@ -791,6 +795,7 @@ static void multipath_dtr(struct dm_target *ti)
 {
        struct multipath *m = (struct multipath *) ti->private;
 
+       flush_workqueue(kmpath_handlerd);
        flush_workqueue(kmultipathd);
        free_multipath(m);
 }
@@ -866,7 +871,7 @@ static int reinstate_path(struct pgpath *pgpath)
        if (pgpath->path.is_active)
                goto out;
 
-       if (!pgpath->pg->ps.type) {
+       if (!pgpath->pg->ps.type->reinstate_path) {
                DMWARN("Reinstate path not supported by path selector %s",
                       pgpath->pg->ps.type->name);
                r = -EINVAL;
@@ -1008,44 +1013,6 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
        return limit_reached;
 }
 
-/*
- * pg_init must call this when it has completed its initialisation
- */
-void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
-{
-       struct pgpath *pgpath = path_to_pgpath(path);
-       struct priority_group *pg = pgpath->pg;
-       struct multipath *m = pg->m;
-       unsigned long flags;
-
-       /*
-        * If requested, retry pg_init until maximum number of retries exceeded.
-        * If retry not requested and PG already bypassed, always fail the path.
-        */
-       if (err_flags & MP_RETRY) {
-               if (pg_init_limit_reached(m, pgpath))
-                       err_flags |= MP_FAIL_PATH;
-       } else if (err_flags && pg->bypassed)
-               err_flags |= MP_FAIL_PATH;
-
-       if (err_flags & MP_FAIL_PATH)
-               fail_path(pgpath);
-
-       if (err_flags & MP_BYPASS_PG)
-               bypass_pg(m, pg, 1);
-
-       spin_lock_irqsave(&m->lock, flags);
-       if (err_flags & ~MP_RETRY) {
-               m->current_pgpath = NULL;
-               m->current_pg = NULL;
-       } else if (!m->pg_init_required)
-               m->queue_io = 0;
-
-       m->pg_init_in_progress = 0;
-       queue_work(kmultipathd, &m->process_queued_ios);
-       spin_unlock_irqrestore(&m->lock, flags);
-}
-
 static void pg_init_done(struct dm_path *path, int errors)
 {
        struct pgpath *pgpath = path_to_pgpath(path);
@@ -1108,6 +1075,17 @@ static void pg_init_done(struct dm_path *path, int errors)
        spin_unlock_irqrestore(&m->lock, flags);
 }
 
+static void activate_path(struct work_struct *work)
+{
+       int ret;
+       struct multipath *m =
+               container_of(work, struct multipath, activate_path);
+       struct dm_path *path = &m->current_pgpath->path;
+
+       ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
+       pg_init_done(path, ret);
+}
+
 /*
  * end_io handling
  */
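
Reviewer note (not part of the patch): activate_path() above uses the
standard embedded-work_struct idiom: the work item lives inside
struct multipath and the handler recovers its owner with container_of().
A minimal sketch of the pattern, with hypothetical names:

	#include <linux/workqueue.h>
	#include <linux/kernel.h>

	struct owner {				/* stands in for struct multipath */
		int state;
		struct work_struct work;	/* embedded work item */
	};

	static void owner_work_fn(struct work_struct *work)
	{
		/* Recover the containing object from the embedded member. */
		struct owner *o = container_of(work, struct owner, work);

		o->state++;		/* runs in process context; may sleep */
	}

INIT_WORK(&o->work, owner_work_fn) wires the handler up once; each subsequent
queue_work() then hands the embedded member back to the handler, exactly as
INIT_WORK(&m->activate_path, activate_path) does in alloc_multipath() above.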
@@ -1451,6 +1429,21 @@ static int __init dm_multipath_init(void)
                return -ENOMEM;
        }
 
+       /*
+        * A separate workqueue is used to handle the device handlers
+        * to avoid overloading the existing workqueue. Overloading the
+        * old workqueue would also create a bottleneck in the
+        * path of the storage hardware device activation.
+        */
+       kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+       if (!kmpath_handlerd) {
+               DMERR("failed to create workqueue kmpath_handlerd");
+               destroy_workqueue(kmultipathd);
+               dm_unregister_target(&multipath_target);
+               kmem_cache_destroy(_mpio_cache);
+               return -ENOMEM;
+       }
+
        DMINFO("version %u.%u.%u loaded",
               multipath_target.version[0], multipath_target.version[1],
               multipath_target.version[2]);
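
Reviewer note (not part of the patch): the comment in this hunk carries the
motivation. For reference, a minimal sketch of the full lifecycle of such a
dedicated singlethread workqueue, using the same 2.6-era API the patch uses
(all names hypothetical):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* like kmpath_handlerd */
	static struct work_struct example_work;

	static void example_work_fn(struct work_struct *work)
	{
		/* Slow or blocking activity runs here on the dedicated
		 * thread, so it cannot stall other workqueues. */
	}

	static int __init example_init(void)
	{
		example_wq = create_singlethread_workqueue("example_wq");
		if (!example_wq)
			return -ENOMEM;

		INIT_WORK(&example_work, example_work_fn);
		queue_work(example_wq, &example_work);
		return 0;
	}

	static void __exit example_exit(void)
	{
		flush_workqueue(example_wq);	/* wait for pending work */
		destroy_workqueue(example_wq);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");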
@@ -1462,6 +1455,7 @@ static void __exit dm_multipath_exit(void)
 {
        int r;
 
+       destroy_workqueue(kmpath_handlerd);
        destroy_workqueue(kmultipathd);
 
        r = dm_unregister_target(&multipath_target);
@@ -1470,8 +1464,6 @@ static void __exit dm_multipath_exit(void)
        kmem_cache_destroy(_mpio_cache);
 }
 
-EXPORT_SYMBOL_GPL(dm_pg_init_complete);
-
 module_init(dm_multipath_init);
 module_exit(dm_multipath_exit);