#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
-static char bsg_version[] = "block layer sg (bsg) 0.4";
+#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
+#define BSG_VERSION "0.4"
struct bsg_device {
- struct gendisk *disk;
request_queue_t *queue;
spinlock_t lock;
struct list_head busy_list;
int done_cmds;
wait_queue_head_t wq_done;
wait_queue_head_t wq_free;
- char name[BDEVNAME_SIZE];
+ char name[BUS_ID_SIZE];
int max_queue;
unsigned long flags;
};
#define BSG_DEFAULT_CMDS 64
+#define BSG_MAX_DEVS 32768
#undef BSG_DEBUG
#define dprintk(fmt, args...)
#endif
-#define list_entry_bc(entry) list_entry((entry), struct bsg_command, list)
-
-/*
- * just for testing
- */
-#define BSG_MAJOR (240)
-
static DEFINE_MUTEX(bsg_mutex);
-static int bsg_device_nr;
+static int bsg_device_nr, bsg_minor_idx;
-#define BSG_LIST_SIZE (8)
-#define bsg_list_idx(minor) ((minor) & (BSG_LIST_SIZE - 1))
-static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
+#define BSG_LIST_ARRAY_SIZE 8
+static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);
+static int bsg_major;
static struct kmem_cache *bsg_cmd_cachep;
struct list_head list;
struct request *rq;
struct bio *bio;
+ struct bio *bidi_bio;
int err;
struct sg_io_v4 hdr;
struct sg_io_v4 __user *uhdr;
wake_up(&bd->wq_free);
}
-static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
+static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
- struct bsg_command *bc = NULL;
+ struct bsg_command *bc = ERR_PTR(-EINVAL);
spin_lock_irq(&bd->lock);
bd->queued_cmds++;
spin_unlock_irq(&bd->lock);
- bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
+ bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
if (unlikely(!bc)) {
spin_lock_irq(&bd->lock);
bd->queued_cmds--;
+ bc = ERR_PTR(-ENOMEM);
goto out;
}
- memset(bc, 0, sizeof(*bc));
bc->bd = bd;
INIT_LIST_HEAD(&bc->list);
dprintk("%s: returning free cmd %p\n", bd->name, bc);
return bc;
}
-static inline void
-bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
+static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
- bd->done_cmds--;
- list_del(&bc->list);
-}
-
-static inline void
-bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
-{
- bd->done_cmds++;
- list_add_tail(&bc->list, &bd->done_list);
- wake_up(&bd->wq_done);
+ return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
-static inline int bsg_io_schedule(struct bsg_device *bd, int state)
+static int bsg_io_schedule(struct bsg_device *bd)
{
DEFINE_WAIT(wait);
int ret = 0;
goto unlock;
}
- prepare_to_wait(&bd->wq_done, &wait, state);
+ prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bd->lock);
io_schedule();
finish_wait(&bd->wq_done, &wait);
- if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
- ret = -ERESTARTSYS;
-
return ret;
unlock:
spin_unlock_irq(&bd->lock);
return ret;
}
-/*
- * get a new free command, blocking if needed and specified
- */
-static struct bsg_command *bsg_get_command(struct bsg_device *bd)
-{
- struct bsg_command *bc;
- int ret;
-
- do {
- bc = __bsg_alloc_command(bd);
- if (bc)
- break;
-
- ret = bsg_io_schedule(bd, TASK_INTERRUPTIBLE);
- if (ret) {
- bc = ERR_PTR(ret);
- break;
- }
-
- } while (1);
-
- return bc;
-}
-
static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
struct sg_io_v4 *hdr, int has_write_perm)
{
if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
hdr->request_len))
return -EFAULT;
- if (blk_verify_command(rq->cmd, has_write_perm))
+
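+ /* plain SCSI CDBs go through the unprivileged command filter; any other subprotocol requires CAP_SYS_RAWIO */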
+ if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
+ if (blk_verify_command(rq->cmd, has_write_perm))
+ return -EPERM;
+ } else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/*
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
+ int ret = 0;
+
if (hdr->guard != 'Q')
return -EINVAL;
if (hdr->request_len > BLK_MAX_CDB)
hdr->din_xfer_len > (q->max_sectors << 9))
return -EIO;
- /* not supported currently */
- if (hdr->protocol || hdr->subprotocol)
- return -EINVAL;
-
- /*
- * looks sane, if no data then it should be fine from our POV
- */
- if (!hdr->dout_xfer_len && !hdr->din_xfer_len)
- return 0;
-
- /* not supported currently */
- if (hdr->dout_xfer_len && hdr->din_xfer_len)
- return -EINVAL;
+ switch (hdr->protocol) {
+ case BSG_PROTOCOL_SCSI:
+ switch (hdr->subprotocol) {
+ case BSG_SUB_PROTOCOL_SCSI_CMD:
+ case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
*rw = hdr->dout_xfer_len ? WRITE : READ;
-
- return 0;
+ return ret;
}
/*
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
request_queue_t *q = bd->queue;
- struct request *rq;
- int ret, rw = 0; /* shut up gcc */
+ struct request *rq, *next_rq = NULL;
+ int ret, rw;
unsigned int dxfer_len;
void *dxferp = NULL;
 * map scatter-gather elements separately and string them to request
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
+ if (!rq)
+ return ERR_PTR(-ENOMEM);
ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
&bd->flags));
- if (ret) {
- blk_put_request(rq);
- return ERR_PTR(ret);
+ if (ret)
+ goto out;
+
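+ /* a WRITE with din_xfer_len set is a bidirectional command: map the inbound data onto a second, linked READ request */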
+ if (rw == WRITE && hdr->din_xfer_len) {
+ if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ next_rq = blk_get_request(q, READ, GFP_KERNEL);
+ if (!next_rq) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rq->next_rq = next_rq;
+
+ dxferp = (void*)(unsigned long)hdr->din_xferp;
+ ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
+ if (ret)
+ goto out;
}
if (hdr->dout_xfer_len) {
if (dxfer_len) {
ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
- if (ret) {
- dprintk("failed map at %d\n", ret);
- blk_put_request(rq);
- rq = ERR_PTR(ret);
- }
+ if (ret)
+ goto out;
}
-
return rq;
+out:
+ blk_put_request(rq);
+ if (next_rq) {
+ blk_rq_unmap_user(next_rq->bio);
+ blk_put_request(next_rq);
+ }
+ return ERR_PTR(ret);
}
/*
bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
spin_lock_irqsave(&bd->lock, flags);
- list_del(&bc->list);
- bsg_add_done_cmd(bd, bc);
+ list_move_tail(&bc->list, &bd->done_list);
+ bd->done_cmds++;
spin_unlock_irqrestore(&bd->lock, flags);
+
+ wake_up(&bd->wq_done);
}
/*
*/
bc->rq = rq;
bc->bio = rq->bio;
+ if (rq->next_rq)
+ bc->bidi_bio = rq->next_rq->bio;
bc->hdr.duration = jiffies;
spin_lock_irq(&bd->lock);
list_add_tail(&bc->list, &bd->busy_list);
dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
rq->end_io_data = bc;
- blk_execute_rq_nowait(q, bd->disk, rq, 1, bsg_rq_end_io);
+ blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}
-static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
+static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc = NULL;
spin_lock_irq(&bd->lock);
if (bd->done_cmds) {
- bc = list_entry_bc(bd->done_list.next);
- bsg_del_done_cmd(bd, bc);
+ bc = list_entry(bd->done_list.next, struct bsg_command, list);
+ list_del(&bc->list);
+ bd->done_cmds--;
}
spin_unlock_irq(&bd->lock);
/*
* Get a finished command from the done list
*/
-static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
+static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc;
int ret;
if (bc)
break;
- ret = bsg_io_schedule(bd, state);
+ if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+ bc = ERR_PTR(-EAGAIN);
+ break;
+ }
+
+ ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
if (ret) {
- bc = ERR_PTR(ret);
+ bc = ERR_PTR(-ERESTARTSYS);
break;
}
} while (1);
return bc;
}
-static struct bsg_command *
-bsg_get_done_cmd(struct bsg_device *bd, const struct iovec *iov)
-{
- return __bsg_get_done_cmd(bd, TASK_INTERRUPTIBLE);
-}
-
-static struct bsg_command *
-bsg_get_done_cmd_nosignals(struct bsg_device *bd)
-{
- return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
-}
-
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
- struct bio *bio)
+ struct bio *bio, struct bio *bidi_bio)
{
int ret = 0;
hdr->response_len = 0;
if (rq->sense_len && hdr->response) {
- int len = min((unsigned int) hdr->max_response_len,
- rq->sense_len);
+ int len = min_t(unsigned int, hdr->max_response_len,
+ rq->sense_len);
ret = copy_to_user((void*)(unsigned long)hdr->response,
rq->sense, len);
ret = -EFAULT;
}
+ if (rq->next_rq) {
+ blk_rq_unmap_user(bidi_bio);
+ blk_put_request(rq->next_rq);
+ }
+
blk_rq_unmap_user(bio);
blk_put_request(rq);
*/
ret = 0;
do {
- ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
+ ret = bsg_io_schedule(bd);
/*
* look for -ENODATA specifically -- we'll sometimes get
* -ERESTARTSYS when we've taken a signal, but we can't
*/
ret = 0;
do {
- bc = bsg_get_done_cmd_nosignals(bd);
-
- /*
- * we _must_ complete before restarting, because
- * bsg_release can't handle this failing.
- */
- if (PTR_ERR(bc) == -ERESTARTSYS)
- continue;
- if (IS_ERR(bc)) {
- ret = PTR_ERR(bc);
+ spin_lock_irq(&bd->lock);
+ if (!bd->queued_cmds) {
+ spin_unlock_irq(&bd->lock);
break;
}
+ spin_unlock_irq(&bd->lock);
- tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
+ bc = bsg_get_done_cmd(bd);
+ if (IS_ERR(bc))
+ break;
+
+ tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+ bc->bidi_bio);
if (!ret)
ret = tret;
return ret;
}
-typedef struct bsg_command *(*bsg_command_callback)(struct bsg_device *bd, const struct iovec *iov);
-
-static ssize_t
-__bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
- struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read)
+static int
+__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
+ const struct iovec *iov, ssize_t *bytes_read)
{
struct bsg_command *bc;
int nr_commands, ret;
ret = 0;
nr_commands = count / sizeof(struct sg_io_v4);
while (nr_commands) {
- bc = get_bc(bd, iov);
+ bc = bsg_get_done_cmd(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
break;
* after completing the request. so do that here,
* bsg_complete_work() cannot do that for us
*/
- ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
+ ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
+ bc->bidi_bio);
- if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
+ if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
ret = -EFAULT;
bsg_free_command(bc);
clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}
+/*
+ * Check if the error is a "real" error that we should return.
+ */
static inline int err_block_err(int ret)
{
if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
bsg_set_block(bd, file);
bytes_read = 0;
- ret = __bsg_read(buf, count, bsg_get_done_cmd,
- bd, NULL, &bytes_read);
+ ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
*ppos = bytes_read;
if (!bytes_read || (bytes_read && err_block_err(ret)))
return bytes_read;
}
-static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
- size_t count, ssize_t *bytes_read)
+static int __bsg_write(struct bsg_device *bd, const char __user *buf,
+ size_t count, ssize_t *bytes_written)
{
struct bsg_command *bc;
struct request *rq;
while (nr_commands) {
request_queue_t *q = bd->queue;
- bc = bsg_get_command(bd);
- if (!bc)
- break;
+ bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
bc = NULL;
rq = NULL;
nr_commands--;
buf += sizeof(struct sg_io_v4);
- *bytes_read += sizeof(struct sg_io_v4);
+ *bytes_written += sizeof(struct sg_io_v4);
}
if (bc)
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct bsg_device *bd = file->private_data;
- ssize_t bytes_read;
+ ssize_t bytes_written;
int ret;
dprintk("%s: write %Zd bytes\n", bd->name, count);
bsg_set_block(bd, file);
bsg_set_write_perm(bd, file);
- bytes_read = 0;
- ret = __bsg_write(bd, buf, count, &bytes_read);
- *ppos = bytes_read;
+ bytes_written = 0;
+ ret = __bsg_write(bd, buf, count, &bytes_written);
+ *ppos = bytes_written;
/*
* return bytes written on non-fatal errors
*/
- if (!bytes_read || (bytes_read && err_block_err(ret)))
- bytes_read = ret;
+ if (!bytes_written || (bytes_written && err_block_err(ret)))
+ bytes_written = ret;
- dprintk("%s: returning %Zd\n", bd->name, bytes_read);
- return bytes_read;
+ dprintk("%s: returning %Zd\n", bd->name, bytes_written);
+ return bytes_written;
}
static struct bsg_device *bsg_alloc_device(void)
}
static struct bsg_device *bsg_add_device(struct inode *inode,
- struct gendisk *disk,
+ struct request_queue *rq,
struct file *file)
{
- struct bsg_device *bd = NULL;
+ struct bsg_device *bd;
#ifdef BSG_DEBUG
unsigned char buf[32];
#endif
if (!bd)
return ERR_PTR(-ENOMEM);
- bd->disk = disk;
- bd->queue = disk->queue;
- kobject_get(&disk->queue->kobj);
+ bd->queue = rq;
+ kobject_get(&rq->kobj);
bsg_set_block(bd, file);
atomic_set(&bd->ref_count, 1);
bd->minor = iminor(inode);
mutex_lock(&bsg_mutex);
- hlist_add_head(&bd->dev_list,&bsg_device_list[bsg_list_idx(bd->minor)]);
+ hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));
- strncpy(bd->name, disk->disk_name, sizeof(bd->name) - 1);
+ strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
dprintk("bound to <%s>, max queue %d\n",
format_dev_t(buf, inode->i_rdev), bd->max_queue);
static struct bsg_device *__bsg_get_device(int minor)
{
- struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
struct bsg_device *bd = NULL;
struct hlist_node *entry;
mutex_lock(&bsg_mutex);
- hlist_for_each(entry, list) {
+ hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
bd = hlist_entry(entry, struct bsg_device, dev_list);
if (bd->minor == minor) {
atomic_inc(&bd->ref_count);
if (!bcd)
return ERR_PTR(-ENODEV);
- return bsg_add_device(inode, bcd->disk, file);
+ return bsg_add_device(inode, bcd->queue, file);
}
static int bsg_open(struct inode *inode, struct file *file)
return mask;
}
-static int
-bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
int __user *uarg = (int __user *) arg;
- if (!bd)
- return -ENXIO;
-
switch (cmd) {
/*
* our own ioctls
case SG_EMULATED_HOST:
case SCSI_IOCTL_SEND_COMMAND: {
void __user *uarg = (void __user *) arg;
- return scsi_cmd_ioctl(file, bd->disk, cmd, uarg);
+ return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
}
case SG_IO: {
struct request *rq;
- struct bio *bio;
+ struct bio *bio, *bidi_bio = NULL;
struct sg_io_v4 hdr;
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return PTR_ERR(rq);
bio = rq->bio;
- blk_execute_rq(bd->queue, bd->disk, rq, 0);
- blk_complete_sgv4_hdr_rq(rq, &hdr, bio);
+ if (rq->next_rq)
+ bidi_bio = rq->next_rq->bio;
+ blk_execute_rq(bd->queue, NULL, rq, 0);
+ blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
if (copy_to_user(uarg, &hdr, sizeof(hdr)))
return -EFAULT;
.poll = bsg_poll,
.open = bsg_open,
.release = bsg_release,
- .ioctl = bsg_ioctl,
+ .unlocked_ioctl = bsg_ioctl,
.owner = THIS_MODULE,
};
-void bsg_unregister_disk(struct gendisk *disk)
+void bsg_unregister_queue(struct request_queue *q)
{
- struct bsg_class_device *bcd = &disk->bsg_dev;
+ struct bsg_class_device *bcd = &q->bsg_dev;
if (!bcd->class_dev)
return;
mutex_lock(&bsg_mutex);
- sysfs_remove_link(&bcd->disk->queue->kobj, "bsg");
- class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
+ sysfs_remove_link(&q->kobj, "bsg");
+ class_device_unregister(bcd->class_dev);
+ put_device(bcd->dev);
bcd->class_dev = NULL;
+ bcd->dev = NULL;
list_del_init(&bcd->list);
+ bsg_device_nr--;
mutex_unlock(&bsg_mutex);
}
+EXPORT_SYMBOL_GPL(bsg_unregister_queue);
-int bsg_register_disk(struct gendisk *disk)
+int bsg_register_queue(struct request_queue *q, struct device *gdev,
+ const char *name)
{
- request_queue_t *q = disk->queue;
- struct bsg_class_device *bcd;
+ struct bsg_class_device *bcd, *__bcd;
dev_t dev;
+ int ret = -EMFILE;
+ struct class_device *class_dev = NULL;
+ const char *devname;
+
+ if (name)
+ devname = name;
+ else
+ devname = gdev->bus_id;
/*
* we need a proper transport to send commands, not a stacked device
if (!q->request_fn)
return 0;
- bcd = &disk->bsg_dev;
+ bcd = &q->bsg_dev;
memset(bcd, 0, sizeof(*bcd));
INIT_LIST_HEAD(&bcd->list);
mutex_lock(&bsg_mutex);
- dev = MKDEV(BSG_MAJOR, bsg_device_nr);
- bcd->minor = bsg_device_nr;
- bsg_device_nr++;
- bcd->disk = disk;
- bcd->class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", disk->disk_name);
- if (!bcd->class_dev)
+ if (bsg_device_nr == BSG_MAX_DEVS) {
+ printk(KERN_ERR "bsg: too many bsg devices\n");
goto err;
+ }
+
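+ /* find a free minor by scanning the registered devices, wrapping bsg_minor_idx at BSG_MAX_DEVS */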
+retry:
+ list_for_each_entry(__bcd, &bsg_class_list, list) {
+ if (__bcd->minor == bsg_minor_idx) {
+ bsg_minor_idx++;
+ if (bsg_minor_idx == BSG_MAX_DEVS)
+ bsg_minor_idx = 0;
+ goto retry;
+ }
+ }
+
+ bcd->minor = bsg_minor_idx++;
+ if (bsg_minor_idx == BSG_MAX_DEVS)
+ bsg_minor_idx = 0;
+
+ bcd->queue = q;
+ bcd->dev = get_device(gdev);
+ dev = MKDEV(bsg_major, bcd->minor);
+ class_dev = class_device_create(bsg_class, NULL, dev, gdev, "%s",
+ devname);
+ if (IS_ERR(class_dev)) {
+ ret = PTR_ERR(class_dev);
+ goto err_put;
+ }
+ bcd->class_dev = class_dev;
+
+ if (q->kobj.sd) {
+ ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+ if (ret)
+ goto err_unregister;
+ }
+
list_add_tail(&bcd->list, &bsg_class_list);
- if (sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"))
- goto err;
+ bsg_device_nr++;
+
mutex_unlock(&bsg_mutex);
return 0;
+
+err_unregister:
+ class_device_unregister(class_dev);
+err_put:
+ put_device(gdev);
err:
- bsg_device_nr--;
- if (bcd->class_dev)
- class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
mutex_unlock(&bsg_mutex);
- return -ENOMEM;
+ return ret;
}
+EXPORT_SYMBOL_GPL(bsg_register_queue);
+
+static struct cdev bsg_cdev = {
+ .kobj = {.name = "bsg", },
+ .owner = THIS_MODULE,
+};
static int __init bsg_init(void)
{
int ret, i;
+ dev_t devid;
bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
- sizeof(struct bsg_command), 0, 0, NULL, NULL);
+ sizeof(struct bsg_command), 0, 0, NULL);
if (!bsg_cmd_cachep) {
printk(KERN_ERR "bsg: failed creating slab cache\n");
return -ENOMEM;
}
- for (i = 0; i < BSG_LIST_SIZE; i++)
+ for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
INIT_HLIST_HEAD(&bsg_device_list[i]);
bsg_class = class_create(THIS_MODULE, "bsg");
if (IS_ERR(bsg_class)) {
- kmem_cache_destroy(bsg_cmd_cachep);
- return PTR_ERR(bsg_class);
+ ret = PTR_ERR(bsg_class);
+ goto destroy_kmemcache;
}
- ret = register_chrdev(BSG_MAJOR, "bsg", &bsg_fops);
- if (ret) {
- kmem_cache_destroy(bsg_cmd_cachep);
- class_destroy(bsg_class);
- return ret;
- }
+ ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
+ if (ret)
+ goto destroy_bsg_class;
- printk(KERN_INFO "%s loaded\n", bsg_version);
+ bsg_major = MAJOR(devid);
+
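+ /* a single cdev covers the whole dynamically allocated minor range */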
+ cdev_init(&bsg_cdev, &bsg_fops);
+ ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+ if (ret)
+ goto unregister_chrdev;
+
+ printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
+ " loaded (major %d)\n", bsg_major);
return 0;
+unregister_chrdev:
+ unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+destroy_bsg_class:
+ class_destroy(bsg_class);
+destroy_kmemcache:
+ kmem_cache_destroy(bsg_cmd_cachep);
+ return ret;
}
MODULE_AUTHOR("Jens Axboe");
-MODULE_DESCRIPTION("Block layer SGSI generic (sg) driver");
+MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");
-subsys_initcall(bsg_init);
+device_initcall(bsg_init);