 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. It seems to work, but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/types.h>

#include <linux/nbd.h>
#define LO_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)

#define DBG_IOCTL 0x0004
#define DBG_INIT 0x0010
#define DBG_EXIT 0x0020
#define DBG_BLKDEV 0x0100
#endif /* NDEBUG */

static unsigned int debugflags;

static unsigned int nbds_max = 16;
static struct nbd_device nbd_dev[MAX_NBD];
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 *
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
static DEFINE_SPINLOCK(nbd_lock);
static const char *ioctl_cmd_to_ascii(int cmd)
	case NBD_SET_SOCK: return "set-sock";
	case NBD_SET_BLKSIZE: return "set-blksize";
	case NBD_SET_SIZE: return "set-size";
	case NBD_DO_IT: return "do-it";
	case NBD_CLEAR_SOCK: return "clear-sock";
	case NBD_CLEAR_QUE: return "clear-que";
	case NBD_PRINT_DEBUG: return "print-debug";
	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
	case NBD_DISCONNECT: return "disconnect";
	case BLKROSET: return "set-read-only";
	case BLKFLSBUF: return "flush-buffer-cache";
static const char *nbdcmd_to_ascii(int cmd)
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
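
/*
 * For reference, a rough sketch of the on-the-wire headers these commands
 * travel in.  The authoritative definitions live in <linux/nbd.h>; the
 * field widths below are assumptions consistent with the
 * BUILD_BUG_ON(sizeof(struct nbd_request) != 28) check in nbd_init():
 *
 *	struct nbd_request {		// driver -> server
 *		__be32 magic;		// NBD_REQUEST_MAGIC
 *		__be32 type;		// NBD_CMD_READ/WRITE/DISC
 *		char   handle[8];	// opaque cookie, echoed in the reply
 *		__be64 from;		// byte offset
 *		__be32 len;		// byte count
 *	} __attribute__((packed));
 *
 *	struct nbd_reply {		// server -> driver
 *		__be32 magic;		// NBD_REPLY_MAGIC
 *		__be32 error;		// 0 on success
 *		char   handle[8];	// copied from the request
 *	};
 *
 * nbd_send_req() stores the request pointer in the handle and
 * nbd_find_request() later uses it to match a reply to its request.
 */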
static void nbd_end_request(struct request *req)
	int uptodate = (req->errors == 0) ? 1 : 0;
	request_queue_t *q = req->q;

	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
			req, uptodate? "done": "failed");

	spin_lock_irqsave(q->queue_lock, flags);
	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
		end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);
 * Send or receive a packet: a non-zero 'send' transmits via kernel_sendmsg(),
 * zero receives via kernel_recvmsg().
static int sock_xmit(struct socket *sock, int send, void *buf, int size,
	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;
	sigfillset(&current->blocked);
	sigdelsetmask(&current->blocked, sigmask(SIGKILL));
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	sock->sk->sk_allocation = GFP_NOIO;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = msg_flags | MSG_NOSIGNAL;
	if (send)
		result = kernel_sendmsg(sock, &msg, &iov, 1, size);
	else
		result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
	if (signal_pending(current)) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
			current->pid, current->comm,
			dequeue_signal(current, &current->blocked, &info));
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = -EPIPE; /* short read */

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(sock, 1, kaddr + bvec->bv_offset, bvec->bv_len,
	kunmap(bvec->bv_page);
static int nbd_send_req(struct nbd_device *lo, struct request *req)
	int result, i, flags;
	struct nbd_request request;
	unsigned long size = req->nr_sectors << 9;
	struct socket *sock = lo->sock;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(nbd_cmd(req));
	request.from = cpu_to_be64((u64) req->sector << 9);
	request.len = htonl(size);
	memcpy(request.handle, &req, sizeof(req));

	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
			lo->disk->disk_name, req,
			nbdcmd_to_ascii(nbd_cmd(req)),
			(unsigned long long)req->sector << 9,
			req->nr_sectors << 9);
	result = sock_xmit(sock, 1, &request, sizeof(request),
			(nbd_cmd(req) == NBD_CMD_WRITE)? MSG_MORE: 0);
		printk(KERN_ERR "%s: Send control failed (result %d)\n",
				lo->disk->disk_name, result);

	if (nbd_cmd(req) == NBD_CMD_WRITE) {
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		rq_for_each_bio(bio, req) {
			struct bio_vec *bvec;
			bio_for_each_segment(bvec, bio, i) {
				if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
				dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
						lo->disk->disk_name, req,
				result = sock_send_bvec(sock, bvec, flags);
					printk(KERN_ERR "%s: Send data failed (result %d)\n",
static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
	struct list_head *tmp;
	struct request *xreq;

	memcpy(&xreq, handle, sizeof(xreq));

	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);

	spin_lock(&lo->queue_lock);
	list_for_each(tmp, &lo->queue_head) {
		req = list_entry(tmp, struct request, queuelist);
		list_del_init(&req->queuelist);
		spin_unlock(&lo->queue_lock);

	spin_unlock(&lo->queue_lock);
static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(sock, 0, kaddr + bvec->bv_offset, bvec->bv_len,
	kunmap(bvec->bv_page);
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *lo)
	struct nbd_reply reply;
	struct socket *sock = lo->sock;

	result = sock_xmit(sock, 0, &reply, sizeof(reply), MSG_WAITALL);
		printk(KERN_ERR "%s: Receive control failed (result %d)\n",
				lo->disk->disk_name, result);

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));

	req = nbd_find_request(lo, reply.handle);
	if (unlikely(IS_ERR(req))) {
		result = PTR_ERR(req);
		if (result != -ENOENT)

		printk(KERN_ERR "%s: Unexpected reply (%p)\n",
				lo->disk->disk_name, reply.handle);

	if (ntohl(reply.error)) {
		printk(KERN_ERR "%s: Other side returned error (%d)\n",
				lo->disk->disk_name, ntohl(reply.error));

	dprintk(DBG_RX, "%s: request %p: got reply\n",
			lo->disk->disk_name, req);
	if (nbd_cmd(req) == NBD_CMD_READ) {
		rq_for_each_bio(bio, req) {
			struct bio_vec *bvec;
			bio_for_each_segment(bvec, bio, i) {
				result = sock_recv_bvec(sock, bvec);
					printk(KERN_ERR "%s: Receive data failed (result %d)\n",
				dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
					lo->disk->disk_name, req, bvec->bv_len);

	lo->harderror = result;
static ssize_t pid_show(struct gendisk *disk, char *page)
	return sprintf(page, "%ld\n",
		(long) ((struct nbd_device *)disk->private_data)->pid);

static struct disk_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO },
static int nbd_do_it(struct nbd_device *lo)
	BUG_ON(lo->magic != LO_MAGIC);

	lo->pid = current->pid;
	ret = sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
		printk(KERN_ERR "nbd: sysfs_create_file failed!");

	while ((req = nbd_read_stat(lo)) != NULL)
		nbd_end_request(req);

	sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
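
/*
 * The pid attribute created in nbd_do_it() above hangs off the gendisk's
 * kobject, so user space can see which process is serving the device for
 * as long as NBD_DO_IT is running, for example:
 *
 *	cat /sys/block/nbd0/pid
 */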
static void nbd_clear_que(struct nbd_device *lo)
	BUG_ON(lo->magic != LO_MAGIC);

	 * Because we have set lo->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	BUG_ON(lo->active_req);

	while (!list_empty(&lo->queue_head)) {
		req = list_entry(lo->queue_head.next, struct request,
		list_del_init(&req->queuelist);
		nbd_end_request(req);
 * We always wait for result of write, for now. It would be nice to make it optional
 * if ((req->cmd == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
static void do_nbd_request(request_queue_t *q)
	while ((req = elv_next_request(q)) != NULL) {
		struct nbd_device *lo;

		blkdev_dequeue_request(req);
		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
				req->rq_disk->disk_name, req, req->cmd_type);

		if (!blk_fs_request(req))

		lo = req->rq_disk->private_data;

		BUG_ON(lo->magic != LO_MAGIC);

		nbd_cmd(req) = NBD_CMD_READ;
		if (rq_data_dir(req) == WRITE) {
			nbd_cmd(req) = NBD_CMD_WRITE;
			if (lo->flags & NBD_READ_ONLY) {
				printk(KERN_ERR "%s: Write on read-only\n",
						lo->disk->disk_name);

		spin_unlock_irq(q->queue_lock);

		mutex_lock(&lo->tx_lock);
		if (unlikely(!lo->sock)) {
			mutex_unlock(&lo->tx_lock);
			printk(KERN_ERR "%s: Attempted send on closed socket\n",
			       lo->disk->disk_name);
			nbd_end_request(req);
			spin_lock_irq(q->queue_lock);

		lo->active_req = req;

		if (nbd_send_req(lo, req) != 0) {
			printk(KERN_ERR "%s: Request send failed\n",
					lo->disk->disk_name);
			nbd_end_request(req);
			spin_lock(&lo->queue_lock);
			list_add(&req->queuelist, &lo->queue_head);
			spin_unlock(&lo->queue_lock);

		lo->active_req = NULL;
		mutex_unlock(&lo->tx_lock);
		wake_up_all(&lo->active_wq);

		spin_lock_irq(q->queue_lock);

		spin_unlock(q->queue_lock);
		nbd_end_request(req);
		spin_lock(q->queue_lock);
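
/*
 * Rough sketch of the request life cycle implemented above:
 *
 *	do_nbd_request()  ->  nbd_send_req()      request header (+ data) out
 *	                      list_add()          park it on lo->queue_head
 *	nbd_do_it() loop  ->  nbd_read_stat()     reply header (+ data) in
 *	                      nbd_find_request()  match reply via the handle
 *	                      nbd_end_request()   complete the block request
 */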
static int nbd_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
	struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
	struct request sreq;

	if (!capable(CAP_SYS_ADMIN))

	BUG_ON(lo->magic != LO_MAGIC);

	/* Anyone capable of this syscall can do *real bad* things */
	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
		sreq.cmd_type = REQ_TYPE_SPECIAL;
		nbd_cmd(&sreq) = NBD_CMD_DISC;
		 * Set these to sane values in case server implementation
		 * fails to check the request type first and also to keep
		 * debugging output cleaner.
		nbd_send_req(lo, &sreq);

		mutex_lock(&lo->tx_lock);
		mutex_unlock(&lo->tx_lock);

		BUG_ON(!list_empty(&lo->queue_head));

			inode = file->f_path.dentry->d_inode;
			if (S_ISSOCK(inode->i_mode)) {
				lo->sock = SOCKET_I(inode);

	case NBD_SET_BLKSIZE:
		lo->bytesize &= ~(lo->blksize-1);
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);

		lo->bytesize = arg & ~(lo->blksize-1);
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);

	case NBD_SET_SIZE_BLOCKS:
		lo->bytesize = ((u64) arg) * lo->blksize;
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);

		error = nbd_do_it(lo);

		/* on return tidy up in case we have a signal */
		/* Forcibly shutdown the socket causing all listeners
		 * FIXME: This code is duplicated from sys_shutdown, but
		 * there should be a more generic interface rather than
		 * calling socket ops directly here */
		mutex_lock(&lo->tx_lock);
			printk(KERN_WARNING "%s: shutting down socket\n",
				lo->disk->disk_name);
			lo->sock->ops->shutdown(lo->sock,
				SEND_SHUTDOWN|RCV_SHUTDOWN);
		mutex_unlock(&lo->tx_lock);

		printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);

		return lo->harderror;

		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));

	case NBD_PRINT_DEBUG:
		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
			inode->i_bdev->bd_disk->disk_name,
			lo->queue_head.next, lo->queue_head.prev,
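
/*
 * A minimal sketch of how a user-space client typically drives these
 * ioctls (this mirrors what the nbd-client tool does; the device name,
 * the connected TCP socket "sock" and the sizes here are illustrative
 * assumptions, not part of this driver):
 *
 *	int nbd = open("/dev/nbd0", O_RDWR);
 *	ioctl(nbd, NBD_SET_SOCK, sock);            // hand the socket over
 *	ioctl(nbd, NBD_SET_BLKSIZE, 1024UL);       // soft block size
 *	ioctl(nbd, NBD_SET_SIZE_BLOCKS, nblocks);  // device size in blocks
 *	ioctl(nbd, NBD_DO_IT);                     // blocks until disconnect
 *	ioctl(nbd, NBD_CLEAR_QUE);
 *	ioctl(nbd, NBD_CLEAR_SOCK);
 */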
static struct block_device_operations nbd_fops =
	.owner = THIS_MODULE,

 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
static int __init nbd_init(void)
	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (nbds_max > MAX_NBD) {
		printk(KERN_CRIT "nbd: cannot allocate more than %u nbds; %u requested.\n", MAX_NBD,

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1);
		nbd_dev[i].disk = disk;
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);

	if (register_blkdev(NBD_MAJOR, "nbd")) {

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].file = NULL;
		nbd_dev[i].magic = LO_MAGIC;
		nbd_dev[i].flags = 0;
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
		disk->major = NBD_MAJOR;
		disk->first_minor = i;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0x7ffffc00ULL << 1); /* 2 TB */

		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
static void __exit nbd_cleanup(void)
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
			blk_cleanup_queue(disk->queue);

	unregister_blkdev(NBD_MAJOR, "nbd");
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize.");
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
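
/*
 * Example (illustrative) module load using both parameters; debugflags is
 * a bitmask of the DBG_* values defined near the top of this file, so
 * 0x0114 would enable the DBG_BLKDEV, DBG_INIT and DBG_IOCTL output:
 *
 *	modprobe nbd nbds_max=4 debugflags=0x0114
 */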