#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#define VIRTIO_MAX_SG	(3+MAX_PHYS_SEGMENTS)
#define PART_BITS 4

static int major, index;
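/*
 * Per-device state.  The lock protects the virtqueue and the list of
 * in-flight requests, and doubles as the block-layer queue lock
 * (it is handed to blk_init_queue() below).
 */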
struct virtio_blk {
	spinlock_t lock;
	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	mempool_t *pool;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[VIRTIO_MAX_SG];
};
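/* One of these per in-flight request; it sits on vblk->reqs from
 * add_buf until the host completes it in blk_done(). */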
struct virtblk_req {
	struct list_head list;
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	u8 status;
};
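/*
 * Virtqueue callback: the host has completed one or more requests.
 * Drain every used buffer, translate the one-byte status into the
 * block layer's uptodate convention, and retire the request.
 */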
static void blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
		int uptodate;

		switch (vbr->status) {
		case VIRTIO_BLK_S_OK:
			uptodate = 1;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			uptodate = -ENOTTY;
			break;
		default:
			uptodate = 0;
			break;
		}

		end_dequeued_request(vbr->req, uptodate);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
}
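/*
 * Queue one request on the virtqueue.  The descriptor has three parts:
 * sg[0] carries the out_hdr, sg[1..num] the data pages, and sg[num+1]
 * the status byte the host writes back.  That framing is presumably why
 * VIRTIO_MAX_SG adds a small constant to MAX_PHYS_SEGMENTS.
 */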
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out, in;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
	if (blk_fs_request(vbr->req)) {
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = vbr->req->sector;
		vbr->out_hdr.ioprio = vbr->req->ioprio;
	} else if (blk_pc_request(vbr->req)) {
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = vbr->req->ioprio;
	} else {
		/* We don't put anything else in the queue. */
		BUG();
	}

	if (blk_barrier_rq(vbr->req))
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	/* This init could be done at vblk creation time */
	sg_init_table(vblk->sg, VIRTIO_MAX_SG);
	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
	sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));

	if (rq_data_dir(vbr->req) == WRITE) {
		vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
		out = 1 + num;
		in = 1;
	} else {
		vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
		out = 1;
		in = 1 + num;
	}

	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}
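/*
 * Block-layer request function, called with the queue lock held.  Pull
 * requests off the elevator until the ring fills up, then kick the
 * host once for the whole batch.
 */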
static void do_virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = NULL;
	struct request *req;
	unsigned int issued = 0;

	while ((req = elv_next_request(q)) != NULL) {
		vblk = req->rq_disk->private_data;
		BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg));

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(req);
		issued++;
	}

	if (issued)
		vblk->vq->vq_ops->kick(vblk->vq);
}
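/* Pass ioctls (e.g. SG_IO) through to the generic SCSI command handler. */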
static int virtblk_ioctl(struct inode *inode, struct file *filp,
			 unsigned cmd, unsigned long data)
{
	return scsi_cmd_ioctl(filp, inode->i_bdev->bd_disk->queue,
			      inode->i_bdev->bd_disk, cmd,
			      (void __user *)data);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);
	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}
static struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};
static int index_to_minor(int index)
{
	return index << PART_BITS;
}
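/*
 * Device discovery: allocate per-device state, find the single
 * virtqueue, create the gendisk ("vda", "vdb", ... by index) and apply
 * whatever optional configuration the host advertises via feature bits.
 */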
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	int err;
	u64 cap;
	u32 v;
	u32 blk_size;

	if (index_to_minor(index) >= 1 << MINORBITS)
		return -ENOSPC;

	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;

	/* We expect one virtqueue, for output. */
	vblk->vq = vdev->config->find_vq(vdev, 0, blk_done);
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_mempool;
	}

	vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!vblk->disk->queue) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(vblk->disk->disk_name, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 = index % 26;
		sprintf(vblk->disk->disk_name, "vd%c%c%c",
			'a' + m1, 'a' + m2, 'a' + m3);
	}

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	index++;

	/* If barriers are supported, tell block layer that queue is ordered */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
		blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &cap, sizeof(cap));

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(vblk->disk->queue, v);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&v);
	if (!err)
		blk_queue_max_hw_segments(vblk->disk->queue, v);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_hardsect_size(vblk->disk->queue, blk_size);

	add_disk(vblk->disk);
	return 0;

out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vq(vblk->vq);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}
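/* Teardown: reset the device first so the host stops touching our
 * buffers, then release everything probe allocated, in reverse order. */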
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Nothing should be pending. */
	BUG_ON(!list_empty(&vblk->reqs));

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
	vdev->config->del_vq(vblk->vq);
	kfree(vblk);
}
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static unsigned int features[] = {
	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
};
static struct virtio_driver virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = __devexit_p(virtblk_remove),
};
static int __init init(void)
{
	major = register_blkdev(0, "virtblk");
	if (major < 0)
		return major;
	return register_virtio_driver(&virtio_blk);
}
static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");