/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + ETH_DATA_LEN)
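
/* Per-device driver state; this lives in the private area allocated by
 * alloc_etherdev() and is reachable via netdev_priv(). */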
struct virtnet_info
{
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq;
        struct net_device *dev;
        struct napi_struct napi;

        /* The skb we couldn't send because buffers were full. */
        struct sk_buff *last_xmit_skb;

        /* If we need to free in a timer, this is it. */
        struct timer_list xmit_free_timer;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* For cleaning up after transmission. */
        struct tasklet_struct tasklet;
        bool free_in_tasklet;

        /* Receive & send queues. */
        struct sk_buff_head recv;
        struct sk_buff_head send;
};
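
/* The virtio_net header for each packet is kept in the skb control buffer
 * (skb->cb), so no separate per-packet allocation is needed. */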
static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct virtio_net_hdr *)skb->cb;
}

static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
{
        sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
}

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        svq->vq_ops->disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);

        /* Make sure we re-xmit last_xmit_skb: if there are no more packets
         * queued, start_xmit won't be called. */
        tasklet_schedule(&vi->tasklet);
}
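
/* Handle one buffer the host has filled: validate the virtio_net header, set
 * up checksum and GSO state, then hand the skb to the network stack. */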
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                        unsigned len)
{
        struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                goto drop;
        }
        len -= sizeof(struct virtio_net_hdr);
        BUG_ON(len > MAX_PACKET_LEN);

        skb_trim(skb, len);

        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
                        goto frame_err;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        printk(KERN_WARNING "%s: bad gso type %u.\n",
                               dev->name, hdr->gso_type);
                        goto frame_err;
                }

                if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        printk(KERN_WARNING "%s: zero gso size.\n",
                               dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
drop:
        dev_kfree_skb(skb);
}
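
/* Keep the receive virtqueue populated: allocate skbs and post them to the
 * host until allocation fails or the ring is full. */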
static void try_fill_recv(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        int num, err;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);
        for (;;) {
                skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
                if (unlikely(!skb))
                        break;

                skb_put(skb, MAX_PACKET_LEN);
                vnet_hdr_to_sg(sg, skb);
                num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
                skb_queue_head(&vi->recv, skb);

                err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
                if (err) {
                        skb_unlink(skb, &vi->recv);
                        kfree_skb(skb);
                        break;
                }
                vi->num++;
        }
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        vi->rvq->vq_ops->kick(vi->rvq);
}
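
/* Callback for the receive virtqueue: the host has used one of our buffers. */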
static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI, suppress further interrupts if successful. */
        if (netif_rx_schedule_prep(vi->dev, &vi->napi)) {
                rvq->vq_ops->disable_cb(rvq);
                __netif_rx_schedule(vi->dev, &vi->napi);
        }
}
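
/* NAPI poll routine: reap up to "budget" received buffers, refill the ring
 * when it runs low, and re-enable virtqueue callbacks once we run dry. */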
static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        struct sk_buff *skb = NULL;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
                __skb_unlink(skb, &vi->recv);
                receive_skb(vi->dev, skb, len);
                vi->num--;
                received++;
        }

        /* FIXME: If we oom and completely run out of inbufs, we need
         * to start a timer trying to fill more. */
        if (vi->num < vi->max / 2)
                try_fill_recv(vi);

        /* Out of packets? */
        if (received < budget) {
                netif_rx_complete(vi->dev, napi);
                if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
                    && napi_schedule_prep(napi)) {
                        vi->rvq->vq_ops->disable_cb(vi->rvq);
                        __netif_rx_schedule(vi->dev, napi);
                        goto again;
                }
        }

        return received;
}
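
/* Reclaim skbs the host has finished transmitting and update stats; all
 * callers hold the netif tx lock. */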
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len;

        while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
                __skb_unlink(skb, &vi->send);
                vi->dev->stats.tx_bytes += skb->len;
                vi->dev->stats.tx_packets++;
                kfree_skb(skb);
        }
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
        struct virtnet_info *vi = (void *)data;

        netif_tx_lock(vi->dev);

        free_old_xmit_skbs(vi);

        if (!skb_queue_empty(&vi->send))
                mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

        netif_tx_unlock(vi->dev);
}
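
/* Fill in the virtio_net header for this skb and add it to the send
 * virtqueue.  Returns 0 on success, non-zero if the queue had no room. */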
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        int num, err;
        struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct virtio_net_hdr *hdr;
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        sg_init_table(sg, 2+MAX_SKB_FRAGS);

        pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb,
                 dest[0], dest[1], dest[2],
                 dest[3], dest[4], dest[5]);

        /* Encode metadata header at front. */
        hdr = skb_vnet_hdr(skb);
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->csum_start = skb->csum_start - skb_headroom(skb);
                hdr->csum_offset = skb->csum_offset;
        } else {
                hdr->flags = 0;
                hdr->csum_offset = hdr->csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr_len = skb_transport_header(skb) - skb->data;
                hdr->gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->gso_size = hdr->hdr_len = 0;
        }

        vnet_hdr_to_sg(sg, skb);
        num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

        err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
        if (!err && !vi->free_in_tasklet)
                mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

        return err;
}
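
/* Bottom half scheduled from skb_xmit_done: retry the deferred last_xmit_skb
 * and, when the transport notifies on empty, free completed skbs here. */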
static void xmit_tasklet(unsigned long data)
{
        struct virtnet_info *vi = (void *)data;

        netif_tx_lock_bh(vi->dev);
        if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
                vi->svq->vq_ops->kick(vi->svq);
                vi->last_xmit_skb = NULL;
        }
        if (vi->free_in_tasklet)
                free_old_xmit_skbs(vi);
        netif_tx_unlock_bh(vi->dev);
}
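
/* Network stack transmit entry point.  At most one skb (last_xmit_skb) is
 * kept back when the virtqueue is full; the queue is stopped until the host
 * frees up buffers. */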
static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* If we have a buffer left over from last time, send it now. */
        if (unlikely(vi->last_xmit_skb) &&
            xmit_skb(vi, vi->last_xmit_skb) != 0)
                goto stop_queue;

        vi->last_xmit_skb = NULL;

        /* Put new one in send queue and do transmit */
        if (likely(skb)) {
                __skb_queue_head(&vi->send, skb);
                if (xmit_skb(vi, skb) != 0) {
                        vi->last_xmit_skb = skb;
                        skb = NULL;
                        goto stop_queue;
                }
        }
done:
        vi->svq->vq_ops->kick(vi->svq);
        return NETDEV_TX_OK;

stop_queue:
        pr_debug("%s: virtio not prepared to send\n", dev->name);
        netif_stop_queue(dev);

        /* Activate callback for using skbs: if this returns false it
         * means some were used in the meantime. */
        if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
                vi->svq->vq_ops->disable_cb(vi->svq);
                netif_start_queue(dev);
                goto again;
        }
        if (skb) {
                /* Drop this skb: we only queue one. */
                vi->dev->stats.tx_dropped++;
                kfree_skb(skb);
        }
        goto done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_enable(&vi->napi);

        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets
         * now.  virtnet_poll wants to re-enable the queue, so we disable here.
         * We synchronize against interrupts via NAPI_STATE_SCHED. */
        if (netif_rx_schedule_prep(dev, &vi->napi)) {
                vi->rvq->vq_ops->disable_cb(vi->rvq);
                __netif_rx_schedule(dev, &vi->napi);
        }
        return 0;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_disable(&vi->napi);
        return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
                return -ENOSYS;

        return ethtool_op_set_tx_hw_csum(dev, data);
}

static struct ethtool_ops virtnet_ethtool_ops = {
        .set_tx_csum = virtnet_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
};
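
/* Device probe: allocate the net_device, negotiate offload features with the
 * host, find the receive and send virtqueues, and register with the
 * networking core. */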
static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->open = virtnet_open;
        dev->stop = virtnet_close;
        dev->hard_start_xmit = start_xmit;
        dev->features = NETIF_F_HIGHDMA;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = virtnet_netpoll;
#endif
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->features |= NETIF_F_TSO;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->features |= NETIF_F_TSO6;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->features |= NETIF_F_TSO_ECN;
                if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->features |= NETIF_F_UFO;
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                vdev->config->get(vdev,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);
        } else
                random_ether_addr(dev->dev_addr);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;

        /* If they give us a callback when all buffers are done, we don't need
         * the timer. */
        vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

        /* We expect two virtqueues, receive then send. */
        vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
        if (IS_ERR(vi->rvq)) {
                err = PTR_ERR(vi->rvq);
                goto free;
        }

        vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
        if (IS_ERR(vi->svq)) {
                err = PTR_ERR(vi->svq);
                goto free_recv;
        }

        /* Initialize our empty receive and send queues. */
        skb_queue_head_init(&vi->recv);
        skb_queue_head_init(&vi->send);

        tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

        if (!vi->free_in_tasklet)
                setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_send;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
free_send:
        vdev->config->del_vq(vi->svq);
free_recv:
        vdev->config->del_vq(vi->rvq);
free:
        free_netdev(dev);
        return err;
}
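
/* Device teardown: reset the device, free any queued skbs, then release the
 * virtqueues and the net_device. */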
static void virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        if (!vi->free_in_tasklet)
                del_timer_sync(&vi->xmit_free_timer);

        /* Free our skbs in send and recv queues, if any. */
        while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
                kfree_skb(skb);
                vi->num--;
        }
        __skb_queue_purge(&vi->send);

        BUG_ON(vi->num != 0);

        vdev->config->del_vq(vi->svq);
        vdev->config->del_vq(vi->rvq);
        unregister_netdev(vi->dev);
        free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtnet_probe,
        .remove = __devexit_p(virtnet_remove),
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");