[Chart: guest->host throughput with 03-catch-more-io. The interactive version offers selectors for guest->host / host->guest, for cpu utilization / throughput / vmexits, and for the patches 01-tx-notifies-disabled-for-longer through 05-drop-mutex.]

From: Mark McLoughlin 
Subject: [PATCH] kvm: qemu: virtio_net: try and catch yet more I/O without exits

When we flush some packets from the tx bottom half or timer,
re-schedule the bottom half to quickly come back and check
for more I/O before re-enabling notifies.

Signed-off-by: Mark McLoughlin 
---
 qemu/hw/virtio-net.c |   23 ++++++++++++++++++-----
 1 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index 2d5f712..32a93ba 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -229,13 +229,14 @@ static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
 }
 
 /* TX */
-static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq, int enable_notify)
+static int virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq, int enable_notify)
 {
     VirtQueueElement elem;
     int has_vnet_hdr = tap_has_vnet_hdr(n->vc->vlan->first_client);
+    int num_packets = 0;
 
     if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
-        return;
+        return num_packets;
 
     while (virtqueue_pop(vq, &elem)) {
 	ssize_t len = 0;
@@ -258,12 +259,16 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq, int enable_notify)
 
 	virtqueue_push(vq, &elem, len);
 	virtio_notify(&n->vdev, vq);
+
+	num_packets++;
     }
 
     if (enable_notify) {
 	vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
-	virtio_net_flush_tx(n, vq, 0);
+	num_packets += virtio_net_flush_tx(n, vq, 0);
     }
+
+    return num_packets;
 }
 
 static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq)
@@ -292,7 +297,11 @@ static void virtio_net_tx_timer(void *opaque)
 
     n->tx_timer_active = 0;
 
-    virtio_net_flush_tx(n, n->tx_vq, 1);
+    if (virtio_net_flush_tx(n, n->tx_vq, 1)) {
+	n->tx_vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
+	qemu_bh_schedule(n->tx_bh);
+	n->tx_bh_scheduled = 1;
+    }
 }
 
 static void virtio_net_tx_bh(void *opaque)
@@ -301,7 +310,11 @@ static void virtio_net_tx_bh(void *opaque)
 
     n->tx_bh_scheduled = 0;
 
-    virtio_net_flush_tx(n, n->tx_vq, 1);
+    if (virtio_net_flush_tx(n, n->tx_vq, 1)) {
+	n->tx_vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
+	qemu_bh_schedule(n->tx_bh);
+	n->tx_bh_scheduled = 1;
+    }
 }
 
 static void virtio_net_save(QEMUFile *f, void *opaque)
-- 
1.6.0.1
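For context on why masking notifies saves vmexits: the other half of the protocol lives in the guest driver, which checks the used ring's flags before kicking the host, and a kick is an I/O write that traps out of the guest. Below is a minimal sketch of that guest-side check; maybe_kick and the barrier choice are illustrative, not the real Linux virtio_ring code, though VRING_USED_F_NO_NOTIFY and the used-ring layout do come from the virtio spec.

#include <stdint.h>

#define VRING_USED_F_NO_NOTIFY 1    /* host: "don't bother kicking me" */

struct vring_used {
    uint16_t flags;                 /* VRING_USED_F_* */
    uint16_t idx;                   /* last used descriptor index */
    /* struct vring_used_elem ring[] follows in guest memory */
};

/* Guest-side sketch: skip the exit-causing kick while the host has
 * VRING_USED_F_NO_NOTIFY set. */
static void maybe_kick(volatile struct vring_used *used, void (*kick)(void))
{
    __sync_synchronize();           /* publish avail ring before reading flags */
    if (!(used->flags & VRING_USED_F_NO_NOTIFY))
        kick();                     /* pio/mmio write -> vmexit */
}

So for as long as the bottom half keeps the flag set, a busy guest queues packets without exiting at all; the flag is only cleared again once virtio_net_flush_tx(..., 1) finds the ring empty.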

If you're interested, the nasty scripts I used to generate these are here.