From 5e78d1e3aaf9266803d9d6eb56073c4ea7588854 Mon Sep 17 00:00:00 2001
From: Fam Zheng <famz@redhat.com>
Date: Thu, 21 Aug 2014 12:05:39 -0500
Subject: [CHANGE 1/3] block: Improve bdrv_aio_co_cancel_em
To: rhvirt-patches@redhat.com,
    jen@redhat.com

RH-Author: Fam Zheng <famz@redhat.com>
Message-id: <1408622740-10835-2-git-send-email-famz@redhat.com>
Patchwork-id: 60663
O-Subject: [RHEL-6.6 qemu-kvm PATCH v4 1/2] block: Improve bdrv_aio_co_cancel_em
Bugzilla: 1018537
RH-Acked-by: Markus Armbruster <armbru@redhat.com>
RH-Acked-by: Kevin Wolf <kwolf@redhat.com>
RH-Acked-by: Stefan Hajnoczi <stefanha@redhat.com>

From: Kevin Wolf <kwolf@redhat.com>

Instead of waiting for all requests to complete, wait just for the
specific request that should be cancelled.
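
A caller-side sketch of the difference (illustrative only, not part of
the original commit message; my_cb and my_opaque are placeholder names):

    BlockDriverAIOCB *acb;

    /* Submit a coroutine-based AIO request, e.g. a flush. */
    acb = bdrv_aio_flush(bs, my_cb, my_opaque);

    /* Before this patch, the cancel callback ran qemu_aio_flush() and
     * drained every in-flight request on every BlockDriverState.  Now
     * it blocks only until this acb's completion BH has run, i.e.
     * until this one request is finished. */
    bdrv_aio_cancel(acb);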

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
(cherry picked from commit d318aea9325c99b15c87a7c14865386c2fde0d2c)
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Jeff E. Nelson <jen@redhat.com>

Conflicts:
	block.c

Because we don't have d7331bed1 (aio: rename AIOPool to AIOCBInfo) yet,
the conflict is that upstream's bdrv_em_co_aiocb_info is still named
bdrv_em_co_aio_pool downstream.

Downstream notes:

Upstream's aio_poll() just works even when throttling is active.
Downstream's qemu_aio_wait() cannot cope with throttling and may block
forever while the request is still held in the throttled queue.

Backporting the new async code would be too intrusive, so instead we
restart all throttled requests in the busy-wait poll loop, similar to
the solution in bdrv_drain_all().
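
For reference, this is the loop the patch installs (the actual hunk is
below), with comments on why the restart is needed:

    acb->done = &done;
    while (!done) {
        /* Kick requests parked in bs->throttled_reqs back into their
         * coroutines; without this, a throttled request never reaches
         * the AIO layer and qemu_aio_wait() could block forever. */
        qemu_co_queue_restart_all(&bs->throttled_reqs);
        qemu_aio_wait();
    }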
---
 block.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/block.c b/block.c
index 0405aa2..b562da7 100644
--- a/block.c
+++ b/block.c
@@ -4049,12 +4049,22 @@ typedef struct BlockDriverAIOCBCoroutine {
     BlockDriverAIOCB common;
     BlockRequest req;
     bool is_write;
+    bool *done;
     QEMUBH* bh;
 } BlockDriverAIOCBCoroutine;
 
 static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
 {
-    qemu_aio_flush();
+    BlockDriverAIOCBCoroutine *acb =
+        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
+    BlockDriverState *bs = blockacb->bs;
+    bool done = false;
+
+    acb->done = &done;
+    while (!done) {
+        qemu_co_queue_restart_all(&bs->throttled_reqs);
+        qemu_aio_wait();
+    }
 }
 
 static AIOPool bdrv_em_co_aio_pool = {
@@ -4067,6 +4077,11 @@ static void bdrv_co_em_bh(void *opaque)
     BlockDriverAIOCBCoroutine *acb = opaque;
 
     acb->common.cb(acb->common.opaque, acb->req.error);
+
+    if (acb->done) {
+        *acb->done = true;
+    }
+
     qemu_bh_delete(acb->bh);
     qemu_aio_release(acb);
 }
@@ -4105,6 +4120,7 @@ static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
     acb->req.nb_sectors = nb_sectors;
     acb->req.qiov = qiov;
     acb->is_write = is_write;
+    acb->done = NULL;
 
     co = qemu_coroutine_create(bdrv_co_do_rw);
     qemu_coroutine_enter(co, acb);
@@ -4131,6 +4147,8 @@ BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
     BlockDriverAIOCBCoroutine *acb;
 
     acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
+    acb->done = NULL;
+
     co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
     qemu_coroutine_enter(co, acb);
 
@@ -4159,6 +4177,7 @@ BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
     acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
     acb->req.sector = sector_num;
     acb->req.nb_sectors = nb_sectors;
+    acb->done = NULL;
     co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
     qemu_coroutine_enter(co, acb);
 
-- 
1.9.3

