block: explicitly acquire aiocontext in bottom halves that need it

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 2017021313.12274-15-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

commit 1919631e6b (tags/v2.9.0-rc0)
Author: Paolo Bonzini

17 changed files with 71 additions and 28 deletions

  1. block/archipelago.c    +3  -0
  2. block/blkreplay.c      +1  -1
  3. block/block-backend.c  +6  -0
  4. block/curl.c           +18 -8
  5. block/gluster.c        +1  -8
  6. block/io.c             +5  -1
  7. block/iscsi.c          +5  -1
  8. block/linux-aio.c      +9  -6
  9. block/nfs.c            +2  -1
 10. block/null.c           +4  -0
 11. block/qed.c            +3  -0
 12. block/rbd.c            +4  -0
 13. dma-helpers.c          +2  -0
 14. hw/block/virtio-blk.c  +2  -0
 15. hw/scsi/scsi-bus.c     +2  -0
 16. util/async.c           +2  -2
 17. util/thread-pool.c     +2  -0
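
Every block driver below follows the same recipe: a bottom half that delivers a completion callback now takes the AioContext lock of its BlockDriverState itself, because aio_bh_poll() (see util/async.c at the end) no longer holds the lock around every bottom half. A minimal sketch of the shape, assuming a hypothetical ExampleAIOCB that embeds BlockAIOCB as common and stores its result in ret:

static void example_complete_bh(void *opaque)
{
    ExampleAIOCB *acb = opaque;
    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);

    /* The guest-visible callback may re-enter block layer code, so it
     * must run under the BDS's AioContext lock. */
    aio_context_acquire(ctx);
    acb->common.cb(acb->common.opaque, acb->ret);
    aio_context_release(ctx);

    /* Dropping the reference does not need the lock. */
    qemu_aio_unref(acb);
}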

block/archipelago.c (+3 -0)

@@ -310,8 +310,11 @@ static void qemu_archipelago_complete_aio(void *opaque)
 {
     AIORequestData *reqdata = (AIORequestData *) opaque;
     ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb;
+    AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs);
 
+    aio_context_acquire(ctx);
     aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret);
+    aio_context_release(ctx);
     aio_cb->status = 0;
 
     qemu_aio_unref(aio_cb);

block/blkreplay.c (+1 -1)

@@ -60,7 +60,7 @@ static int64_t blkreplay_getlength(BlockDriverState *bs)
 static void blkreplay_bh_cb(void *opaque)
 {
     Request *req = opaque;
-    qemu_coroutine_enter(req->co);
+    aio_co_wake(req->co);
     qemu_bh_delete(req->bh);
     g_free(req);
 }
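
For coroutine-based requests the conversion is a one-liner, as in blkreplay_bh_cb() above: aio_co_wake() replaces qemu_coroutine_enter() and re-enters the coroutine in its home AioContext with that context's lock held (the acquire/release happens inside the wakeup path; for cross-context wakeups see the co_schedule_bh_cb() change in util/async.c below). A sketch, with Request standing in for any per-request struct that records the waiting Coroutine:

static void wake_bh_cb(void *opaque)
{
    Request *req = opaque;

    /* aio_co_wake() looks up the coroutine's AioContext and enters it
     * under that context's lock; a bare qemu_coroutine_enter() would
     * now run the coroutine with no lock held at all, since
     * aio_bh_poll() has stopped taking it. */
    aio_co_wake(req->co);
}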

block/block-backend.c (+6 -0)

@@ -939,9 +939,12 @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
 static void error_callback_bh(void *opaque)
 {
     struct BlockBackendAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     bdrv_dec_in_flight(acb->common.bs);
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->ret);
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
 
@@ -983,9 +986,12 @@ static void blk_aio_complete(BlkAioEmAIOCB *acb)
 static void blk_aio_complete_bh(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     assert(acb->has_returned);
+    aio_context_acquire(ctx);
     blk_aio_complete(acb);
+    aio_context_release(ctx);
 }
 
 static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,

block/curl.c (+18 -8)

@@ -796,13 +796,18 @@ static void curl_readv_bh_cb(void *p)
 {
     CURLState *state;
     int running;
+    int ret = -EINPROGRESS;
 
     CURLAIOCB *acb = p;
-    BDRVCURLState *s = acb->common.bs->opaque;
+    BlockDriverState *bs = acb->common.bs;
+    BDRVCURLState *s = bs->opaque;
+    AioContext *ctx = bdrv_get_aio_context(bs);
 
     size_t start = acb->sector_num * BDRV_SECTOR_SIZE;
     size_t end;
 
+    aio_context_acquire(ctx);
+
     // In case we have the requested data already (e.g. read-ahead),
     // we can just call the callback and be done.
     switch (curl_find_buf(s, start, acb->nb_sectors * BDRV_SECTOR_SIZE, acb)) {
@@ -810,7 +815,7 @@ static void curl_readv_bh_cb(void *p)
             qemu_aio_unref(acb);
             // fall through
         case FIND_RET_WAIT:
-            return;
+            goto out;
         default:
             break;
     }
@@ -818,9 +823,8 @@ static void curl_readv_bh_cb(void *p)
     // No cache found, so let's start a new request
     state = curl_init_state(acb->common.bs, s);
     if (!state) {
-        acb->common.cb(acb->common.opaque, -EIO);
-        qemu_aio_unref(acb);
-        return;
+        ret = -EIO;
+        goto out;
     }
 
     acb->start = 0;
@@ -834,9 +838,8 @@ static void curl_readv_bh_cb(void *p)
     state->orig_buf = g_try_malloc(state->buf_len);
     if (state->buf_len && state->orig_buf == NULL) {
         curl_clean_state(state);
-        acb->common.cb(acb->common.opaque, -ENOMEM);
-        qemu_aio_unref(acb);
-        return;
+        ret = -ENOMEM;
+        goto out;
     }
     state->acb[0] = acb;
 
@@ -849,6 +852,13 @@ static void curl_readv_bh_cb(void *p)
 
     /* Tell curl it needs to kick things off */
     curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running);
+
+out:
+    if (ret != -EINPROGRESS) {
+        acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_unref(acb);
+    }
+    aio_context_release(ctx);
 }
 
 static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
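
block/curl.c needs the most churn because curl_readv_bh_cb() had three early returns; once the function acquires the context up front, every exit path must release it, so the returns collapse into a single out: label, with -EINPROGRESS as the "nothing to complete yet" sentinel. The shape in isolation (try_start_request() is a hypothetical helper condensing the error paths):

static void single_exit_bh_cb(CURLAIOCB *acb, AioContext *ctx)
{
    int ret = -EINPROGRESS;             /* sentinel: request still in flight */

    aio_context_acquire(ctx);
    if (!try_start_request(acb)) {      /* hypothetical helper */
        ret = -EIO;
        goto out;                       /* fall through to the single release */
    }
    /* success: completion is delivered later by the event loop */
out:
    if (ret != -EINPROGRESS) {
        acb->common.cb(acb->common.opaque, ret);
        qemu_aio_unref(acb);
    }
    aio_context_release(ctx);           /* runs on every path */
}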

block/gluster.c (+1 -8)

@@ -698,13 +698,6 @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
     return qemu_gluster_glfs_init(gconf, errp);
 }
 
-static void qemu_gluster_complete_aio(void *opaque)
-{
-    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
-
-    qemu_coroutine_enter(acb->coroutine);
-}
-
 /*
  * AIO callback routine called from GlusterFS thread.
  */
@@ -720,7 +713,7 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
         acb->ret = -EIO; /* Partial read/write - fail it */
     }
 
-    aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
+    aio_co_schedule(acb->aio_context, acb->coroutine);
 }
 
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
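
gluster_finish_aiocb() differs from the other callers: it runs in a GlusterFS thread, not in QEMU's event loop, so it must not take the AioContext lock or enter the coroutine directly. aio_co_schedule() hands the coroutine back to its home context, where co_schedule_bh_cb() (util/async.c, below) enters it under the lock, and the now-unused trampoline qemu_gluster_complete_aio() can go away. A sketch of the same pattern for any library that completes requests on its own threads (ExampleAIOCB is illustrative, mirroring GlusterAIOCB's fields):

/* Runs in a foreign (non-QEMU) thread: only record the result and
 * schedule the coroutine; never acquire the AioContext lock here. */
static void foreign_thread_finish_cb(ssize_t ret, void *arg)
{
    ExampleAIOCB *acb = arg;

    acb->ret = ret;
    aio_co_schedule(acb->aio_context, acb->coroutine);
}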

block/io.c (+5 -1)

@@ -189,7 +189,7 @@ static void bdrv_co_drain_bh_cb(void *opaque)
     bdrv_dec_in_flight(bs);
     bdrv_drained_begin(bs);
     data->done = true;
-    qemu_coroutine_enter(co);
+    aio_co_wake(co);
 }
 
 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
@@ -2152,9 +2152,13 @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
 static void bdrv_co_em_bh(void *opaque)
 {
     BlockAIOCBCoroutine *acb = opaque;
+    BlockDriverState *bs = acb->common.bs;
+    AioContext *ctx = bdrv_get_aio_context(bs);
 
     assert(!acb->need_bh);
+    aio_context_acquire(ctx);
     bdrv_co_complete(acb);
+    aio_context_release(ctx);
 }
 
 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)

block/iscsi.c (+5 -1)

@@ -136,13 +136,16 @@ static void
 iscsi_bh_cb(void *p)
 {
     IscsiAIOCB *acb = p;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
 
     qemu_bh_delete(acb->bh);
 
     g_free(acb->buf);
     acb->buf = NULL;
 
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, acb->status);
+    aio_context_release(ctx);
 
     if (acb->task != NULL) {
         scsi_free_scsi_task(acb->task);
@@ -165,8 +168,9 @@ iscsi_schedule_bh(IscsiAIOCB *acb)
 static void iscsi_co_generic_bh_cb(void *opaque)
 {
     struct IscsiTask *iTask = opaque;
+
     iTask->complete = 1;
-    qemu_coroutine_enter(iTask->co);
+    aio_co_wake(iTask->co);
 }
 
 static void iscsi_retry_timer_expired(void *opaque)

block/linux-aio.c (+9 -6)

@@ -54,10 +54,10 @@ struct LinuxAioState {
     io_context_t ctx;
     EventNotifier e;
 
-    /* io queue for submit at batch */
+    /* io queue for submit at batch.  Protected by AioContext lock. */
     LaioQueue io_q;
 
-    /* I/O completion processing */
+    /* I/O completion processing.  Only runs in I/O thread.  */
     QEMUBH *completion_bh;
     int event_idx;
     int event_max;
@@ -75,6 +75,7 @@ static inline ssize_t io_event_ret(struct io_event *ev)
  */
 static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
 {
+    LinuxAioState *s = laiocb->ctx;
     int ret;
 
     ret = laiocb->ret;
@@ -93,6 +94,7 @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
     }
 
     laiocb->ret = ret;
+    aio_context_acquire(s->aio_context);
     if (laiocb->co) {
         /* If the coroutine is already entered it must be in ioq_submit() and
          * will notice laio->ret has been filled in when it eventually runs
@@ -106,6 +108,7 @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
         laiocb->common.cb(laiocb->common.opaque, ret);
         qemu_aio_unref(laiocb);
     }
+    aio_context_release(s->aio_context);
 }
 
 /**
@@ -234,9 +237,12 @@ static void qemu_laio_process_completions(LinuxAioState *s)
 static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
 {
     qemu_laio_process_completions(s);
+
+    aio_context_acquire(s->aio_context);
     if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
         ioq_submit(s);
     }
+    aio_context_release(s->aio_context);
 }
 
 static void qemu_laio_completion_bh(void *opaque)
@@ -251,9 +257,7 @@ static void qemu_laio_completion_cb(EventNotifier *e)
     LinuxAioState *s = container_of(e, LinuxAioState, e);
 
     if (event_notifier_test_and_clear(&s->e)) {
-        aio_context_acquire(s->aio_context);
         qemu_laio_process_completions_and_submit(s);
-        aio_context_release(s->aio_context);
     }
 }
 
@@ -267,9 +271,7 @@ static bool qemu_laio_poll_cb(void *opaque)
         return false;
     }
 
-    aio_context_acquire(s->aio_context);
     qemu_laio_process_completions_and_submit(s);
-    aio_context_release(s->aio_context);
     return true;
 }
 
@@ -459,6 +461,7 @@ void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
 {
     aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
     qemu_bh_delete(s->completion_bh);
+    s->aio_context = NULL;
 }
 
 void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
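
linux-aio moves the lock rather than adding it: the event-notifier and poll handlers stop bracketing the whole completion pass, and qemu_laio_process_completion() takes s->aio_context only around the guest-visible completion, with ioq_submit() likewise guarded in the and_submit path; clearing s->aio_context on detach makes any stale use fail fast. A condensed sketch of the narrowed critical section (an illustrative helper, not the full function, which also handles the coroutine path):

static void laio_deliver_completion(LinuxAioState *s,
                                    struct qemu_laiocb *laiocb, int ret)
{
    laiocb->ret = ret;

    /* Only the callback that can re-enter block-layer code needs the
     * lock; reading the io_event ring before this point does not. */
    aio_context_acquire(s->aio_context);
    laiocb->common.cb(laiocb->common.opaque, ret);
    qemu_aio_unref(laiocb);
    aio_context_release(s->aio_context);
}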

block/nfs.c (+2 -1)

@@ -237,8 +237,9 @@ static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
 static void nfs_co_generic_bh_cb(void *opaque)
 {
     NFSRPC *task = opaque;
+
     task->complete = 1;
-    qemu_coroutine_enter(task->co);
+    aio_co_wake(task->co);
 }
 
 static void

block/null.c (+4 -0)

@@ -134,7 +134,11 @@ static const AIOCBInfo null_aiocb_info = {
 static void null_bh_cb(void *opaque)
 {
     NullAIOCB *acb = opaque;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
+
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, 0);
+    aio_context_release(ctx);
     qemu_aio_unref(acb);
 }
 

block/qed.c (+3 -0)

@@ -942,6 +942,7 @@ static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
 static void qed_aio_complete_bh(void *opaque)
 {
     QEDAIOCB *acb = opaque;
+    BDRVQEDState *s = acb_to_s(acb);
     BlockCompletionFunc *cb = acb->common.cb;
     void *user_opaque = acb->common.opaque;
     int ret = acb->bh_ret;
@@ -949,7 +950,9 @@ static void qed_aio_complete_bh(void *opaque)
     qemu_aio_unref(acb);
 
     /* Invoke callback */
+    qed_acquire(s);
     cb(user_opaque, ret);
+    qed_release(s);
 }
 
 static void qed_aio_complete(QEDAIOCB *acb, int ret)

block/rbd.c (+4 -0)

@@ -413,6 +413,7 @@ shutdown:
 static void qemu_rbd_complete_aio(RADOSCB *rcb)
 {
     RBDAIOCB *acb = rcb->acb;
+    AioContext *ctx = bdrv_get_aio_context(acb->common.bs);
     int64_t r;
 
     r = rcb->ret;
@@ -445,7 +446,10 @@ static void qemu_rbd_complete_aio(RADOSCB *rcb)
         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
+
+    aio_context_acquire(ctx);
     acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
+    aio_context_release(ctx);
 
     qemu_aio_unref(acb);
 }

dma-helpers.c (+2 -0)

@@ -166,8 +166,10 @@ static void dma_blk_cb(void *opaque, int ret)
                                 QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
     }
 
+    aio_context_acquire(dbs->ctx);
     dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                             dma_blk_cb, dbs, dbs->io_func_opaque);
+    aio_context_release(dbs->ctx);
     assert(dbs->acb);
 }
 

hw/block/virtio-blk.c (+2 -0)

@@ -647,6 +647,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
 
     s->rq = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
     while (req) {
         VirtIOBlockReq *next = req->next;
         if (virtio_blk_handle_request(req, &mrb)) {
@@ -667,6 +668,7 @@ static void virtio_blk_dma_restart_bh(void *opaque)
     if (mrb.num_reqs) {
         virtio_blk_submit_multireq(s->blk, &mrb);
     }
+    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
 }
 
 static void virtio_blk_dma_restart_cb(void *opaque, int running,

hw/scsi/scsi-bus.c (+2 -0)

@@ -105,6 +105,7 @@ static void scsi_dma_restart_bh(void *opaque)
     qemu_bh_delete(s->bh);
     s->bh = NULL;
 
+    aio_context_acquire(blk_get_aio_context(s->conf.blk));
     QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
         scsi_req_ref(req);
         if (req->retry) {
@@ -122,6 +123,7 @@ static void scsi_dma_restart_bh(void *opaque)
         }
         scsi_req_unref(req);
     }
+    aio_context_release(blk_get_aio_context(s->conf.blk));
 }
 
 void scsi_req_retry(SCSIRequest *req)

util/async.c (+2 -2)

@@ -114,9 +114,7 @@ int aio_bh_poll(AioContext *ctx)
                 ret = 1;
             }
             bh->idle = 0;
-            aio_context_acquire(ctx);
             aio_bh_call(bh);
-            aio_context_release(ctx);
         }
         if (bh->deleted) {
             deleted = true;
@@ -389,7 +387,9 @@ static void co_schedule_bh_cb(void *opaque)
         Coroutine *co = QSLIST_FIRST(&straight);
         QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
         trace_aio_co_schedule_bh_cb(ctx, co);
+        aio_context_acquire(ctx);
         qemu_coroutine_enter(co);
+        aio_context_release(ctx);
     }
 }
 

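These two hunks are the heart of the commit: aio_bh_poll() stops acquiring the lock around every bottom half (that is now each BH's own job, as in the drivers above), while co_schedule_bh_cb() starts acquiring it, since coroutines still assume they run under their home context's lock. Roughly, with pop_next_scheduled() as a hypothetical stand-in for the QSLIST draining:

static void co_schedule_bh_cb_sketch(AioContext *ctx)
{
    Coroutine *co;

    while ((co = pop_next_scheduled(ctx)) != NULL) {
        /* Enter each rescheduled coroutine under ctx's lock, preserving
         * the invariant that coroutine code previously got for free
         * from aio_bh_poll(). */
        aio_context_acquire(ctx);
        qemu_coroutine_enter(co);
        aio_context_release(ctx);
    }
}
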
util/thread-pool.c (+2 -0)

@@ -165,6 +165,7 @@ static void thread_pool_completion_bh(void *opaque)
     ThreadPool *pool = opaque;
     ThreadPoolElement *elem, *next;
 
+    aio_context_acquire(pool->ctx);
 restart:
     QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
         if (elem->state != THREAD_DONE) {
@@ -191,6 +192,7 @@ restart:
             qemu_aio_unref(elem);
         }
     }
+    aio_context_release(pool->ctx);
 }
 
 static void thread_pool_cancel(BlockAIOCB *acb)
