diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 82676cdd01..540c38f829 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -32,6 +32,7 @@
 #include "hw/virtio/virtio-bus.h"
 #include "migration/qemu-file-types.h"
 #include "hw/virtio/virtio-access.h"
+#include "qemu/coroutine.h"
 
 /* Config size before the discard support (hide associated config fields) */
 #define VIRTIO_BLK_CFG_SIZE offsetof(struct virtio_blk_config, \
@@ -1214,6 +1215,8 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
     for (i = 0; i < conf->num_queues; i++) {
         virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
     }
+    qemu_coroutine_increase_pool_batch_size(conf->num_queues * conf->queue_size
+                                            / 2);
     virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
     if (err != NULL) {
         error_propagate(errp, err);
@@ -1250,6 +1253,8 @@ static void virtio_blk_device_unrealize(DeviceState *dev)
     for (i = 0; i < conf->num_queues; i++) {
         virtio_del_queue(vdev, i);
     }
+    qemu_coroutine_decrease_pool_batch_size(conf->num_queues * conf->queue_size
+                                            / 2);
     qemu_del_vm_change_state_handler(s->change);
     blockdev_mark_auto_del(s->blk);
     virtio_cleanup(vdev);
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index 4829ff373d..c828a95ee0 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -331,6 +331,16 @@ void qemu_co_sleep_wake(QemuCoSleep *w);
  */
 void coroutine_fn yield_until_fd_readable(int fd);
 
+/**
+ * Increase coroutine pool size
+ */
+void qemu_coroutine_increase_pool_batch_size(unsigned int additional_pool_size);
+
+/**
+ * Decrease coroutine pool size
+ */
+void qemu_coroutine_decrease_pool_batch_size(unsigned int removing_pool_size);
+
 #include "qemu/lockable.h"
 
 #endif /* QEMU_COROUTINE_H */
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index 38fb6d3084..c03b2422ff 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -20,12 +20,14 @@
 #include "qemu/coroutine_int.h"
 #include "block/aio.h"
 
+/** Initial batch size is 64, and is increased on demand */
 enum {
-    POOL_BATCH_SIZE = 64,
+    POOL_INITIAL_BATCH_SIZE = 64,
 };
 
 /** Free list to speed up creation */
 static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool);
+static unsigned int pool_batch_size = POOL_INITIAL_BATCH_SIZE;
 static unsigned int release_pool_size;
 static __thread QSLIST_HEAD(, Coroutine) alloc_pool = QSLIST_HEAD_INITIALIZER(pool);
 static __thread unsigned int alloc_pool_size;
@@ -49,7 +51,7 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque)
     if (CONFIG_COROUTINE_POOL) {
         co = QSLIST_FIRST(&alloc_pool);
         if (!co) {
-            if (release_pool_size > POOL_BATCH_SIZE) {
+            if (release_pool_size > qatomic_read(&pool_batch_size)) {
                 /* Slow path; a good place to register the destructor, too.  */
                 if (!coroutine_pool_cleanup_notifier.notify) {
                     coroutine_pool_cleanup_notifier.notify = coroutine_pool_cleanup;
@@ -86,12 +88,12 @@ static void coroutine_delete(Coroutine *co)
     co->caller = NULL;
 
     if (CONFIG_COROUTINE_POOL) {
-        if (release_pool_size < POOL_BATCH_SIZE * 2) {
+        if (release_pool_size < qatomic_read(&pool_batch_size) * 2) {
             QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
             qatomic_inc(&release_pool_size);
             return;
         }
-        if (alloc_pool_size < POOL_BATCH_SIZE) {
+        if (alloc_pool_size < qatomic_read(&pool_batch_size)) {
             QSLIST_INSERT_HEAD(&alloc_pool, co, pool_next);
             alloc_pool_size++;
             return;
@@ -202,3 +204,13 @@ AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
 {
     return co->ctx;
 }
+
+void qemu_coroutine_increase_pool_batch_size(unsigned int additional_pool_size)
+{
+    qatomic_add(&pool_batch_size, additional_pool_size);
+}
+
+void qemu_coroutine_decrease_pool_batch_size(unsigned int removing_pool_size)
+{
+    qatomic_sub(&pool_batch_size, removing_pool_size);
+}
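
The virtio-blk hunks above reserve half of the device's total queue depth
(num_queues * queue_size / 2) when the device is realized and return that
reservation on unrealize. The standalone sketch below models only that
accounting, using C11 atomics in place of QEMU's qatomic_* helpers; the
device values and local function names are illustrative and not part of the
patch.

    /* Minimal model of the batch-size accounting; not QEMU code. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define POOL_INITIAL_BATCH_SIZE 64

    static atomic_uint pool_batch_size = POOL_INITIAL_BATCH_SIZE;

    static void increase_pool_batch_size(unsigned int additional_pool_size)
    {
        atomic_fetch_add(&pool_batch_size, additional_pool_size);
    }

    static void decrease_pool_batch_size(unsigned int removing_pool_size)
    {
        atomic_fetch_sub(&pool_batch_size, removing_pool_size);
    }

    int main(void)
    {
        unsigned int num_queues = 4, queue_size = 256;   /* made-up values */

        /* realize: grow the pool hint by half the total queue depth */
        increase_pool_batch_size(num_queues * queue_size / 2);
        printf("batch size after realize: %u\n",
               atomic_load(&pool_batch_size));           /* 64 + 512 = 576 */

        /* unrealize: give the reservation back */
        decrease_pool_batch_size(num_queues * queue_size / 2);
        printf("batch size after unrealize: %u\n",
               atomic_load(&pool_batch_size));           /* back to 64 */

        return 0;
    }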