
exec: Change RAMBlockIterFunc definition

Currently, qemu_ram_foreach_* calls RAMBlockIterFunc with many
block-specific arguments, but the iterator function often needs the
RAMBlock* itself. This refactoring is needed for fast access to RAMBlock
flags from qemu_ram_foreach_block's callback; the only way to achieve
this now is to call qemu_ram_block_from_host (which also enumerates the
blocks).

So, this patch reduces the complexity of the
qemu_ram_foreach_block() -> cb() -> qemu_ram_block_from_host()
path from O(n^2) to O(n).
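
As a minimal sketch of the old pattern (the callback below is hypothetical,
not part of this patch): any callback that needed the RAMBlock itself had to
look it up again, and qemu_ram_block_from_host() walks the block list, so an
O(n) lookup ran inside an O(n) iteration:

static int old_style_cb(const char *block_name, void *host_addr,
                        ram_addr_t offset, ram_addr_t length, void *opaque)
{
    ram_addr_t host_offset;
    /* O(n) lookup per callback, just to reach the block's flags */
    RAMBlock *rb = qemu_ram_block_from_host(host_addr, false, &host_offset);

    if (rb && qemu_ram_is_shared(rb)) {
        /* ... */
    }
    return 0;
}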

Change the RAMBlockIterFunc definition and add accessor functions to
read the RAMBlock* fields which were previously passed as arguments.
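
With the new definition, a callback receives the RAMBlock directly and reads
whatever it needs through the accessors (hypothetical callback, shown only to
illustrate the interface):

static int new_style_cb(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);

    if (qemu_ram_is_shared(rb)) {   /* flags are now one call away */
        /* ... use block_name, length, qemu_ram_get_host_addr(rb), ... */
    }
    return 0;
}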

Signed-off-by: Yury Kotov <yury-kotov@yandex-team.ru>
Message-Id: <20190215174548.2630-2-yury-kotov@yandex-team.ru>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
commit 754cb9c0eb (tags/v4.0.0-rc0)
Author: Yury Kotov
6 changed files with 65 additions and 26 deletions:
  1. exec.c                      +17  -4
  2. include/exec/cpu-common.h    +4  -2
  3. migration/postcopy-ram.c    +21 -15
  4. migration/rdma.c             +5  -2
  5. stubs/ram-block.c           +15  -0
  6. util/vfio-helpers.c          +3  -3

exec.c (+17, -4)

@@ -1972,6 +1972,21 @@ const char *qemu_ram_get_idstr(RAMBlock *rb)
     return rb->idstr;
 }

+void *qemu_ram_get_host_addr(RAMBlock *rb)
+{
+    return rb->host;
+}
+
+ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
+{
+    return rb->offset;
+}
+
+ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
+{
+    return rb->used_length;
+}
+
 bool qemu_ram_is_shared(RAMBlock *rb)
 {
     return rb->flags & RAM_SHARED;
@@ -3961,8 +3976,7 @@ int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)

     rcu_read_lock();
     RAMBLOCK_FOREACH(block) {
-        ret = func(block->idstr, block->host, block->offset,
-                   block->used_length, opaque);
+        ret = func(block, opaque);
         if (ret) {
             break;
         }
@@ -3981,8 +3995,7 @@ int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque)
         if (!qemu_ram_is_migratable(block)) {
             continue;
         }
-        ret = func(block->idstr, block->host, block->offset,
-                   block->used_length, opaque);
+        ret = func(block, opaque);
         if (ret) {
             break;
         }

include/exec/cpu-common.h (+4, -2)

@@ -72,6 +72,9 @@ ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
 void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
 void qemu_ram_unset_idstr(RAMBlock *block);
 const char *qemu_ram_get_idstr(RAMBlock *rb);
+void *qemu_ram_get_host_addr(RAMBlock *rb);
+ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
+ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
 bool qemu_ram_is_shared(RAMBlock *rb);
 bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
 void qemu_ram_set_uf_zeroable(RAMBlock *rb);
@@ -116,8 +119,7 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len);
 extern struct MemoryRegion io_mem_rom;
 extern struct MemoryRegion io_mem_notdirty;

-typedef int (RAMBlockIterFunc)(const char *block_name, void *host_addr,
-    ram_addr_t offset, ram_addr_t length, void *opaque);
+typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
 int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque);
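
On the caller side (hypothetical example, not part of this patch), the
iterators are used unchanged: pass a callback matching the new typedef plus an
opaque pointer, and return non-zero from the callback to stop the walk early:

static int count_shared_bytes(RAMBlock *rb, void *opaque)
{
    uint64_t *total = opaque;

    if (qemu_ram_is_shared(rb)) {
        *total += qemu_ram_get_used_length(rb);
    }
    return 0;   /* non-zero would terminate the iteration */
}

static uint64_t shared_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(count_shared_bytes, &total);
    return total;
}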

migration/postcopy-ram.c (+21, -15)

@@ -319,10 +319,10 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)

 /* Callback from postcopy_ram_supported_by_host block iterator.
  */
-static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
-                                      ram_addr_t offset, ram_addr_t length, void *opaque)
+static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
 {
-    RAMBlock *rb = qemu_ram_block_by_name(block_name);
+    const char *block_name = qemu_ram_get_idstr(rb);
+    ram_addr_t length = qemu_ram_get_used_length(rb);
     size_t pagesize = qemu_ram_pagesize(rb);

     if (length % pagesize) {
@@ -443,9 +443,12 @@ out:
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
-static int init_range(const char *block_name, void *host_addr,
-                      ram_addr_t offset, ram_addr_t length, void *opaque)
+static int init_range(RAMBlock *rb, void *opaque)
 {
+    const char *block_name = qemu_ram_get_idstr(rb);
+    void *host_addr = qemu_ram_get_host_addr(rb);
+    ram_addr_t offset = qemu_ram_get_offset(rb);
+    ram_addr_t length = qemu_ram_get_used_length(rb);
     trace_postcopy_init_range(block_name, host_addr, offset, length);

     /*
@@ -465,9 +468,12 @@ static int init_range(const char *block_name, void *host_addr,
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
-static int cleanup_range(const char *block_name, void *host_addr,
-                         ram_addr_t offset, ram_addr_t length, void *opaque)
+static int cleanup_range(RAMBlock *rb, void *opaque)
 {
+    const char *block_name = qemu_ram_get_idstr(rb);
+    void *host_addr = qemu_ram_get_host_addr(rb);
+    ram_addr_t offset = qemu_ram_get_offset(rb);
+    ram_addr_t length = qemu_ram_get_used_length(rb);
     MigrationIncomingState *mis = opaque;
     struct uffdio_range range_struct;
     trace_postcopy_cleanup_range(block_name, host_addr, offset, length);
@@ -586,9 +592,12 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
 /*
 * Disable huge pages on an area
 */
-static int nhp_range(const char *block_name, void *host_addr,
-                     ram_addr_t offset, ram_addr_t length, void *opaque)
+static int nhp_range(RAMBlock *rb, void *opaque)
 {
+    const char *block_name = qemu_ram_get_idstr(rb);
+    void *host_addr = qemu_ram_get_host_addr(rb);
+    ram_addr_t offset = qemu_ram_get_offset(rb);
+    ram_addr_t length = qemu_ram_get_used_length(rb);
     trace_postcopy_nhp_range(block_name, host_addr, offset, length);

     /*
@@ -626,15 +635,13 @@ int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
 * opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
-static int ram_block_enable_notify(const char *block_name, void *host_addr,
-                                   ram_addr_t offset, ram_addr_t length,
-                                   void *opaque)
+static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
 {
     MigrationIncomingState *mis = opaque;
     struct uffdio_register reg_struct;

-    reg_struct.range.start = (uintptr_t)host_addr;
-    reg_struct.range.len = length;
+    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
+    reg_struct.range.len = qemu_ram_get_used_length(rb);
     reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

     /* Now tell our userfault_fd that it's responsible for this area */
@@ -647,7 +654,6 @@ static int ram_block_enable_notify(const char *block_name, void *host_addr,
         return -1;
     }
     if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
-        RAMBlock *rb = qemu_ram_block_by_name(block_name);
         qemu_ram_set_uf_zeroable(rb);
     }


migration/rdma.c (+5, -2)

@@ -624,9 +624,12 @@ static int rdma_add_block(RDMAContext *rdma, const char *block_name,
 * in advanced before the migration starts. This tells us where the RAM blocks
 * are so that we can register them individually.
 */
-static int qemu_rdma_init_one_block(const char *block_name, void *host_addr,
-    ram_addr_t block_offset, ram_addr_t length, void *opaque)
+static int qemu_rdma_init_one_block(RAMBlock *rb, void *opaque)
 {
+    const char *block_name = qemu_ram_get_idstr(rb);
+    void *host_addr = qemu_ram_get_host_addr(rb);
+    ram_addr_t block_offset = qemu_ram_get_offset(rb);
+    ram_addr_t length = qemu_ram_get_used_length(rb);
     return rdma_add_block(opaque, block_name, host_addr, block_offset, length);
 }


stubs/ram-block.c (+15, -0)

@@ -2,6 +2,21 @@
 #include "exec/ramlist.h"
 #include "exec/cpu-common.h"

+void *qemu_ram_get_host_addr(RAMBlock *rb)
+{
+    return 0;
+}
+
+ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
+{
+    return 0;
+}
+
+ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
+{
+    return 0;
+}
+
 void ram_block_notifier_add(RAMBlockNotifier *n)
 {
 }

util/vfio-helpers.c (+3, -3)

@@ -391,10 +391,10 @@ static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
     }
 }

-static int qemu_vfio_init_ramblock(const char *block_name, void *host_addr,
-                                   ram_addr_t offset, ram_addr_t length,
-                                   void *opaque)
+static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
 {
+    void *host_addr = qemu_ram_get_host_addr(rb);
+    ram_addr_t length = qemu_ram_get_used_length(rb);
     int ret;
     QEMUVFIOState *s = opaque;
