From e6f770506091eada46c63ac1c8b934b508e3807f Mon Sep 17 00:00:00 2001
From: Juan Quintela <quintela@redhat.com>
Date: Tue, 21 Jun 2022 13:36:11 +0200
Subject: [PATCH 07/12] multifd: Only flush once each full round of memory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RH-Author: quintela1 <quintela@redhat.com>
RH-MergeRequest: 186: Multifd flushes its channels 10 times per second
RH-Bugzilla: 2196295
RH-Acked-by: Peter Xu <peterx@redhat.com>
RH-Acked-by: Leonardo Brás <leobras@redhat.com>
RH-Commit: [3/3] 33f76dfc72a2552a42dc7f0fe3923564185a7bf7 (juan.quintela/c9s-qemu-kvm)

We need to add a new flag to indicate that we flush at that point.
Note that we still flush at the end of the setup and complete stages.

Signed-off-by: Juan Quintela <quintela@redhat.com>
Acked-by: Peter Xu <peterx@redhat.com>

---

Add the missing qemu_fflush(); now it always passes all tests.
In the previous version, the change that sets the default value to
false was lost in a rebase. Restore it.

(cherry picked from commit 294e5a4034e81b3d8db03b4e0f691386f20d6ed3)
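
For illustration only, the following self-contained sketch models the
framing change. It is not QEMU code: FLAG_EOS, FLAG_MULTIFD_FLUSH,
multifd_sync() and sections_per_round are invented stand-ins (the
numeric values match the RAM_SAVE_FLAG_* defines in ram.c). It only
mirrors the ordering this patch establishes: with
multifd-flush-after-each-section left at its new default of false, the
channels are synced when the in-band RAM_SAVE_FLAG_MULTIFD_FLUSH marker
arrives, once per full pass over RAM, instead of at every
RAM_SAVE_FLAG_EOS.

#include <stdbool.h>
#include <stdio.h>

enum { FLAG_EOS = 0x10, FLAG_MULTIFD_FLUSH = 0x200 };

static void multifd_sync(void)
{
    printf("  multifd channels flushed and synced\n");
}

static void receive(int flag, bool flush_after_each_section)
{
    switch (flag) {
    case FLAG_MULTIFD_FLUSH:            /* marker added by this patch */
        multifd_sync();
        break;
    case FLAG_EOS:                      /* end of one migration section */
        if (flush_after_each_section) { /* old behaviour, now opt-in */
            multifd_sync();
        }
        break;
    default:
        break;
    }
}

int main(void)
{
    const bool flush_after_each_section = false; /* new default */
    const int sections_per_round = 4;            /* arbitrary for the demo */

    for (int round = 0; round < 2; round++) {
        for (int s = 0; s < sections_per_round; s++) {
            printf("round %d, section %d sent\n", round, s);
            receive(FLAG_EOS, flush_after_each_section);
        }
        /* One full pass over RAM done: the sender emits the marker. */
        receive(FLAG_MULTIFD_FLUSH, flush_after_each_section);
    }
    return 0;
}

Built with any C99 compiler, it prints one sync per round rather than
one per section; flipping flush_after_each_section back to true restores
the old one-sync-per-section pattern.
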
---
migration/migration.h | 3 +--
migration/options.c | 8 ++------
migration/ram.c | 28 +++++++++++++++++++++++++++-
3 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/migration/migration.h b/migration/migration.h
index 04c78c1fd6..dfec649af8 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -419,8 +419,7 @@ struct MigrationState {
* only need to do this flush after we have go through all the
* dirty pages. For historical reasons, we do that after each
* section. This is suboptimal (we flush too many times).
- * Default value is false. Setting this property has no effect
- * until the patch that removes this comment. (since 8.1)
+ * Default value is false. (since 8.1)
*/
bool multifd_flush_after_each_section;
/*
diff --git a/migration/options.c b/migration/options.c
index 5b0d080ecb..e13c7cb8e5 100644
--- a/migration/options.c
+++ b/migration/options.c
@@ -89,7 +89,7 @@ Property migration_properties[] = {
DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
decompress_error_check, true),
DEFINE_PROP_BOOL("multifd-flush-after-each-section", MigrationState,
- multifd_flush_after_each_section, true),
+ multifd_flush_after_each_section, false),
DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState,
@@ -350,11 +350,7 @@ bool migrate_multifd_flush_after_each_section(void)
{
MigrationState *s = migrate_get_current();

- /*
- * Until the patch that remove this comment, we always return that
- * the property is enabled.
- */
- return true || s->multifd_flush_after_each_section;
+ return s->multifd_flush_after_each_section;
}

bool migrate_postcopy(void)
diff --git a/migration/ram.c b/migration/ram.c
index 1e2414d681..e9dcda8b9d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -86,6 +86,7 @@
#define RAM_SAVE_FLAG_XBZRLE 0x40
/* 0x80 is reserved in qemu-file.h for RAM_SAVE_FLAG_HOOK */
#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
+#define RAM_SAVE_FLAG_MULTIFD_FLUSH 0x200
/* We can't use any flag that is bigger than 0x200 */

int (*xbzrle_encode_buffer_func)(uint8_t *, uint8_t *, int,
@@ -1581,6 +1582,7 @@ retry:
* associated with the search process.
*
* Returns:
+ * <0: An error happened
* PAGE_ALL_CLEAN: no dirty page found, give up
* PAGE_TRY_AGAIN: no dirty page found, retry for next block
* PAGE_DIRTY_FOUND: dirty page found
@@ -1608,6 +1610,15 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
pss->page = 0;
pss->block = QLIST_NEXT_RCU(pss->block, next);
if (!pss->block) {
+ if (!migrate_multifd_flush_after_each_section()) {
+ QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel;
+ int ret = multifd_send_sync_main(f);
+ if (ret < 0) {
+ return ret;
+ }
+ qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+ qemu_fflush(f);
+ }
/*
* If memory migration starts over, we will meet a dirtied page
* which may still exists in compression threads's ring, so we
@@ -2600,6 +2611,9 @@ static int ram_find_and_save_block(RAMState *rs)
break;
} else if (res == PAGE_TRY_AGAIN) {
continue;
+ } else if (res < 0) {
+ pages = res;
+ break;
}
}
}
@@ -3286,6 +3300,10 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
return ret;
}

+ if (!migrate_multifd_flush_after_each_section()) {
+ qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+ }
+
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);

@@ -3471,6 +3489,9 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
return ret;
}

+ if (!migrate_multifd_flush_after_each_section()) {
+ qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH);
+ }
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);

@@ -4152,7 +4173,9 @@ int ram_load_postcopy(QEMUFile *f, int channel)
}
decompress_data_with_multi_threads(f, page_buffer, len);
break;
-
+ case RAM_SAVE_FLAG_MULTIFD_FLUSH:
+ multifd_recv_sync_main();
+ break;
case RAM_SAVE_FLAG_EOS:
/* normal exit */
if (migrate_multifd_flush_after_each_section()) {
@@ -4426,6 +4449,9 @@ static int ram_load_precopy(QEMUFile *f)
break;
}
break;
+ case RAM_SAVE_FLAG_MULTIFD_FLUSH:
+ multifd_recv_sync_main();
+ break;
case RAM_SAVE_FLAG_EOS:
/* normal exit */
if (migrate_multifd_flush_after_each_section()) {
--
2.39.3
