• R/O
  • HTTP
  • SSH
  • HTTPS

Commit

Tags
No Tags

Frequently used words (click to add to your profile)

javac++androidlinuxc#windowsobjective-ccocoa誰得qtpythonphprubygameguibathyscaphec計画中(planning stage)翻訳omegatframeworktwitterdomtestvb.netdirectxゲームエンジンbtronarduinopreviewer

Commit MetaInfo

Revisãod90e6f665d3ac197f83d93ad37147fe677521209 (tree)
Hora2022-01-30 00:55:54
AutorPeter Maydell <peter.maydell@lina...>
CommiterPeter Maydell

Mensagem de Log

Migration Pull request (Take 2)

Hi

This time I have disabled vmstate canary patches from Dave Gilbert.

Let's see if it works.

Later, Juan.
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEGJn/jt6/WMzuA0uC9IfvGFhy1yMFAmH0NkEACgkQ9IfvGFhy
1yM4VQ/+MML5ugA9XA5hOFV+Stwv2ENtMR4r4raQsC7UKdKMaCNuoj1BdlXMRaki
E2TpoHYq99rfJX+AA0XihxHh84I1l9fpoiXrcr8pgNmhcj0qkBykY9Elzf95woMM
UMyinL2jhHfHjby29AaE7BDelUZIA0BgyzQ3TMq8rO+l/ZqFYA8U1SEgPlDYj7cn
gkDWFkPJx6IKgcI8M1obHw11azHgS7dmjjl9lXzxJ2/WfXnoZCuU0BtHd6a1rnAS
qcO3gwLfCo+3aTGKRseJie1Cljz6sIP+ke0Xgn5O+e7alWjCOtlVZrWwd2MqQ07K
2bf7uuTC2KQicLLH8DCnoH/BSvHmpyl/FglFrETRk/55KKg0bi+ZltXaTs9bC2uO
jzNbBSRf8UMcX6Bp3ukhPaFQ1vxqP7KxN9bM+7LYP9aX7Lt/NCJciYjw1jCTwcwi
nz0RS4d7cscMhoMEarPCKcaNJR6PJetdZY2VXavWjXv6er3407yTocvuei0Epdyb
WZtbFnpI2tfx1GEr/Bz6Mnk/qn7kwo7BFEUtJoweFE05g5wHa1PojsblrrsqeOuc
llpK8o8c8NFACxeiLa0z0VBkTjdOtao206eLhF+Se3ukubImayRQwZiOCEBBXwB3
+LmVcmwNDfNonSWI04AA2WAy9gAdM3Ko/gBfWsuOPR5oIs65wns=
=F/ek
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/quintela-gitlab/tags/migration-20220128-pull-request' into staging

Migration Pull request (Take 2)

Hi

This time I have disabled vmstate canary patches from Dave Gilbert.

Let's see if it works.

Later, Juan.

# gpg: Signature made Fri 28 Jan 2022 18:30:25 GMT
# gpg: using RSA key 1899FF8EDEBF58CCEE034B82F487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>" [full]
# gpg: aka "Juan Quintela <quintela@trasno.org>" [full]
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03 4B82 F487 EF18 5872 D723

* remotes/quintela-gitlab/tags/migration-20220128-pull-request: (36 commits)

migration: Move temp page setup and cleanup into separate functions
migration: Simplify unqueue_page()
migration: Add postcopy_has_request()
migration: Enable UFFD_FEATURE_THREAD_ID even without blocktime feat
migration: No off-by-one for pss->page update in host page size
migration: Tally pre-copy, downtime and post-copy bytes independently
migration: Introduce ram_transferred_add()
migration: Don't return for postcopy_send_discard_bm_ram()
migration: Drop return code for discard ram process
migration: Do chunk page in postcopy_each_ram_send_discard()
migration: Drop postcopy_chunk_hostpages()
migration: Don't return for postcopy_chunk_hostpages()
migration: Drop dead code of ram_debug_dump_bitmap()
migration/ram: clean up unused comment.
migration: Report the error returned when save_live_iterate fails
migration/migration.c: Remove the MIGRATION_STATUS_ACTIVE when migration finished
migration/migration.c: Avoid COLO boot in postcopy migration
migration/migration.c: Add missed default error handler for migration state
Remove unnecessary minimum_version_id_old fields
multifd: Rename pages_used to normal_pages
...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Mudança Sumário

Diff

--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -297,7 +297,6 @@ static const VMStateDescription vmstate_cpuhp_sts = {
297297 .name = "CPU hotplug device state",
298298 .version_id = 1,
299299 .minimum_version_id = 1,
300- .minimum_version_id_old = 1,
301300 .fields = (VMStateField[]) {
302301 VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
303302 VMSTATE_BOOL(is_removing, AcpiCpuStatus),
@@ -311,7 +310,6 @@ const VMStateDescription vmstate_cpu_hotplug = {
311310 .name = "CPU hotplug state",
312311 .version_id = 1,
313312 .minimum_version_id = 1,
314- .minimum_version_id_old = 1,
315313 .fields = (VMStateField[]) {
316314 VMSTATE_UINT32(selector, CPUHotplugState),
317315 VMSTATE_UINT8(command, CPUHotplugState),
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -163,7 +163,6 @@ static const VMStateDescription vmstate_memhp_state = {
163163 .name = "ich9_pm/memhp",
164164 .version_id = 1,
165165 .minimum_version_id = 1,
166- .minimum_version_id_old = 1,
167166 .needed = vmstate_test_use_memhp,
168167 .fields = (VMStateField[]) {
169168 VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, ICH9LPCPMRegs),
@@ -181,7 +180,6 @@ static const VMStateDescription vmstate_tco_io_state = {
181180 .name = "ich9_pm/tco",
182181 .version_id = 1,
183182 .minimum_version_id = 1,
184- .minimum_version_id_old = 1,
185183 .needed = vmstate_test_use_tco,
186184 .fields = (VMStateField[]) {
187185 VMSTATE_STRUCT(tco_regs, ICH9LPCPMRegs, 1, vmstate_tco_io_sts,
@@ -208,7 +206,6 @@ static const VMStateDescription vmstate_cpuhp_state = {
208206 .name = "ich9_pm/cpuhp",
209207 .version_id = 1,
210208 .minimum_version_id = 1,
211- .minimum_version_id_old = 1,
212209 .needed = vmstate_test_use_cpuhp,
213210 .pre_load = vmstate_cpuhp_pre_load,
214211 .fields = (VMStateField[]) {
--- a/hw/acpi/memory_hotplug.c
+++ b/hw/acpi/memory_hotplug.c
@@ -318,7 +318,6 @@ static const VMStateDescription vmstate_memhp_sts = {
318318 .name = "memory hotplug device state",
319319 .version_id = 1,
320320 .minimum_version_id = 1,
321- .minimum_version_id_old = 1,
322321 .fields = (VMStateField[]) {
323322 VMSTATE_BOOL(is_enabled, MemStatus),
324323 VMSTATE_BOOL(is_inserting, MemStatus),
@@ -332,7 +331,6 @@ const VMStateDescription vmstate_memory_hotplug = {
332331 .name = "memory hotplug state",
333332 .version_id = 1,
334333 .minimum_version_id = 1,
335- .minimum_version_id_old = 1,
336334 .fields = (VMStateField[]) {
337335 VMSTATE_UINT32(selector, MemHotplugState),
338336 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, MemHotplugState, dev_count,
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -230,7 +230,6 @@ static const VMStateDescription vmstate_memhp_state = {
230230 .name = "piix4_pm/memhp",
231231 .version_id = 1,
232232 .minimum_version_id = 1,
233- .minimum_version_id_old = 1,
234233 .needed = vmstate_test_use_memhp,
235234 .fields = (VMStateField[]) {
236235 VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, PIIX4PMState),
@@ -255,7 +254,6 @@ static const VMStateDescription vmstate_cpuhp_state = {
255254 .name = "piix4_pm/cpuhp",
256255 .version_id = 1,
257256 .minimum_version_id = 1,
258- .minimum_version_id_old = 1,
259257 .needed = vmstate_test_use_cpuhp,
260258 .pre_load = vmstate_cpuhp_pre_load,
261259 .fields = (VMStateField[]) {
--- a/hw/acpi/tco.c
+++ b/hw/acpi/tco.c
@@ -239,7 +239,6 @@ const VMStateDescription vmstate_tco_io_sts = {
239239 .name = "tco io device status",
240240 .version_id = 1,
241241 .minimum_version_id = 1,
242- .minimum_version_id_old = 1,
243242 .fields = (VMStateField[]) {
244243 VMSTATE_UINT16(tco.rld, TCOIORegs),
245244 VMSTATE_UINT8(tco.din, TCOIORegs),
--- a/hw/audio/pcspk.c
+++ b/hw/audio/pcspk.c
@@ -209,7 +209,6 @@ static const VMStateDescription vmstate_spk = {
209209 .name = "pcspk",
210210 .version_id = 1,
211211 .minimum_version_id = 1,
212- .minimum_version_id_old = 1,
213212 .needed = migrate_needed,
214213 .fields = (VMStateField[]) {
215214 VMSTATE_UINT8(data_on, PCSpkState),
--- a/hw/display/macfb.c
+++ b/hw/display/macfb.c
@@ -616,7 +616,6 @@ static const VMStateDescription vmstate_macfb = {
616616 .name = "macfb",
617617 .version_id = 1,
618618 .minimum_version_id = 1,
619- .minimum_version_id_old = 1,
620619 .post_load = macfb_post_load,
621620 .fields = (VMStateField[]) {
622621 VMSTATE_UINT8_ARRAY(color_palette, MacfbState, 256 * 3),
--- a/hw/dma/xlnx-zdma.c
+++ b/hw/dma/xlnx-zdma.c
@@ -806,7 +806,6 @@ static const VMStateDescription vmstate_zdma = {
806806 .name = TYPE_XLNX_ZDMA,
807807 .version_id = 1,
808808 .minimum_version_id = 1,
809- .minimum_version_id_old = 1,
810809 .fields = (VMStateField[]) {
811810 VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
812811 VMSTATE_UINT32(state, XlnxZDMA),
--- a/hw/dma/xlnx_csu_dma.c
+++ b/hw/dma/xlnx_csu_dma.c
@@ -677,7 +677,6 @@ static const VMStateDescription vmstate_xlnx_csu_dma = {
677677 .name = TYPE_XLNX_CSU_DMA,
678678 .version_id = 0,
679679 .minimum_version_id = 0,
680- .minimum_version_id_old = 0,
681680 .fields = (VMStateField[]) {
682681 VMSTATE_PTIMER(src_timer, XlnxCSUDMA),
683682 VMSTATE_UINT16(width, XlnxCSUDMA),
--- a/hw/gpio/imx_gpio.c
+++ b/hw/gpio/imx_gpio.c
@@ -277,7 +277,6 @@ static const VMStateDescription vmstate_imx_gpio = {
277277 .name = TYPE_IMX_GPIO,
278278 .version_id = 1,
279279 .minimum_version_id = 1,
280- .minimum_version_id_old = 1,
281280 .fields = (VMStateField[]) {
282281 VMSTATE_UINT32(dr, IMXGPIOState),
283282 VMSTATE_UINT32(gdir, IMXGPIOState),
--- a/hw/misc/bcm2835_mbox.c
+++ b/hw/misc/bcm2835_mbox.c
@@ -271,7 +271,6 @@ static const VMStateDescription vmstate_bcm2835_mbox = {
271271 .name = TYPE_BCM2835_MBOX,
272272 .version_id = 1,
273273 .minimum_version_id = 1,
274- .minimum_version_id_old = 1,
275274 .fields = (VMStateField[]) {
276275 VMSTATE_BOOL_ARRAY(available, BCM2835MboxState, MBOX_CHAN_COUNT),
277276 VMSTATE_STRUCT_ARRAY(mbox, BCM2835MboxState, 2, 1,
--- a/hw/net/can/can_kvaser_pci.c
+++ b/hw/net/can/can_kvaser_pci.c
@@ -266,7 +266,6 @@ static const VMStateDescription vmstate_kvaser_pci = {
266266 .name = "kvaser_pci",
267267 .version_id = 1,
268268 .minimum_version_id = 1,
269- .minimum_version_id_old = 1,
270269 .fields = (VMStateField[]) {
271270 VMSTATE_PCI_DEVICE(dev, KvaserPCIState),
272271 /* Load this before sja_state. */
--- a/hw/net/can/can_mioe3680_pci.c
+++ b/hw/net/can/can_mioe3680_pci.c
@@ -203,7 +203,6 @@ static const VMStateDescription vmstate_mioe3680_pci = {
203203 .name = "mioe3680_pci",
204204 .version_id = 1,
205205 .minimum_version_id = 1,
206- .minimum_version_id_old = 1,
207206 .fields = (VMStateField[]) {
208207 VMSTATE_PCI_DEVICE(dev, Mioe3680PCIState),
209208 VMSTATE_STRUCT(sja_state[0], Mioe3680PCIState, 0, vmstate_can_sja,
--- a/hw/net/can/can_pcm3680_pci.c
+++ b/hw/net/can/can_pcm3680_pci.c
@@ -204,7 +204,6 @@ static const VMStateDescription vmstate_pcm3680i_pci = {
204204 .name = "pcm3680i_pci",
205205 .version_id = 1,
206206 .minimum_version_id = 1,
207- .minimum_version_id_old = 1,
208207 .fields = (VMStateField[]) {
209208 VMSTATE_PCI_DEVICE(dev, Pcm3680iPCIState),
210209 VMSTATE_STRUCT(sja_state[0], Pcm3680iPCIState, 0,
--- a/hw/net/can/can_sja1000.c
+++ b/hw/net/can/can_sja1000.c
@@ -928,7 +928,6 @@ const VMStateDescription vmstate_qemu_can_filter = {
928928 .name = "qemu_can_filter",
929929 .version_id = 1,
930930 .minimum_version_id = 1,
931- .minimum_version_id_old = 1,
932931 .fields = (VMStateField[]) {
933932 VMSTATE_UINT32(can_id, qemu_can_filter),
934933 VMSTATE_UINT32(can_mask, qemu_can_filter),
@@ -952,7 +951,6 @@ const VMStateDescription vmstate_can_sja = {
952951 .name = "can_sja",
953952 .version_id = 1,
954953 .minimum_version_id = 1,
955- .minimum_version_id_old = 1,
956954 .post_load = can_sja_post_load,
957955 .fields = (VMStateField[]) {
958956 VMSTATE_UINT8(mode, CanSJA1000State),
--- a/hw/net/can/ctucan_core.c
+++ b/hw/net/can/ctucan_core.c
@@ -617,7 +617,6 @@ const VMStateDescription vmstate_qemu_ctucan_tx_buffer = {
617617 .name = "qemu_ctucan_tx_buffer",
618618 .version_id = 1,
619619 .minimum_version_id = 1,
620- .minimum_version_id_old = 1,
621620 .fields = (VMStateField[]) {
622621 VMSTATE_UINT8_ARRAY(data, CtuCanCoreMsgBuffer, CTUCAN_CORE_MSG_MAX_LEN),
623622 VMSTATE_END_OF_LIST()
@@ -636,7 +635,6 @@ const VMStateDescription vmstate_ctucan = {
636635 .name = "ctucan",
637636 .version_id = 1,
638637 .minimum_version_id = 1,
639- .minimum_version_id_old = 1,
640638 .post_load = ctucan_post_load,
641639 .fields = (VMStateField[]) {
642640 VMSTATE_UINT32(mode_settings.u32, CtuCanCoreState),
--- a/hw/net/can/ctucan_pci.c
+++ b/hw/net/can/ctucan_pci.c
@@ -215,7 +215,6 @@ static const VMStateDescription vmstate_ctucan_pci = {
215215 .name = "ctucan_pci",
216216 .version_id = 1,
217217 .minimum_version_id = 1,
218- .minimum_version_id_old = 1,
219218 .fields = (VMStateField[]) {
220219 VMSTATE_PCI_DEVICE(dev, CtuCanPCIState),
221220 VMSTATE_STRUCT(ctucan_state[0], CtuCanPCIState, 0, vmstate_ctucan,
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -1049,7 +1049,6 @@ const VMStateDescription vmstate_ppc_timebase = {
10491049 .name = "timebase",
10501050 .version_id = 1,
10511051 .minimum_version_id = 1,
1052- .minimum_version_id_old = 1,
10531052 .pre_save = timebase_pre_save,
10541053 .fields = (VMStateField []) {
10551054 VMSTATE_UINT64(guest_timebase, PPCTimebase),
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -2315,7 +2315,6 @@ static const VMStateDescription vmstate_megasas_gen2 = {
23152315 .name = "megasas-gen2",
23162316 .version_id = 0,
23172317 .minimum_version_id = 0,
2318- .minimum_version_id_old = 0,
23192318 .fields = (VMStateField[]) {
23202319 VMSTATE_PCI_DEVICE(parent_obj, MegasasState),
23212320 VMSTATE_MSIX(parent_obj, MegasasState),
--- a/hw/scsi/mptsas.c
+++ b/hw/scsi/mptsas.c
@@ -1363,7 +1363,6 @@ static const VMStateDescription vmstate_mptsas = {
13631363 .name = "mptsas",
13641364 .version_id = 0,
13651365 .minimum_version_id = 0,
1366- .minimum_version_id_old = 0,
13671366 .post_load = mptsas_post_load,
13681367 .fields = (VMStateField[]) {
13691368 VMSTATE_PCI_DEVICE(dev, MPTSASState),
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -592,7 +592,6 @@ static const VMStateDescription vmstate_virtio_mmio = {
592592 .name = "virtio_mmio",
593593 .version_id = 1,
594594 .minimum_version_id = 1,
595- .minimum_version_id_old = 1,
596595 .fields = (VMStateField[]) {
597596 VMSTATE_END_OF_LIST()
598597 },
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -131,7 +131,6 @@ static const VMStateDescription vmstate_virtio_pci = {
131131 .name = "virtio_pci",
132132 .version_id = 1,
133133 .minimum_version_id = 1,
134- .minimum_version_id_old = 1,
135134 .fields = (VMStateField[]) {
136135 VMSTATE_END_OF_LIST()
137136 },
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -2808,7 +2808,6 @@ static const VMStateDescription vmstate_virtio = {
28082808 .name = "virtio",
28092809 .version_id = 1,
28102810 .minimum_version_id = 1,
2811- .minimum_version_id_old = 1,
28122811 .fields = (VMStateField[]) {
28132812 VMSTATE_END_OF_LIST()
28142813 },
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1014,6 +1014,9 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
10141014 info->ram->page_size = page_size;
10151015 info->ram->multifd_bytes = ram_counters.multifd_bytes;
10161016 info->ram->pages_per_second = s->pages_per_second;
1017+ info->ram->precopy_bytes = ram_counters.precopy_bytes;
1018+ info->ram->downtime_bytes = ram_counters.downtime_bytes;
1019+ info->ram->postcopy_bytes = ram_counters.postcopy_bytes;
10171020
10181021 if (migrate_use_xbzrle()) {
10191022 info->has_xbzrle_cache = true;
@@ -2991,10 +2994,7 @@ static int postcopy_start(MigrationState *ms)
29912994 * that are dirty
29922995 */
29932996 if (migrate_postcopy_ram()) {
2994- if (ram_postcopy_send_discard_bitmap(ms)) {
2995- error_report("postcopy send discard bitmap failed");
2996- goto fail;
2997- }
2997+ ram_postcopy_send_discard_bitmap(ms);
29982998 }
29992999
30003000 /*
@@ -3205,7 +3205,7 @@ static void migration_completion(MigrationState *s)
32053205 qemu_mutex_unlock_iothread();
32063206
32073207 trace_migration_completion_postcopy_end_after_complete();
3208- } else if (s->state == MIGRATION_STATUS_CANCELLING) {
3208+ } else {
32093209 goto fail;
32103210 }
32113211
@@ -3230,7 +3230,11 @@ static void migration_completion(MigrationState *s)
32303230 goto fail_invalidate;
32313231 }
32323232
3233- if (!migrate_colo_enabled()) {
3233+ if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) {
3234+ /* COLO does not support postcopy */
3235+ migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3236+ MIGRATION_STATUS_COLO);
3237+ } else {
32343238 migrate_set_state(&s->state, current_active_state,
32353239 MIGRATION_STATUS_COMPLETED);
32363240 }
@@ -3621,16 +3625,6 @@ static void migration_iteration_finish(MigrationState *s)
36213625 "COLO enabled", __func__);
36223626 }
36233627 migrate_start_colo_process(s);
3624- /*
3625- * Fixme: we will run VM in COLO no matter its old running state.
3626- * After exited COLO, we will keep running.
3627- */
3628- /* Fallthrough */
3629- case MIGRATION_STATUS_ACTIVE:
3630- /*
3631- * We should really assert here, but since it's during
3632- * migration, let's try to reduce the usage of assertions.
3633- */
36343628 s->vm_was_running = true;
36353629 /* Fallthrough */
36363630 case MIGRATION_STATUS_FAILED:
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -51,16 +51,16 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
5151 zs->opaque = Z_NULL;
5252 if (deflateInit(zs, migrate_multifd_zlib_level()) != Z_OK) {
5353 g_free(z);
54- error_setg(errp, "multifd %d: deflate init failed", p->id);
54+ error_setg(errp, "multifd %u: deflate init failed", p->id);
5555 return -1;
5656 }
57- /* To be safe, we reserve twice the size of the packet */
58- z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
57+ /* This is the maxium size of the compressed buffer */
58+ z->zbuff_len = compressBound(MULTIFD_PACKET_SIZE);
5959 z->zbuff = g_try_malloc(z->zbuff_len);
6060 if (!z->zbuff) {
6161 deflateEnd(&z->zs);
6262 g_free(z);
63- error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
63+ error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
6464 return -1;
6565 }
6666 p->data = z;
@@ -106,16 +106,16 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
106106 int ret;
107107 uint32_t i;
108108
109- for (i = 0; i < p->pages->num; i++) {
109+ for (i = 0; i < p->normal_num; i++) {
110110 uint32_t available = z->zbuff_len - out_size;
111111 int flush = Z_NO_FLUSH;
112112
113- if (i == p->pages->num - 1) {
113+ if (i == p->normal_num - 1) {
114114 flush = Z_SYNC_FLUSH;
115115 }
116116
117117 zs->avail_in = page_size;
118- zs->next_in = p->pages->block->host + p->pages->offset[i];
118+ zs->next_in = p->pages->block->host + p->normal[i];
119119
120120 zs->avail_out = available;
121121 zs->next_out = z->zbuff + out_size;
@@ -132,17 +132,20 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
132132 ret = deflate(zs, flush);
133133 } while (ret == Z_OK && zs->avail_in && zs->avail_out);
134134 if (ret == Z_OK && zs->avail_in) {
135- error_setg(errp, "multifd %d: deflate failed to compress all input",
135+ error_setg(errp, "multifd %u: deflate failed to compress all input",
136136 p->id);
137137 return -1;
138138 }
139139 if (ret != Z_OK) {
140- error_setg(errp, "multifd %d: deflate returned %d instead of Z_OK",
140+ error_setg(errp, "multifd %u: deflate returned %d instead of Z_OK",
141141 p->id, ret);
142142 return -1;
143143 }
144144 out_size += available - zs->avail_out;
145145 }
146+ p->iov[p->iovs_num].iov_base = z->zbuff;
147+ p->iov[p->iovs_num].iov_len = out_size;
148+ p->iovs_num++;
146149 p->next_packet_size = out_size;
147150 p->flags |= MULTIFD_FLAG_ZLIB;
148151
@@ -150,25 +153,6 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
150153 }
151154
152155 /**
153- * zlib_send_write: do the actual write of the data
154- *
155- * Do the actual write of the comprresed buffer.
156- *
157- * Returns 0 for success or -1 for error
158- *
159- * @p: Params for the channel that we are using
160- * @used: number of pages used
161- * @errp: pointer to an error
162- */
163-static int zlib_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
164-{
165- struct zlib_data *z = p->data;
166-
167- return qio_channel_write_all(p->c, (void *)z->zbuff, p->next_packet_size,
168- errp);
169-}
170-
171-/**
172156 * zlib_recv_setup: setup receive side
173157 *
174158 * Create the compressed channel and buffer.
@@ -190,7 +174,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
190174 zs->avail_in = 0;
191175 zs->next_in = Z_NULL;
192176 if (inflateInit(zs) != Z_OK) {
193- error_setg(errp, "multifd %d: inflate init failed", p->id);
177+ error_setg(errp, "multifd %u: inflate init failed", p->id);
194178 return -1;
195179 }
196180 /* To be safe, we reserve twice the size of the packet */
@@ -198,7 +182,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
198182 z->zbuff = g_try_malloc(z->zbuff_len);
199183 if (!z->zbuff) {
200184 inflateEnd(zs);
201- error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
185+ error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
202186 return -1;
203187 }
204188 return 0;
@@ -241,13 +225,13 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
241225 uint32_t in_size = p->next_packet_size;
242226 /* we measure the change of total_out */
243227 uint32_t out_size = zs->total_out;
244- uint32_t expected_size = p->pages->num * qemu_target_page_size();
228+ uint32_t expected_size = p->normal_num * page_size;
245229 uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
246230 int ret;
247231 int i;
248232
249233 if (flags != MULTIFD_FLAG_ZLIB) {
250- error_setg(errp, "multifd %d: flags received %x flags expected %x",
234+ error_setg(errp, "multifd %u: flags received %x flags expected %x",
251235 p->id, flags, MULTIFD_FLAG_ZLIB);
252236 return -1;
253237 }
@@ -260,16 +244,16 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
260244 zs->avail_in = in_size;
261245 zs->next_in = z->zbuff;
262246
263- for (i = 0; i < p->pages->num; i++) {
247+ for (i = 0; i < p->normal_num; i++) {
264248 int flush = Z_NO_FLUSH;
265249 unsigned long start = zs->total_out;
266250
267- if (i == p->pages->num - 1) {
251+ if (i == p->normal_num - 1) {
268252 flush = Z_SYNC_FLUSH;
269253 }
270254
271255 zs->avail_out = page_size;
272- zs->next_out = p->pages->block->host + p->pages->offset[i];
256+ zs->next_out = p->host + p->normal[i];
273257
274258 /*
275259 * Welcome to inflate semantics
@@ -284,19 +268,19 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
284268 } while (ret == Z_OK && zs->avail_in
285269 && (zs->total_out - start) < page_size);
286270 if (ret == Z_OK && (zs->total_out - start) < page_size) {
287- error_setg(errp, "multifd %d: inflate generated too few output",
271+ error_setg(errp, "multifd %u: inflate generated too few output",
288272 p->id);
289273 return -1;
290274 }
291275 if (ret != Z_OK) {
292- error_setg(errp, "multifd %d: inflate returned %d instead of Z_OK",
276+ error_setg(errp, "multifd %u: inflate returned %d instead of Z_OK",
293277 p->id, ret);
294278 return -1;
295279 }
296280 }
297281 out_size = zs->total_out - out_size;
298282 if (out_size != expected_size) {
299- error_setg(errp, "multifd %d: packet size received %d size expected %d",
283+ error_setg(errp, "multifd %u: packet size received %u size expected %u",
300284 p->id, out_size, expected_size);
301285 return -1;
302286 }
@@ -307,7 +291,6 @@ static MultiFDMethods multifd_zlib_ops = {
307291 .send_setup = zlib_send_setup,
308292 .send_cleanup = zlib_send_cleanup,
309293 .send_prepare = zlib_send_prepare,
310- .send_write = zlib_send_write,
311294 .recv_setup = zlib_recv_setup,
312295 .recv_cleanup = zlib_recv_cleanup,
313296 .recv_pages = zlib_recv_pages
--- a/migration/multifd-zstd.c
+++ b/migration/multifd-zstd.c
@@ -55,7 +55,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
5555 z->zcs = ZSTD_createCStream();
5656 if (!z->zcs) {
5757 g_free(z);
58- error_setg(errp, "multifd %d: zstd createCStream failed", p->id);
58+ error_setg(errp, "multifd %u: zstd createCStream failed", p->id);
5959 return -1;
6060 }
6161
@@ -63,17 +63,17 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
6363 if (ZSTD_isError(res)) {
6464 ZSTD_freeCStream(z->zcs);
6565 g_free(z);
66- error_setg(errp, "multifd %d: initCStream failed with error %s",
66+ error_setg(errp, "multifd %u: initCStream failed with error %s",
6767 p->id, ZSTD_getErrorName(res));
6868 return -1;
6969 }
70- /* To be safe, we reserve twice the size of the packet */
71- z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
70+ /* This is the maxium size of the compressed buffer */
71+ z->zbuff_len = ZSTD_compressBound(MULTIFD_PACKET_SIZE);
7272 z->zbuff = g_try_malloc(z->zbuff_len);
7373 if (!z->zbuff) {
7474 ZSTD_freeCStream(z->zcs);
7575 g_free(z);
76- error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
76+ error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
7777 return -1;
7878 }
7979 return 0;
@@ -121,13 +121,13 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
121121 z->out.size = z->zbuff_len;
122122 z->out.pos = 0;
123123
124- for (i = 0; i < p->pages->num; i++) {
124+ for (i = 0; i < p->normal_num; i++) {
125125 ZSTD_EndDirective flush = ZSTD_e_continue;
126126
127- if (i == p->pages->num - 1) {
127+ if (i == p->normal_num - 1) {
128128 flush = ZSTD_e_flush;
129129 }
130- z->in.src = p->pages->block->host + p->pages->offset[i];
130+ z->in.src = p->pages->block->host + p->normal[i];
131131 z->in.size = page_size;
132132 z->in.pos = 0;
133133
@@ -144,16 +144,19 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
144144 } while (ret > 0 && (z->in.size - z->in.pos > 0)
145145 && (z->out.size - z->out.pos > 0));
146146 if (ret > 0 && (z->in.size - z->in.pos > 0)) {
147- error_setg(errp, "multifd %d: compressStream buffer too small",
147+ error_setg(errp, "multifd %u: compressStream buffer too small",
148148 p->id);
149149 return -1;
150150 }
151151 if (ZSTD_isError(ret)) {
152- error_setg(errp, "multifd %d: compressStream error %s",
152+ error_setg(errp, "multifd %u: compressStream error %s",
153153 p->id, ZSTD_getErrorName(ret));
154154 return -1;
155155 }
156156 }
157+ p->iov[p->iovs_num].iov_base = z->zbuff;
158+ p->iov[p->iovs_num].iov_len = z->out.pos;
159+ p->iovs_num++;
157160 p->next_packet_size = z->out.pos;
158161 p->flags |= MULTIFD_FLAG_ZSTD;
159162
@@ -161,25 +164,6 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
161164 }
162165
163166 /**
164- * zstd_send_write: do the actual write of the data
165- *
166- * Do the actual write of the comprresed buffer.
167- *
168- * Returns 0 for success or -1 for error
169- *
170- * @p: Params for the channel that we are using
171- * @used: number of pages used
172- * @errp: pointer to an error
173- */
174-static int zstd_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
175-{
176- struct zstd_data *z = p->data;
177-
178- return qio_channel_write_all(p->c, (void *)z->zbuff, p->next_packet_size,
179- errp);
180-}
181-
182-/**
183167 * zstd_recv_setup: setup receive side
184168 *
185169 * Create the compressed channel and buffer.
@@ -198,7 +182,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
198182 z->zds = ZSTD_createDStream();
199183 if (!z->zds) {
200184 g_free(z);
201- error_setg(errp, "multifd %d: zstd createDStream failed", p->id);
185+ error_setg(errp, "multifd %u: zstd createDStream failed", p->id);
202186 return -1;
203187 }
204188
@@ -206,7 +190,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
206190 if (ZSTD_isError(ret)) {
207191 ZSTD_freeDStream(z->zds);
208192 g_free(z);
209- error_setg(errp, "multifd %d: initDStream failed with error %s",
193+ error_setg(errp, "multifd %u: initDStream failed with error %s",
210194 p->id, ZSTD_getErrorName(ret));
211195 return -1;
212196 }
@@ -217,7 +201,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
217201 if (!z->zbuff) {
218202 ZSTD_freeDStream(z->zds);
219203 g_free(z);
220- error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
204+ error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
221205 return -1;
222206 }
223207 return 0;
@@ -258,14 +242,14 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
258242 uint32_t in_size = p->next_packet_size;
259243 uint32_t out_size = 0;
260244 size_t page_size = qemu_target_page_size();
261- uint32_t expected_size = p->pages->num * page_size;
245+ uint32_t expected_size = p->normal_num * page_size;
262246 uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
263247 struct zstd_data *z = p->data;
264248 int ret;
265249 int i;
266250
267251 if (flags != MULTIFD_FLAG_ZSTD) {
268- error_setg(errp, "multifd %d: flags received %x flags expected %x",
252+ error_setg(errp, "multifd %u: flags received %x flags expected %x",
269253 p->id, flags, MULTIFD_FLAG_ZSTD);
270254 return -1;
271255 }
@@ -279,8 +263,8 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
279263 z->in.size = in_size;
280264 z->in.pos = 0;
281265
282- for (i = 0; i < p->pages->num; i++) {
283- z->out.dst = p->pages->block->host + p->pages->offset[i];
266+ for (i = 0; i < p->normal_num; i++) {
267+ z->out.dst = p->host + p->normal[i];
284268 z->out.size = page_size;
285269 z->out.pos = 0;
286270
@@ -297,19 +281,19 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
297281 } while (ret > 0 && (z->in.size - z->in.pos > 0)
298282 && (z->out.pos < page_size));
299283 if (ret > 0 && (z->out.pos < page_size)) {
300- error_setg(errp, "multifd %d: decompressStream buffer too small",
284+ error_setg(errp, "multifd %u: decompressStream buffer too small",
301285 p->id);
302286 return -1;
303287 }
304288 if (ZSTD_isError(ret)) {
305- error_setg(errp, "multifd %d: decompressStream returned %s",
289+ error_setg(errp, "multifd %u: decompressStream returned %s",
306290 p->id, ZSTD_getErrorName(ret));
307291 return ret;
308292 }
309293 out_size += z->out.pos;
310294 }
311295 if (out_size != expected_size) {
312- error_setg(errp, "multifd %d: packet size received %d size expected %d",
296+ error_setg(errp, "multifd %u: packet size received %u size expected %u",
313297 p->id, out_size, expected_size);
314298 return -1;
315299 }
@@ -320,7 +304,6 @@ static MultiFDMethods multifd_zstd_ops = {
320304 .send_setup = zstd_send_setup,
321305 .send_cleanup = zstd_send_cleanup,
322306 .send_prepare = zstd_send_prepare,
323- .send_write = zstd_send_write,
324307 .recv_setup = zstd_recv_setup,
325308 .recv_cleanup = zstd_recv_cleanup,
326309 .recv_pages = zstd_recv_pages
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -86,28 +86,21 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
8686 */
8787 static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
8888 {
89- p->next_packet_size = p->pages->num * qemu_target_page_size();
89+ MultiFDPages_t *pages = p->pages;
90+ size_t page_size = qemu_target_page_size();
91+
92+ for (int i = 0; i < p->normal_num; i++) {
93+ p->iov[p->iovs_num].iov_base = pages->block->host + p->normal[i];
94+ p->iov[p->iovs_num].iov_len = page_size;
95+ p->iovs_num++;
96+ }
97+
98+ p->next_packet_size = p->normal_num * page_size;
9099 p->flags |= MULTIFD_FLAG_NOCOMP;
91100 return 0;
92101 }
93102
94103 /**
95- * nocomp_send_write: do the actual write of the data
96- *
97- * For no compression we just have to write the data.
98- *
99- * Returns 0 for success or -1 for error
100- *
101- * @p: Params for the channel that we are using
102- * @used: number of pages used
103- * @errp: pointer to an error
104- */
105-static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
106-{
107- return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
108-}
109-
110-/**
111104 * nocomp_recv_setup: setup receive side
112105 *
113106 * For no compression this function does nothing.
@@ -146,20 +139,24 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
146139 static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
147140 {
148141 uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
142+ size_t page_size = qemu_target_page_size();
149143
150144 if (flags != MULTIFD_FLAG_NOCOMP) {
151- error_setg(errp, "multifd %d: flags received %x flags expected %x",
145+ error_setg(errp, "multifd %u: flags received %x flags expected %x",
152146 p->id, flags, MULTIFD_FLAG_NOCOMP);
153147 return -1;
154148 }
155- return qio_channel_readv_all(p->c, p->pages->iov, p->pages->num, errp);
149+ for (int i = 0; i < p->normal_num; i++) {
150+ p->iov[i].iov_base = p->host + p->normal[i];
151+ p->iov[i].iov_len = page_size;
152+ }
153+ return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
156154 }
157155
158156 static MultiFDMethods multifd_nocomp_ops = {
159157 .send_setup = nocomp_send_setup,
160158 .send_cleanup = nocomp_send_cleanup,
161159 .send_prepare = nocomp_send_prepare,
162- .send_write = nocomp_send_write,
163160 .recv_setup = nocomp_recv_setup,
164161 .recv_cleanup = nocomp_recv_cleanup,
165162 .recv_pages = nocomp_recv_pages
@@ -212,8 +209,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
212209 }
213210
214211 if (msg.version != MULTIFD_VERSION) {
215- error_setg(errp, "multifd: received packet version %d "
216- "expected %d", msg.version, MULTIFD_VERSION);
212+ error_setg(errp, "multifd: received packet version %u "
213+ "expected %u", msg.version, MULTIFD_VERSION);
217214 return -1;
218215 }
219216
@@ -229,8 +226,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
229226 }
230227
231228 if (msg.id > migrate_multifd_channels()) {
232- error_setg(errp, "multifd: received channel version %d "
233- "expected %d", msg.version, MULTIFD_VERSION);
229+ error_setg(errp, "multifd: received channel version %u "
230+ "expected %u", msg.version, MULTIFD_VERSION);
234231 return -1;
235232 }
236233
@@ -242,7 +239,6 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
242239 MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
243240
244241 pages->allocated = size;
245- pages->iov = g_new0(struct iovec, size);
246242 pages->offset = g_new0(ram_addr_t, size);
247243
248244 return pages;
@@ -254,8 +250,6 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
254250 pages->allocated = 0;
255251 pages->packet_num = 0;
256252 pages->block = NULL;
257- g_free(pages->iov);
258- pages->iov = NULL;
259253 g_free(pages->offset);
260254 pages->offset = NULL;
261255 g_free(pages);
@@ -268,7 +262,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
268262
269263 packet->flags = cpu_to_be32(p->flags);
270264 packet->pages_alloc = cpu_to_be32(p->pages->allocated);
271- packet->pages_used = cpu_to_be32(p->pages->num);
265+ packet->normal_pages = cpu_to_be32(p->normal_num);
272266 packet->next_packet_size = cpu_to_be32(p->next_packet_size);
273267 packet->packet_num = cpu_to_be64(p->packet_num);
274268
@@ -276,9 +270,9 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
276270 strncpy(packet->ramblock, p->pages->block->idstr, 256);
277271 }
278272
279- for (i = 0; i < p->pages->num; i++) {
273+ for (i = 0; i < p->normal_num; i++) {
280274 /* there are architectures where ram_addr_t is 32 bit */
281- uint64_t temp = p->pages->offset[i];
275+ uint64_t temp = p->normal[i];
282276
283277 packet->offset[i] = cpu_to_be64(temp);
284278 }
@@ -288,7 +282,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
288282 {
289283 MultiFDPacket_t *packet = p->packet;
290284 size_t page_size = qemu_target_page_size();
291- uint32_t pages_max = MULTIFD_PACKET_SIZE / page_size;
285+ uint32_t page_count = MULTIFD_PACKET_SIZE / page_size;
292286 RAMBlock *block;
293287 int i;
294288
@@ -303,7 +297,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
303297 packet->version = be32_to_cpu(packet->version);
304298 if (packet->version != MULTIFD_VERSION) {
305299 error_setg(errp, "multifd: received packet "
306- "version %d and expected version %d",
300+ "version %u and expected version %u",
307301 packet->version, MULTIFD_VERSION);
308302 return -1;
309303 }
@@ -315,33 +309,25 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
315309 * If we received a packet that is 100 times bigger than expected
316310 * just stop migration. It is a magic number.
317311 */
318- if (packet->pages_alloc > pages_max * 100) {
312+ if (packet->pages_alloc > page_count) {
319313 error_setg(errp, "multifd: received packet "
320- "with size %d and expected a maximum size of %d",
321- packet->pages_alloc, pages_max * 100) ;
314+ "with size %u and expected a size of %u",
315+ packet->pages_alloc, page_count) ;
322316 return -1;
323317 }
324- /*
325- * We received a packet that is bigger than expected but inside
326- * reasonable limits (see previous comment). Just reallocate.
327- */
328- if (packet->pages_alloc > p->pages->allocated) {
329- multifd_pages_clear(p->pages);
330- p->pages = multifd_pages_init(packet->pages_alloc);
331- }
332318
333- p->pages->num = be32_to_cpu(packet->pages_used);
334- if (p->pages->num > packet->pages_alloc) {
319+ p->normal_num = be32_to_cpu(packet->normal_pages);
320+ if (p->normal_num > packet->pages_alloc) {
335321 error_setg(errp, "multifd: received packet "
336- "with %d pages and expected maximum pages are %d",
337- p->pages->num, packet->pages_alloc) ;
322+ "with %u pages and expected maximum pages are %u",
323+ p->normal_num, packet->pages_alloc) ;
338324 return -1;
339325 }
340326
341327 p->next_packet_size = be32_to_cpu(packet->next_packet_size);
342328 p->packet_num = be64_to_cpu(packet->packet_num);
343329
344- if (p->pages->num == 0) {
330+ if (p->normal_num == 0) {
345331 return 0;
346332 }
347333
@@ -354,8 +340,8 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
354340 return -1;
355341 }
356342
357- p->pages->block = block;
358- for (i = 0; i < p->pages->num; i++) {
343+ p->host = block->host;
344+ for (i = 0; i < p->normal_num; i++) {
359345 uint64_t offset = be64_to_cpu(packet->offset[i]);
360346
361347 if (offset > (block->used_length - page_size)) {
@@ -364,9 +350,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
364350 offset, block->used_length);
365351 return -1;
366352 }
367- p->pages->offset[i] = offset;
368- p->pages->iov[i].iov_base = block->host + offset;
369- p->pages->iov[i].iov_len = page_size;
353+ p->normal[i] = offset;
370354 }
371355
372356 return 0;
@@ -470,8 +454,6 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
470454
471455 if (pages->block == block) {
472456 pages->offset[pages->num] = offset;
473- pages->iov[pages->num].iov_base = block->host + offset;
474- pages->iov[pages->num].iov_len = qemu_target_page_size();
475457 pages->num++;
476458
477459 if (pages->num < pages->allocated) {
@@ -567,6 +549,10 @@ void multifd_save_cleanup(void)
567549 p->packet_len = 0;
568550 g_free(p->packet);
569551 p->packet = NULL;
552+ g_free(p->iov);
553+ p->iov = NULL;
554+ g_free(p->normal);
555+ p->normal = NULL;
570556 multifd_send_state->ops->send_cleanup(p, &local_err);
571557 if (local_err) {
572558 migrate_set_error(migrate_get_current(), local_err);
@@ -651,11 +637,17 @@ static void *multifd_send_thread(void *opaque)
651637 qemu_mutex_lock(&p->mutex);
652638
653639 if (p->pending_job) {
654- uint32_t used = p->pages->num;
655640 uint64_t packet_num = p->packet_num;
656641 uint32_t flags = p->flags;
642+ p->iovs_num = 1;
643+ p->normal_num = 0;
644+
645+ for (int i = 0; i < p->pages->num; i++) {
646+ p->normal[p->normal_num] = p->pages->offset[i];
647+ p->normal_num++;
648+ }
657649
658- if (used) {
650+ if (p->normal_num) {
659651 ret = multifd_send_state->ops->send_prepare(p, &local_err);
660652 if (ret != 0) {
661653 qemu_mutex_unlock(&p->mutex);
@@ -665,27 +657,23 @@ static void *multifd_send_thread(void *opaque)
665657 multifd_send_fill_packet(p);
666658 p->flags = 0;
667659 p->num_packets++;
668- p->num_pages += used;
660+ p->total_normal_pages += p->normal_num;
669661 p->pages->num = 0;
670662 p->pages->block = NULL;
671663 qemu_mutex_unlock(&p->mutex);
672664
673- trace_multifd_send(p->id, packet_num, used, flags,
665+ trace_multifd_send(p->id, packet_num, p->normal_num, flags,
674666 p->next_packet_size);
675667
676- ret = qio_channel_write_all(p->c, (void *)p->packet,
677- p->packet_len, &local_err);
668+ p->iov[0].iov_len = p->packet_len;
669+ p->iov[0].iov_base = p->packet;
670+
671+ ret = qio_channel_writev_all(p->c, p->iov, p->iovs_num,
672+ &local_err);
678673 if (ret != 0) {
679674 break;
680675 }
681676
682- if (used) {
683- ret = multifd_send_state->ops->send_write(p, used, &local_err);
684- if (ret != 0) {
685- break;
686- }
687- }
688-
689677 qemu_mutex_lock(&p->mutex);
690678 p->pending_job--;
691679 qemu_mutex_unlock(&p->mutex);
@@ -724,7 +712,7 @@ out:
724712 qemu_mutex_unlock(&p->mutex);
725713
726714 rcu_unregister_thread();
727- trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
715+ trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages);
728716
729717 return NULL;
730718 }
@@ -922,6 +910,9 @@ int multifd_save_setup(Error **errp)
922910 p->packet->version = cpu_to_be32(MULTIFD_VERSION);
923911 p->name = g_strdup_printf("multifdsend_%d", i);
924912 p->tls_hostname = g_strdup(s->hostname);
913+ /* We need one extra place for the packet header */
914+ p->iov = g_new0(struct iovec, page_count + 1);
915+ p->normal = g_new0(ram_addr_t, page_count);
925916 socket_send_channel_create(multifd_new_send_channel_async, p);
926917 }
927918
@@ -1016,11 +1007,13 @@ int multifd_load_cleanup(Error **errp)
10161007 qemu_sem_destroy(&p->sem_sync);
10171008 g_free(p->name);
10181009 p->name = NULL;
1019- multifd_pages_clear(p->pages);
1020- p->pages = NULL;
10211010 p->packet_len = 0;
10221011 g_free(p->packet);
10231012 p->packet = NULL;
1013+ g_free(p->iov);
1014+ p->iov = NULL;
1015+ g_free(p->normal);
1016+ p->normal = NULL;
10241017 multifd_recv_state->ops->recv_cleanup(p);
10251018 }
10261019 qemu_sem_destroy(&multifd_recv_state->sem_sync);
@@ -1069,7 +1062,6 @@ static void *multifd_recv_thread(void *opaque)
10691062 rcu_register_thread();
10701063
10711064 while (true) {
1072- uint32_t used;
10731065 uint32_t flags;
10741066
10751067 if (p->quit) {
@@ -1092,17 +1084,16 @@ static void *multifd_recv_thread(void *opaque)
10921084 break;
10931085 }
10941086
1095- used = p->pages->num;
10961087 flags = p->flags;
10971088 /* recv methods don't know how to handle the SYNC flag */
10981089 p->flags &= ~MULTIFD_FLAG_SYNC;
1099- trace_multifd_recv(p->id, p->packet_num, used, flags,
1090+ trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags,
11001091 p->next_packet_size);
11011092 p->num_packets++;
1102- p->num_pages += used;
1093+ p->total_normal_pages += p->normal_num;
11031094 qemu_mutex_unlock(&p->mutex);
11041095
1105- if (used) {
1096+ if (p->normal_num) {
11061097 ret = multifd_recv_state->ops->recv_pages(p, &local_err);
11071098 if (ret != 0) {
11081099 break;
@@ -1124,7 +1115,7 @@ static void *multifd_recv_thread(void *opaque)
11241115 qemu_mutex_unlock(&p->mutex);
11251116
11261117 rcu_unregister_thread();
1127- trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1118+ trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages);
11281119
11291120 return NULL;
11301121 }
@@ -1156,11 +1147,12 @@ int multifd_load_setup(Error **errp)
11561147 qemu_sem_init(&p->sem_sync, 0);
11571148 p->quit = false;
11581149 p->id = i;
1159- p->pages = multifd_pages_init(page_count);
11601150 p->packet_len = sizeof(MultiFDPacket_t)
11611151 + sizeof(uint64_t) * page_count;
11621152 p->packet = g_malloc0(p->packet_len);
11631153 p->name = g_strdup_printf("multifdrecv_%d", i);
1154+ p->iov = g_new0(struct iovec, page_count);
1155+ p->normal = g_new0(ram_addr_t, page_count);
11641156 }
11651157
11661158 for (i = 0; i < thread_count; i++) {
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -44,7 +44,8 @@ typedef struct {
4444 uint32_t flags;
4545 /* maximum number of allocated pages */
4646 uint32_t pages_alloc;
47- uint32_t pages_used;
47+ /* non zero pages */
48+ uint32_t normal_pages;
4849 /* size of the next packet that contains pages */
4950 uint32_t next_packet_size;
5051 uint64_t packet_num;
@@ -62,8 +63,6 @@ typedef struct {
6263 uint64_t packet_num;
6364 /* offset of each page */
6465 ram_addr_t *offset;
65- /* pointer to each page */
66- struct iovec *iov;
6766 RAMBlock *block;
6867 } MultiFDPages_t;
6968
@@ -106,10 +105,18 @@ typedef struct {
106105 /* thread local variables */
107106 /* packets sent through this channel */
108107 uint64_t num_packets;
109- /* pages sent through this channel */
110- uint64_t num_pages;
108+ /* non zero pages sent through this channel */
109+ uint64_t total_normal_pages;
111110 /* syncs main thread and channels */
112111 QemuSemaphore sem_sync;
112+ /* buffers to send */
113+ struct iovec *iov;
114+ /* number of iovs used */
115+ uint32_t iovs_num;
116+ /* Pages that are not zero */
117+ ram_addr_t *normal;
118+ /* num of non zero pages */
119+ uint32_t normal_num;
113120 /* used for compression methods */
114121 void *data;
115122 } MultiFDSendParams;
@@ -130,8 +137,8 @@ typedef struct {
130137 bool running;
131138 /* should this thread finish */
132139 bool quit;
133- /* array of pages to receive */
134- MultiFDPages_t *pages;
140+ /* ramblock host address */
141+ uint8_t *host;
135142 /* packet allocated len */
136143 uint32_t packet_len;
137144 /* pointer to the packet */
@@ -145,10 +152,16 @@ typedef struct {
145152 uint32_t next_packet_size;
146153 /* packets sent through this channel */
147154 uint64_t num_packets;
148- /* pages sent through this channel */
149- uint64_t num_pages;
155+ /* non zero pages recv through this channel */
156+ uint64_t total_normal_pages;
150157 /* syncs main thread and channels */
151158 QemuSemaphore sem_sync;
159+ /* buffers to recv */
160+ struct iovec *iov;
161+ /* Pages that are not zero */
162+ ram_addr_t *normal;
163+ /* num of non zero pages */
164+ uint32_t normal_num;
152165 /* used for de-compression methods */
153166 void *data;
154167 } MultiFDRecvParams;
@@ -160,8 +173,6 @@ typedef struct {
160173 void (*send_cleanup)(MultiFDSendParams *p, Error **errp);
161174 /* Prepare the send packet */
162175 int (*send_prepare)(MultiFDSendParams *p, Error **errp);
163- /* Write the send packet */
164- int (*send_write)(MultiFDSendParams *p, uint32_t used, Error **errp);
165176 /* Setup for receiving side */
166177 int (*recv_setup)(MultiFDRecvParams *p, Error **errp);
167178 /* Cleanup for receiving side */
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -283,15 +283,13 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
283283 }
284284
285285 #ifdef UFFD_FEATURE_THREAD_ID
286- if (migrate_postcopy_blocktime() && mis &&
287- UFFD_FEATURE_THREAD_ID & supported_features) {
288- /* kernel supports that feature */
289- /* don't create blocktime_context if it exists */
290- if (!mis->blocktime_ctx) {
291- mis->blocktime_ctx = blocktime_context_new();
292- }
293-
286+ if (UFFD_FEATURE_THREAD_ID & supported_features) {
294287 asked_features |= UFFD_FEATURE_THREAD_ID;
288+ if (migrate_postcopy_blocktime()) {
289+ if (!mis->blocktime_ctx) {
290+ mis->blocktime_ctx = blocktime_context_new();
291+ }
292+ }
295293 }
296294 #endif
297295
@@ -525,6 +523,19 @@ int postcopy_ram_incoming_init(MigrationIncomingState *mis)
525523 return 0;
526524 }
527525
526+static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
527+{
528+ if (mis->postcopy_tmp_page) {
529+ munmap(mis->postcopy_tmp_page, mis->largest_page_size);
530+ mis->postcopy_tmp_page = NULL;
531+ }
532+
533+ if (mis->postcopy_tmp_zero_page) {
534+ munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
535+ mis->postcopy_tmp_zero_page = NULL;
536+ }
537+}
538+
528539 /*
529540 * At the end of a migration where postcopy_ram_incoming_init was called.
530541 */
@@ -566,14 +577,8 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
566577 }
567578 }
568579
569- if (mis->postcopy_tmp_page) {
570- munmap(mis->postcopy_tmp_page, mis->largest_page_size);
571- mis->postcopy_tmp_page = NULL;
572- }
573- if (mis->postcopy_tmp_zero_page) {
574- munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
575- mis->postcopy_tmp_zero_page = NULL;
576- }
580+ postcopy_temp_pages_cleanup(mis);
581+
577582 trace_postcopy_ram_incoming_cleanup_blocktime(
578583 get_postcopy_total_blocktime());
579584
@@ -1084,6 +1089,40 @@ retry:
10841089 return NULL;
10851090 }
10861091
1092+static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
1093+{
1094+ int err;
1095+
1096+ mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
1097+ PROT_READ | PROT_WRITE,
1098+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1099+ if (mis->postcopy_tmp_page == MAP_FAILED) {
1100+ err = errno;
1101+ mis->postcopy_tmp_page = NULL;
1102+ error_report("%s: Failed to map postcopy_tmp_page %s",
1103+ __func__, strerror(err));
1104+ return -err;
1105+ }
1106+
1107+ /*
1108+ * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
1109+ */
1110+ mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
1111+ PROT_READ | PROT_WRITE,
1112+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1113+ if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
1114+ err = errno;
1115+ mis->postcopy_tmp_zero_page = NULL;
1116+ error_report("%s: Failed to map large zero page %s",
1117+ __func__, strerror(err));
1118+ return -err;
1119+ }
1120+
1121+ memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
1122+
1123+ return 0;
1124+}
1125+
10871126 int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
10881127 {
10891128 /* Open the fd for the kernel to give us userfaults */
@@ -1124,32 +1163,11 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
11241163 return -1;
11251164 }
11261165
1127- mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
1128- PROT_READ | PROT_WRITE, MAP_PRIVATE |
1129- MAP_ANONYMOUS, -1, 0);
1130- if (mis->postcopy_tmp_page == MAP_FAILED) {
1131- mis->postcopy_tmp_page = NULL;
1132- error_report("%s: Failed to map postcopy_tmp_page %s",
1133- __func__, strerror(errno));
1166+ if (postcopy_temp_pages_setup(mis)) {
1167+ /* Error dumped in the sub-function */
11341168 return -1;
11351169 }
11361170
1137- /*
1138- * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
1139- */
1140- mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
1141- PROT_READ | PROT_WRITE,
1142- MAP_PRIVATE | MAP_ANONYMOUS,
1143- -1, 0);
1144- if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
1145- int e = errno;
1146- mis->postcopy_tmp_zero_page = NULL;
1147- error_report("%s: Failed to map large zero page %s",
1148- __func__, strerror(e));
1149- return -e;
1150- }
1151- memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
1152-
11531171 trace_postcopy_ram_enable_notify();
11541172
11551173 return 0;
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -325,7 +325,8 @@ struct RAMState {
325325 uint64_t xbzrle_bytes_prev;
326326 /* Start using XBZRLE (e.g., after the first round). */
327327 bool xbzrle_enabled;
328-
328+ /* Are we on the last stage of migration */
329+ bool last_stage;
329330 /* compression statistics since the beginning of the period */
330331 /* amount of count that no free thread to compress data */
331332 uint64_t compress_thread_busy_prev;
@@ -354,6 +355,12 @@ static RAMState *ram_state;
354355
355356 static NotifierWithReturnList precopy_notifier_list;
356357
358+/* Whether postcopy has queued requests? */
359+static bool postcopy_has_request(RAMState *rs)
360+{
361+ return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
362+}
363+
357364 void precopy_infrastructure_init(void)
358365 {
359366 notifier_with_return_list_init(&precopy_notifier_list);
@@ -386,6 +393,18 @@ uint64_t ram_bytes_remaining(void)
386393
387394 MigrationStats ram_counters;
388395
396+static void ram_transferred_add(uint64_t bytes)
397+{
398+ if (runstate_is_running()) {
399+ ram_counters.precopy_bytes += bytes;
400+ } else if (migration_in_postcopy()) {
401+ ram_counters.postcopy_bytes += bytes;
402+ } else {
403+ ram_counters.downtime_bytes += bytes;
404+ }
405+ ram_counters.transferred += bytes;
406+}
407+
389408 /* used by the search for pages to send */
390409 struct PageSearchStatus {
391410 /* Current block being searched */
@@ -683,11 +702,10 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
683702 * @current_addr: addr of the page
684703 * @block: block that contains the page we want to send
685704 * @offset: offset inside the block for the page
686- * @last_stage: if we are at the completion stage
687705 */
688706 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
689707 ram_addr_t current_addr, RAMBlock *block,
690- ram_addr_t offset, bool last_stage)
708+ ram_addr_t offset)
691709 {
692710 int encoded_len = 0, bytes_xbzrle;
693711 uint8_t *prev_cached_page;
@@ -695,7 +713,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
695713 if (!cache_is_cached(XBZRLE.cache, current_addr,
696714 ram_counters.dirty_sync_count)) {
697715 xbzrle_counters.cache_miss++;
698- if (!last_stage) {
716+ if (!rs->last_stage) {
699717 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
700718 ram_counters.dirty_sync_count) == -1) {
701719 return -1;
@@ -734,7 +752,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
734752 * Update the cache contents, so that it corresponds to the data
735753 * sent, in all cases except where we skip the page.
736754 */
737- if (!last_stage && encoded_len != 0) {
755+ if (!rs->last_stage && encoded_len != 0) {
738756 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
739757 /*
740758 * In the case where we couldn't compress, ensure that the caller
@@ -767,7 +785,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
767785 * RAM_SAVE_FLAG_CONTINUE.
768786 */
769787 xbzrle_counters.bytes += bytes_xbzrle - 8;
770- ram_counters.transferred += bytes_xbzrle;
788+ ram_transferred_add(bytes_xbzrle);
771789
772790 return 1;
773791 }
@@ -1158,6 +1176,15 @@ static void migration_bitmap_sync_precopy(RAMState *rs)
11581176 }
11591177 }
11601178
1179+static void ram_release_page(const char *rbname, uint64_t offset)
1180+{
1181+ if (!migrate_release_ram() || !migration_in_postcopy()) {
1182+ return;
1183+ }
1184+
1185+ ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
1186+}
1187+
11611188 /**
11621189 * save_zero_page_to_file: send the zero page to the file
11631190 *
@@ -1179,6 +1206,7 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
11791206 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
11801207 qemu_put_byte(file, 0);
11811208 len += 1;
1209+ ram_release_page(block->idstr, offset);
11821210 }
11831211 return len;
11841212 }
@@ -1198,21 +1226,12 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
11981226
11991227 if (len) {
12001228 ram_counters.duplicate++;
1201- ram_counters.transferred += len;
1229+ ram_transferred_add(len);
12021230 return 1;
12031231 }
12041232 return -1;
12051233 }
12061234
1207-static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
1208-{
1209- if (!migrate_release_ram() || !migration_in_postcopy()) {
1210- return;
1211- }
1212-
1213- ram_discard_range(rbname, offset, ((ram_addr_t)pages) << TARGET_PAGE_BITS);
1214-}
1215-
12161235 /*
12171236 * @pages: the number of pages written by the control path,
12181237 * < 0 - error
@@ -1234,7 +1253,7 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
12341253 }
12351254
12361255 if (bytes_xmit) {
1237- ram_counters.transferred += bytes_xmit;
1256+ ram_transferred_add(bytes_xmit);
12381257 *pages = 1;
12391258 }
12401259
@@ -1265,8 +1284,8 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
12651284 static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
12661285 uint8_t *buf, bool async)
12671286 {
1268- ram_counters.transferred += save_page_header(rs, rs->f, block,
1269- offset | RAM_SAVE_FLAG_PAGE);
1287+ ram_transferred_add(save_page_header(rs, rs->f, block,
1288+ offset | RAM_SAVE_FLAG_PAGE));
12701289 if (async) {
12711290 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
12721291 migrate_release_ram() &
@@ -1274,7 +1293,7 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
12741293 } else {
12751294 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
12761295 }
1277- ram_counters.transferred += TARGET_PAGE_SIZE;
1296+ ram_transferred_add(TARGET_PAGE_SIZE);
12781297 ram_counters.normal++;
12791298 return 1;
12801299 }
@@ -1290,9 +1309,8 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
12901309 * @rs: current RAM state
12911310 * @block: block that contains the page we want to send
12921311 * @offset: offset inside the block for the page
1293- * @last_stage: if we are at the completion stage
12941312 */
1295-static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
1313+static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
12961314 {
12971315 int pages = -1;
12981316 uint8_t *p;
@@ -1307,8 +1325,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
13071325 XBZRLE_cache_lock();
13081326 if (rs->xbzrle_enabled && !migration_in_postcopy()) {
13091327 pages = save_xbzrle_page(rs, &p, current_addr, block,
1310- offset, last_stage);
1311- if (!last_stage) {
1328+ offset);
1329+ if (!rs->last_stage) {
13121330 /* Can't send this cached data async, since the cache page
13131331 * might get updated before it gets to the wire
13141332 */
@@ -1341,13 +1359,11 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
13411359 ram_addr_t offset, uint8_t *source_buf)
13421360 {
13431361 RAMState *rs = ram_state;
1344- uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
1345- bool zero_page = false;
1362+ uint8_t *p = block->host + offset;
13461363 int ret;
13471364
13481365 if (save_zero_page_to_file(rs, f, block, offset)) {
1349- zero_page = true;
1350- goto exit;
1366+ return true;
13511367 }
13521368
13531369 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
@@ -1362,18 +1378,14 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
13621378 if (ret < 0) {
13631379 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
13641380 error_report("compressed data failed!");
1365- return false;
13661381 }
1367-
1368-exit:
1369- ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
1370- return zero_page;
1382+ return false;
13711383 }
13721384
13731385 static void
13741386 update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
13751387 {
1376- ram_counters.transferred += bytes_xmit;
1388+ ram_transferred_add(bytes_xmit);
13771389
13781390 if (param->zero_page) {
13791391 ram_counters.duplicate++;
@@ -1533,30 +1545,42 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
15331545 */
15341546 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
15351547 {
1548+ struct RAMSrcPageRequest *entry;
15361549 RAMBlock *block = NULL;
1550+ size_t page_size;
15371551
1538- if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
1552+ if (!postcopy_has_request(rs)) {
15391553 return NULL;
15401554 }
15411555
15421556 QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
1543- if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
1544- struct RAMSrcPageRequest *entry =
1545- QSIMPLEQ_FIRST(&rs->src_page_requests);
1546- block = entry->rb;
1547- *offset = entry->offset;
1548-
1549- if (entry->len > TARGET_PAGE_SIZE) {
1550- entry->len -= TARGET_PAGE_SIZE;
1551- entry->offset += TARGET_PAGE_SIZE;
1552- } else {
1553- memory_region_unref(block->mr);
1554- QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1555- g_free(entry);
1556- migration_consume_urgent_request();
1557- }
1557+
1558+ /*
1559+ * This should _never_ change even after we take the lock, because no one
1560+ * should be taking anything off the request list other than us.
1561+ */
1562+ assert(postcopy_has_request(rs));
1563+
1564+ entry = QSIMPLEQ_FIRST(&rs->src_page_requests);
1565+ block = entry->rb;
1566+ *offset = entry->offset;
1567+ page_size = qemu_ram_pagesize(block);
1568+ /* Each page request should only be multiple page size of the ramblock */
1569+ assert((entry->len % page_size) == 0);
1570+
1571+ if (entry->len > page_size) {
1572+ entry->len -= page_size;
1573+ entry->offset += page_size;
1574+ } else {
1575+ memory_region_unref(block->mr);
1576+ QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1577+ g_free(entry);
1578+ migration_consume_urgent_request();
15581579 }
15591580
1581+ trace_unqueue_page(block->idstr, *offset,
1582+ test_bit((*offset >> TARGET_PAGE_BITS), block->bmap));
1583+
15601584 return block;
15611585 }
15621586
@@ -1611,7 +1635,7 @@ static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss,
16111635 /* Check if page is from UFFD-managed region. */
16121636 if (pss->block->flags & RAM_UF_WRITEPROTECT) {
16131637 void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS);
1614- uint64_t run_length = (pss->page - start_page + 1) << TARGET_PAGE_BITS;
1638+ uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS;
16151639
16161640 /* Flush async buffers before un-protect. */
16171641 qemu_fflush(rs->f);
@@ -1931,30 +1955,8 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
19311955 {
19321956 RAMBlock *block;
19331957 ram_addr_t offset;
1934- bool dirty;
1935-
1936- do {
1937- block = unqueue_page(rs, &offset);
1938- /*
1939- * We're sending this page, and since it's postcopy nothing else
1940- * will dirty it, and we must make sure it doesn't get sent again
1941- * even if this queue request was received after the background
1942- * search already sent it.
1943- */
1944- if (block) {
1945- unsigned long page;
19461958
1947- page = offset >> TARGET_PAGE_BITS;
1948- dirty = test_bit(page, block->bmap);
1949- if (!dirty) {
1950- trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
1951- page);
1952- } else {
1953- trace_get_queued_page(block->idstr, (uint64_t)offset, page);
1954- }
1955- }
1956-
1957- } while (block && !dirty);
1959+ block = unqueue_page(rs, &offset);
19581960
19591961 if (!block) {
19601962 /*
@@ -2129,10 +2131,8 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
21292131 *
21302132 * @rs: current RAM state
21312133 * @pss: data about the page we want to send
2132- * @last_stage: if we are at the completion stage
21332134 */
2134-static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
2135- bool last_stage)
2135+static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
21362136 {
21372137 RAMBlock *block = pss->block;
21382138 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
@@ -2156,7 +2156,6 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
21562156 xbzrle_cache_zero_page(rs, block->offset + offset);
21572157 XBZRLE_cache_unlock();
21582158 }
2159- ram_release_pages(block->idstr, offset, res);
21602159 return res;
21612160 }
21622161
@@ -2171,7 +2170,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
21712170 return ram_save_multifd_page(rs, block, offset);
21722171 }
21732172
2174- return ram_save_page(rs, pss, last_stage);
2173+ return ram_save_page(rs, pss);
21752174 }
21762175
21772176 /**
@@ -2188,12 +2187,9 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
21882187 * Returns the number of pages written or negative on error
21892188 *
21902189 * @rs: current RAM state
2191- * @ms: current migration state
21922190 * @pss: data about the page we want to send
2193- * @last_stage: if we are at the completion stage
21942191 */
2195-static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
2196- bool last_stage)
2192+static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
21972193 {
21982194 int tmppages, pages = 0;
21992195 size_t pagesize_bits =
@@ -2211,7 +2207,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
22112207 do {
22122208 /* Check the pages is dirty and if it is send it */
22132209 if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2214- tmppages = ram_save_target_page(rs, pss, last_stage);
2210+ tmppages = ram_save_target_page(rs, pss);
22152211 if (tmppages < 0) {
22162212 return tmppages;
22172213 }
@@ -2230,7 +2226,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
22302226 offset_in_ramblock(pss->block,
22312227 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
22322228 /* The offset we leave with is the min boundary of host page and block */
2233- pss->page = MIN(pss->page, hostpage_boundary) - 1;
2229+ pss->page = MIN(pss->page, hostpage_boundary);
22342230
22352231 res = ram_save_release_protection(rs, pss, start_page);
22362232 return (res < 0 ? res : pages);
@@ -2245,13 +2241,11 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
22452241 * or negative on error
22462242 *
22472243 * @rs: current RAM state
2248- * @last_stage: if we are at the completion stage
22492244 *
22502245 * On systems where host-page-size > target-page-size it will send all the
22512246 * pages in a host page that are dirty.
22522247 */
2253-
2254-static int ram_find_and_save_block(RAMState *rs, bool last_stage)
2248+static int ram_find_and_save_block(RAMState *rs)
22552249 {
22562250 PageSearchStatus pss;
22572251 int pages = 0;
@@ -2280,7 +2274,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
22802274 }
22812275
22822276 if (found) {
2283- pages = ram_save_host_page(rs, &pss, last_stage);
2277+ pages = ram_save_host_page(rs, &pss);
22842278 }
22852279 } while (!pages && again);
22862280
@@ -2298,7 +2292,7 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero)
22982292 ram_counters.duplicate += pages;
22992293 } else {
23002294 ram_counters.normal += pages;
2301- ram_counters.transferred += size;
2295+ ram_transferred_add(size);
23022296 qemu_update_position(f, size);
23032297 }
23042298 }
@@ -2408,40 +2402,6 @@ static void ram_state_reset(RAMState *rs)
24082402
24092403 #define MAX_WAIT 50 /* ms, half buffered_file limit */
24102404
2411-/*
2412- * 'expected' is the value you expect the bitmap mostly to be full
2413- * of; it won't bother printing lines that are all this value.
2414- * If 'todump' is null the migration bitmap is dumped.
2415- */
2416-void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2417- unsigned long pages)
2418-{
2419- int64_t cur;
2420- int64_t linelen = 128;
2421- char linebuf[129];
2422-
2423- for (cur = 0; cur < pages; cur += linelen) {
2424- int64_t curb;
2425- bool found = false;
2426- /*
2427- * Last line; catch the case where the line length
2428- * is longer than remaining ram
2429- */
2430- if (cur + linelen > pages) {
2431- linelen = pages - cur;
2432- }
2433- for (curb = 0; curb < linelen; curb++) {
2434- bool thisbit = test_bit(cur + curb, todump);
2435- linebuf[curb] = thisbit ? '1' : '.';
2436- found = found || (thisbit != expected);
2437- }
2438- if (found) {
2439- linebuf[curb] = '\0';
2440- fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2441- }
2442- }
2443-}
2444-
24452405 /* **** functions for postcopy ***** */
24462406
24472407 void ram_postcopy_migrated_memory_release(MigrationState *ms)
@@ -2467,14 +2427,12 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
24672427 /**
24682428 * postcopy_send_discard_bm_ram: discard a RAMBlock
24692429 *
2470- * Returns zero on success
2471- *
24722430 * Callback from postcopy_each_ram_send_discard for each RAMBlock
24732431 *
24742432 * @ms: current migration state
24752433 * @block: RAMBlock to discard
24762434 */
2477-static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
2435+static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
24782436 {
24792437 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
24802438 unsigned long current;
@@ -2498,15 +2456,13 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
24982456 postcopy_discard_send_range(ms, one, discard_length);
24992457 current = one + discard_length;
25002458 }
2501-
2502- return 0;
25032459 }
25042460
2461+static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block);
2462+
25052463 /**
25062464 * postcopy_each_ram_send_discard: discard all RAMBlocks
25072465 *
2508- * Returns 0 for success or negative for error
2509- *
25102466 * Utility for the outgoing postcopy code.
25112467 * Calls postcopy_send_discard_bm_ram for each RAMBlock
25122468 * passing it bitmap indexes and name.
@@ -2515,27 +2471,29 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
25152471 *
25162472 * @ms: current migration state
25172473 */
2518-static int postcopy_each_ram_send_discard(MigrationState *ms)
2474+static void postcopy_each_ram_send_discard(MigrationState *ms)
25192475 {
25202476 struct RAMBlock *block;
2521- int ret;
25222477
25232478 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
25242479 postcopy_discard_send_init(ms, block->idstr);
25252480
25262481 /*
 2482+     * Deal with TPS != HPS and huge pages. It discards any partially sent
 2483+     * host-page size chunks, marks any partially dirty host-page size
2484+ * chunks as all dirty. In this case the host-page is the host-page
2485+ * for the particular RAMBlock, i.e. it might be a huge page.
2486+ */
2487+ postcopy_chunk_hostpages_pass(ms, block);
2488+
2489+ /*
25272490 * Postcopy sends chunks of bitmap over the wire, but it
25282491 * just needs indexes at this point, avoids it having
25292492 * target page specific code.
25302493 */
2531- ret = postcopy_send_discard_bm_ram(ms, block);
2494+ postcopy_send_discard_bm_ram(ms, block);
25322495 postcopy_discard_send_finish(ms);
2533- if (ret) {
2534- return ret;
2535- }
25362496 }
2537-
2538- return 0;
25392497 }
25402498
25412499 /**
@@ -2606,37 +2564,8 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
26062564 }
26072565
26082566 /**
2609- * postcopy_chunk_hostpages: discard any partially sent host page
2610- *
2611- * Utility for the outgoing postcopy code.
2612- *
2613- * Discard any partially sent host-page size chunks, mark any partially
2614- * dirty host-page size chunks as all dirty. In this case the host-page
2615- * is the host-page for the particular RAMBlock, i.e. it might be a huge page
2616- *
2617- * Returns zero on success
2618- *
2619- * @ms: current migration state
2620- * @block: block we want to work with
2621- */
2622-static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
2623-{
2624- postcopy_discard_send_init(ms, block->idstr);
2625-
2626- /*
2627- * Ensure that all partially dirty host pages are made fully dirty.
2628- */
2629- postcopy_chunk_hostpages_pass(ms, block);
2630-
2631- postcopy_discard_send_finish(ms);
2632- return 0;
2633-}
2634-
2635-/**
26362567 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
26372568 *
2638- * Returns zero on success
2639- *
26402569 * Transmit the set of pages to be discarded after precopy to the target
26412570 * these are pages that:
26422571 * a) Have been previously transmitted but are now dirty again
@@ -2647,11 +2576,9 @@ static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
26472576 *
26482577 * @ms: current migration state
26492578 */
2650-int ram_postcopy_send_discard_bitmap(MigrationState *ms)
2579+void ram_postcopy_send_discard_bitmap(MigrationState *ms)
26512580 {
26522581 RAMState *rs = ram_state;
2653- RAMBlock *block;
2654- int ret;
26552582
26562583 RCU_READ_LOCK_GUARD();
26572584
@@ -2663,21 +2590,9 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
26632590 rs->last_sent_block = NULL;
26642591 rs->last_page = 0;
26652592
2666- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2667- /* Deal with TPS != HPS and huge pages */
2668- ret = postcopy_chunk_hostpages(ms, block);
2669- if (ret) {
2670- return ret;
2671- }
2593+ postcopy_each_ram_send_discard(ms);
26722594
2673-#ifdef DEBUG_POSTCOPY
2674- ram_debug_dump_bitmap(block->bmap, true,
2675- block->used_length >> TARGET_PAGE_BITS);
2676-#endif
2677- }
26782595 trace_ram_postcopy_send_discard_bitmap();
2679-
2680- return postcopy_each_ram_send_discard(ms);
26812596 }
26822597
26832598 /**
@@ -3073,14 +2988,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
30732988 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
30742989 i = 0;
30752990 while ((ret = qemu_file_rate_limit(f)) == 0 ||
3076- !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2991+ postcopy_has_request(rs)) {
30772992 int pages;
30782993
30792994 if (qemu_file_get_error(f)) {
30802995 break;
30812996 }
30822997
3083- pages = ram_find_and_save_block(rs, false);
2998+ pages = ram_find_and_save_block(rs);
30842999 /* no more pages to sent */
30853000 if (pages == 0) {
30863001 done = 1;
@@ -3133,7 +3048,7 @@ out:
31333048 multifd_send_sync_main(rs->f);
31343049 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
31353050 qemu_fflush(f);
3136- ram_counters.transferred += 8;
3051+ ram_transferred_add(8);
31373052
31383053 ret = qemu_file_get_error(f);
31393054 }
@@ -3160,6 +3075,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
31603075 RAMState *rs = *temp;
31613076 int ret = 0;
31623077
3078+ rs->last_stage = !migration_in_colo_state();
3079+
31633080 WITH_RCU_READ_LOCK_GUARD() {
31643081 if (!migration_in_postcopy()) {
31653082 migration_bitmap_sync_precopy(rs);
@@ -3173,7 +3090,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
31733090 while (true) {
31743091 int pages;
31753092
3176- pages = ram_find_and_save_block(rs, !migration_in_colo_state());
3093+ pages = ram_find_and_save_block(rs);
31773094 /* no more blocks to sent */
31783095 if (pages == 0) {
31793096 break;
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -55,11 +55,9 @@ void mig_throttle_counter_reset(void);
5555 uint64_t ram_pagesize_summary(void);
5656 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
5757 void acct_update_position(QEMUFile *f, size_t size, bool zero);
58-void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
59- unsigned long pages);
6058 void ram_postcopy_migrated_memory_release(MigrationState *ms);
6159 /* For outgoing discard bitmap */
62-int ram_postcopy_send_discard_bitmap(MigrationState *ms);
60+void ram_postcopy_send_discard_bitmap(MigrationState *ms);
6361 /* For incoming postcopy discard */
6462 int ram_discard_range(const char *block_name, uint64_t start, size_t length);
6563 int ram_postcopy_incoming_init(MigrationIncomingState *mis);
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1298,8 +1298,9 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
12981298 save_section_footer(f, se);
12991299
13001300 if (ret < 0) {
1301- error_report("failed to save SaveStateEntry with id(name): %d(%s)",
1302- se->section_id, se->idstr);
1301+ error_report("failed to save SaveStateEntry with id(name): "
1302+ "%d(%s): %d",
1303+ se->section_id, se->idstr, ret);
13031304 qemu_file_set_error(f, ret);
13041305 }
13051306 if (ret <= 0) {
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -86,8 +86,6 @@ put_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)"
8686 qemu_file_fclose(void) ""
8787
8888 # ram.c
89-get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
90-get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx"
9189 migration_bitmap_sync_start(void) ""
9290 migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
9391 migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx"
@@ -113,25 +111,26 @@ ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRI
113111 ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64
114112 ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
115113 ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu"
114+unqueue_page(char *block, uint64_t offset, bool dirty) "ramblock '%s' offset 0x%"PRIx64" dirty %d"
116115
117116 # multifd.c
118-multifd_new_send_channel_async(uint8_t id) "channel %d"
119-multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d"
120-multifd_recv_new_channel(uint8_t id) "channel %d"
117+multifd_new_send_channel_async(uint8_t id) "channel %u"
118+multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " pages %u flags 0x%x next packet size %u"
119+multifd_recv_new_channel(uint8_t id) "channel %u"
121120 multifd_recv_sync_main(long packet_num) "packet num %ld"
122-multifd_recv_sync_main_signal(uint8_t id) "channel %d"
123-multifd_recv_sync_main_wait(uint8_t id) "channel %d"
121+multifd_recv_sync_main_signal(uint8_t id) "channel %u"
122+multifd_recv_sync_main_wait(uint8_t id) "channel %u"
124123 multifd_recv_terminate_threads(bool error) "error %d"
125-multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64
126-multifd_recv_thread_start(uint8_t id) "%d"
127-multifd_send(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d"
128-multifd_send_error(uint8_t id) "channel %d"
124+multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %u packets %" PRIu64 " pages %" PRIu64
125+multifd_recv_thread_start(uint8_t id) "%u"
126+multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u flags 0x%x next packet size %u"
127+multifd_send_error(uint8_t id) "channel %u"
129128 multifd_send_sync_main(long packet_num) "packet num %ld"
130-multifd_send_sync_main_signal(uint8_t id) "channel %d"
131-multifd_send_sync_main_wait(uint8_t id) "channel %d"
129+multifd_send_sync_main_signal(uint8_t id) "channel %u"
130+multifd_send_sync_main_wait(uint8_t id) "channel %u"
132131 multifd_send_terminate_threads(bool error) "error %d"
133-multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64
134-multifd_send_thread_start(uint8_t id) "%d"
132+multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64
133+multifd_send_thread_start(uint8_t id) "%u"
135134 multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s"
136135 multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s"
137136 multifd_tls_outgoing_handshake_complete(void *ioc) "ioc=%p"
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -293,6 +293,18 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
293293 monitor_printf(mon, "postcopy request count: %" PRIu64 "\n",
294294 info->ram->postcopy_requests);
295295 }
296+ if (info->ram->precopy_bytes) {
297+ monitor_printf(mon, "precopy ram: %" PRIu64 " kbytes\n",
298+ info->ram->precopy_bytes >> 10);
299+ }
300+ if (info->ram->downtime_bytes) {
301+ monitor_printf(mon, "downtime ram: %" PRIu64 " kbytes\n",
302+ info->ram->downtime_bytes >> 10);
303+ }
304+ if (info->ram->postcopy_bytes) {
305+ monitor_printf(mon, "postcopy ram: %" PRIu64 " kbytes\n",
306+ info->ram->postcopy_bytes >> 10);
307+ }
296308 }
297309
298310 if (info->has_disk) {
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -46,6 +46,15 @@
4646 # @pages-per-second: the number of memory pages transferred per second
4747 # (Since 4.0)
4848 #
49+# @precopy-bytes: The number of bytes sent in the pre-copy phase
50+# (since 7.0).
51+#
52+# @downtime-bytes: The number of bytes sent while the guest is paused
53+# (since 7.0).
54+#
55+# @postcopy-bytes: The number of bytes sent during the post-copy phase
56+# (since 7.0).
57+#
4958 # Since: 0.14
5059 ##
5160 { 'struct': 'MigrationStats',
@@ -54,7 +63,9 @@
5463 'normal-bytes': 'int', 'dirty-pages-rate' : 'int',
5564 'mbps' : 'number', 'dirty-sync-count' : 'int',
5665 'postcopy-requests' : 'int', 'page-size' : 'int',
57- 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64' } }
66+ 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64',
67+ 'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64',
68+ 'postcopy-bytes' : 'uint64' } }
5869
5970 ##
6071 # @XBZRLECacheStats:
--- a/target/openrisc/machine.c
+++ b/target/openrisc/machine.c
@@ -25,7 +25,6 @@ static const VMStateDescription vmstate_tlb_entry = {
2525 .name = "tlb_entry",
2626 .version_id = 1,
2727 .minimum_version_id = 1,
28- .minimum_version_id_old = 1,
2928 .fields = (VMStateField[]) {
3029 VMSTATE_UINTTL(mr, OpenRISCTLBEntry),
3130 VMSTATE_UINTTL(tr, OpenRISCTLBEntry),
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -421,7 +421,6 @@ static const VMStateDescription vmstate_tm = {
421421 .name = "cpu/tm",
422422 .version_id = 1,
423423 .minimum_version_id = 1,
424- .minimum_version_id_old = 1,
425424 .needed = tm_needed,
426425 .fields = (VMStateField []) {
427426 VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
@@ -672,7 +671,6 @@ const VMStateDescription vmstate_ppc_cpu = {
672671 .name = "cpu",
673672 .version_id = 5,
674673 .minimum_version_id = 5,
675- .minimum_version_id_old = 4,
676674 .pre_save = cpu_pre_save,
677675 .post_load = cpu_post_load,
678676 .fields = (VMStateField[]) {
--- a/target/sparc/machine.c
+++ b/target/sparc/machine.c
@@ -10,7 +10,6 @@ static const VMStateDescription vmstate_cpu_timer = {
1010 .name = "cpu_timer",
1111 .version_id = 1,
1212 .minimum_version_id = 1,
13- .minimum_version_id_old = 1,
1413 .fields = (VMStateField[]) {
1514 VMSTATE_UINT32(frequency, CPUTimer),
1615 VMSTATE_UINT32(disabled, CPUTimer),
@@ -30,7 +29,6 @@ static const VMStateDescription vmstate_trap_state = {
3029 .name = "trap_state",
3130 .version_id = 1,
3231 .minimum_version_id = 1,
33- .minimum_version_id_old = 1,
3432 .fields = (VMStateField[]) {
3533 VMSTATE_UINT64(tpc, trap_state),
3634 VMSTATE_UINT64(tnpc, trap_state),
@@ -44,7 +42,6 @@ static const VMStateDescription vmstate_tlb_entry = {
4442 .name = "tlb_entry",
4543 .version_id = 1,
4644 .minimum_version_id = 1,
47- .minimum_version_id_old = 1,
4845 .fields = (VMStateField[]) {
4946 VMSTATE_UINT64(tag, SparcTLBEntry),
5047 VMSTATE_UINT64(tte, SparcTLBEntry),
@@ -113,7 +110,6 @@ const VMStateDescription vmstate_sparc_cpu = {
113110 .name = "cpu",
114111 .version_id = SPARC_VMSTATE_VER,
115112 .minimum_version_id = SPARC_VMSTATE_VER,
116- .minimum_version_id_old = SPARC_VMSTATE_VER,
117113 .pre_save = cpu_pre_save,
118114 .fields = (VMStateField[]) {
119115 VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8),