Revision | d90e6f665d3ac197f83d93ad37147fe677521209 (tree) |
---|---|
Time | 2022-01-30 00:55:54 |
Author | Peter Maydell <peter.maydell@lina...> |
Committer | Peter Maydell |
Merge remote-tracking branch 'remotes/quintela-gitlab/tags/migration-20220128-pull-request' into staging
Migration Pull request (Take 2)
Hi
This time I have disabled the vmstate canary patches from Dave Gilbert.
Let's see if it works.
Later, Juan.
# gpg: Signature made Fri 28 Jan 2022 18:30:25 GMT
# gpg: using RSA key 1899FF8EDEBF58CCEE034B82F487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>" [full]
# gpg: aka "Juan Quintela <quintela@trasno.org>" [full]
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03 4B82 F487 EF18 5872 D723
* remotes/quintela-gitlab/tags/migration-20220128-pull-request: (36 commits)
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
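
The bulk of this pull request is a tree-wide cleanup removing the `.minimum_version_id_old` field from `VMStateDescription` initializers. That field only has an effect when a `load_state_old` callback is also provided, which none of the devices touched below define, so the assignments were dead. A minimal sketch of a descriptor after the cleanup (illustrative only; `vmstate_example`, `reg` and `ExampleState` are hypothetical, the macros are QEMU's):

```c
/* Sketch: a migration descriptor once minimum_version_id_old is gone.
 * Only version_id/minimum_version_id drive version checks unless a
 * load_state_old callback is installed. */
static const VMStateDescription vmstate_example = {
    .name = "example-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(reg, ExampleState),
        VMSTATE_END_OF_LIST()
    },
};
```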
@@ -297,7 +297,6 @@ static const VMStateDescription vmstate_cpuhp_sts = {
     .name = "CPU hotplug device state",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
         VMSTATE_BOOL(is_removing, AcpiCpuStatus),

@@ -311,7 +310,6 @@ const VMStateDescription vmstate_cpu_hotplug = {
     .name = "CPU hotplug state",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32(selector, CPUHotplugState),
         VMSTATE_UINT8(command, CPUHotplugState),

@@ -163,7 +163,6 @@ static const VMStateDescription vmstate_memhp_state = {
     .name = "ich9_pm/memhp",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .needed = vmstate_test_use_memhp,
     .fields = (VMStateField[]) {
         VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, ICH9LPCPMRegs),

@@ -181,7 +180,6 @@ static const VMStateDescription vmstate_tco_io_state = {
     .name = "ich9_pm/tco",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .needed = vmstate_test_use_tco,
     .fields = (VMStateField[]) {
         VMSTATE_STRUCT(tco_regs, ICH9LPCPMRegs, 1, vmstate_tco_io_sts,

@@ -208,7 +206,6 @@ static const VMStateDescription vmstate_cpuhp_state = {
     .name = "ich9_pm/cpuhp",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .needed = vmstate_test_use_cpuhp,
     .pre_load = vmstate_cpuhp_pre_load,
     .fields = (VMStateField[]) {

@@ -318,7 +318,6 @@ static const VMStateDescription vmstate_memhp_sts = {
     .name = "memory hotplug device state",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_BOOL(is_enabled, MemStatus),
         VMSTATE_BOOL(is_inserting, MemStatus),

@@ -332,7 +331,6 @@ const VMStateDescription vmstate_memory_hotplug = {
     .name = "memory hotplug state",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32(selector, MemHotplugState),
         VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, MemHotplugState, dev_count,

@@ -230,7 +230,6 @@ static const VMStateDescription vmstate_memhp_state = {
     .name = "piix4_pm/memhp",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .needed = vmstate_test_use_memhp,
     .fields = (VMStateField[]) {
         VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, PIIX4PMState),

@@ -255,7 +254,6 @@ static const VMStateDescription vmstate_cpuhp_state = {
     .name = "piix4_pm/cpuhp",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .needed = vmstate_test_use_cpuhp,
     .pre_load = vmstate_cpuhp_pre_load,
     .fields = (VMStateField[]) {

@@ -239,7 +239,6 @@ const VMStateDescription vmstate_tco_io_sts = {
     .name = "tco io device status",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT16(tco.rld, TCOIORegs),
         VMSTATE_UINT8(tco.din, TCOIORegs),

@@ -209,7 +209,6 @@ static const VMStateDescription vmstate_spk = {
     .name = "pcspk",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .needed = migrate_needed,
     .fields = (VMStateField[]) {
         VMSTATE_UINT8(data_on, PCSpkState),

@@ -616,7 +616,6 @@ static const VMStateDescription vmstate_macfb = {
     .name = "macfb",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .post_load = macfb_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINT8_ARRAY(color_palette, MacfbState, 256 * 3),

@@ -806,7 +806,6 @@ static const VMStateDescription vmstate_zdma = {
     .name = TYPE_XLNX_ZDMA,
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
         VMSTATE_UINT32(state, XlnxZDMA),

@@ -677,7 +677,6 @@ static const VMStateDescription vmstate_xlnx_csu_dma = {
     .name = TYPE_XLNX_CSU_DMA,
     .version_id = 0,
     .minimum_version_id = 0,
-    .minimum_version_id_old = 0,
     .fields = (VMStateField[]) {
         VMSTATE_PTIMER(src_timer, XlnxCSUDMA),
         VMSTATE_UINT16(width, XlnxCSUDMA),

@@ -277,7 +277,6 @@ static const VMStateDescription vmstate_imx_gpio = {
     .name = TYPE_IMX_GPIO,
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32(dr, IMXGPIOState),
         VMSTATE_UINT32(gdir, IMXGPIOState),

@@ -271,7 +271,6 @@ static const VMStateDescription vmstate_bcm2835_mbox = {
     .name = TYPE_BCM2835_MBOX,
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_BOOL_ARRAY(available, BCM2835MboxState, MBOX_CHAN_COUNT),
         VMSTATE_STRUCT_ARRAY(mbox, BCM2835MboxState, 2, 1,

@@ -266,7 +266,6 @@ static const VMStateDescription vmstate_kvaser_pci = {
     .name = "kvaser_pci",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_PCI_DEVICE(dev, KvaserPCIState),
         /* Load this before sja_state. */

@@ -203,7 +203,6 @@ static const VMStateDescription vmstate_mioe3680_pci = {
     .name = "mioe3680_pci",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_PCI_DEVICE(dev, Mioe3680PCIState),
         VMSTATE_STRUCT(sja_state[0], Mioe3680PCIState, 0, vmstate_can_sja,

@@ -204,7 +204,6 @@ static const VMStateDescription vmstate_pcm3680i_pci = {
     .name = "pcm3680i_pci",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_PCI_DEVICE(dev, Pcm3680iPCIState),
         VMSTATE_STRUCT(sja_state[0], Pcm3680iPCIState, 0,

@@ -928,7 +928,6 @@ const VMStateDescription vmstate_qemu_can_filter = {
     .name = "qemu_can_filter",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32(can_id, qemu_can_filter),
         VMSTATE_UINT32(can_mask, qemu_can_filter),

@@ -952,7 +951,6 @@ const VMStateDescription vmstate_can_sja = {
     .name = "can_sja",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .post_load = can_sja_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINT8(mode, CanSJA1000State),

@@ -617,7 +617,6 @@ const VMStateDescription vmstate_qemu_ctucan_tx_buffer = {
     .name = "qemu_ctucan_tx_buffer",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_UINT8_ARRAY(data, CtuCanCoreMsgBuffer, CTUCAN_CORE_MSG_MAX_LEN),
         VMSTATE_END_OF_LIST()

@@ -636,7 +635,6 @@ const VMStateDescription vmstate_ctucan = {
     .name = "ctucan",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .post_load = ctucan_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32(mode_settings.u32, CtuCanCoreState),

@@ -215,7 +215,6 @@ static const VMStateDescription vmstate_ctucan_pci = {
     .name = "ctucan_pci",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_PCI_DEVICE(dev, CtuCanPCIState),
         VMSTATE_STRUCT(ctucan_state[0], CtuCanPCIState, 0, vmstate_ctucan,

@@ -1049,7 +1049,6 @@ const VMStateDescription vmstate_ppc_timebase = {
     .name = "timebase",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .pre_save = timebase_pre_save,
     .fields = (VMStateField []) {
         VMSTATE_UINT64(guest_timebase, PPCTimebase),

@@ -2315,7 +2315,6 @@ static const VMStateDescription vmstate_megasas_gen2 = {
     .name = "megasas-gen2",
     .version_id = 0,
     .minimum_version_id = 0,
-    .minimum_version_id_old = 0,
     .fields = (VMStateField[]) {
         VMSTATE_PCI_DEVICE(parent_obj, MegasasState),
         VMSTATE_MSIX(parent_obj, MegasasState),

@@ -1363,7 +1363,6 @@ static const VMStateDescription vmstate_mptsas = {
     .name = "mptsas",
     .version_id = 0,
     .minimum_version_id = 0,
-    .minimum_version_id_old = 0,
     .post_load = mptsas_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_PCI_DEVICE(dev, MPTSASState),

@@ -592,7 +592,6 @@ static const VMStateDescription vmstate_virtio_mmio = {
     .name = "virtio_mmio",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_END_OF_LIST()
     },

@@ -131,7 +131,6 @@ static const VMStateDescription vmstate_virtio_pci = {
     .name = "virtio_pci",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_END_OF_LIST()
     },

@@ -2808,7 +2808,6 @@ static const VMStateDescription vmstate_virtio = {
     .name = "virtio",
     .version_id = 1,
     .minimum_version_id = 1,
-    .minimum_version_id_old = 1,
     .fields = (VMStateField[]) {
         VMSTATE_END_OF_LIST()
     },
@@ -1014,6 +1014,9 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     info->ram->page_size = page_size;
     info->ram->multifd_bytes = ram_counters.multifd_bytes;
    info->ram->pages_per_second = s->pages_per_second;
+    info->ram->precopy_bytes = ram_counters.precopy_bytes;
+    info->ram->downtime_bytes = ram_counters.downtime_bytes;
+    info->ram->postcopy_bytes = ram_counters.postcopy_bytes;

     if (migrate_use_xbzrle()) {
         info->has_xbzrle_cache = true;
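
The three new fields let the `query-migrate` reply break transferred RAM down by migration phase; they are filled from `ram_counters`, whose classification helper `ram_transferred_add()` appears in the ram.c hunks near the end of this page.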
@@ -2991,10 +2994,7 @@ static int postcopy_start(MigrationState *ms)
      * that are dirty
      */
     if (migrate_postcopy_ram()) {
-        if (ram_postcopy_send_discard_bitmap(ms)) {
-            error_report("postcopy send discard bitmap failed");
-            goto fail;
-        }
+        ram_postcopy_send_discard_bitmap(ms);
     }

     /*

@@ -3205,7 +3205,7 @@ static void migration_completion(MigrationState *s)
         qemu_mutex_unlock_iothread();

         trace_migration_completion_postcopy_end_after_complete();
-    } else if (s->state == MIGRATION_STATUS_CANCELLING) {
+    } else {
         goto fail;
     }

@@ -3230,7 +3230,11 @@ static void migration_completion(MigrationState *s)
             goto fail_invalidate;
         }

-    if (!migrate_colo_enabled()) {
+    if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) {
+        /* COLO does not support postcopy */
+        migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                          MIGRATION_STATUS_COLO);
+    } else {
         migrate_set_state(&s->state, current_active_state,
                           MIGRATION_STATUS_COMPLETED);
     }

@@ -3621,16 +3625,6 @@ static void migration_iteration_finish(MigrationState *s)
                          "COLO enabled", __func__);
         }
         migrate_start_colo_process(s);
-        /*
-         * Fixme: we will run VM in COLO no matter its old running state.
-         * After exited COLO, we will keep running.
-         */
-        /* Fallthrough */
-    case MIGRATION_STATUS_ACTIVE:
-        /*
-         * We should really assert here, but since it's during
-         * migration, let's try to reduce the usage of assertions.
-         */
         s->vm_was_running = true;
         /* Fallthrough */
     case MIGRATION_STATUS_FAILED:
@@ -51,16 +51,16 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
     zs->opaque = Z_NULL;
     if (deflateInit(zs, migrate_multifd_zlib_level()) != Z_OK) {
         g_free(z);
-        error_setg(errp, "multifd %d: deflate init failed", p->id);
+        error_setg(errp, "multifd %u: deflate init failed", p->id);
         return -1;
     }
-    /* To be safe, we reserve twice the size of the packet */
-    z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
+    /* This is the maxium size of the compressed buffer */
+    z->zbuff_len = compressBound(MULTIFD_PACKET_SIZE);
     z->zbuff = g_try_malloc(z->zbuff_len);
     if (!z->zbuff) {
         deflateEnd(&z->zs);
         g_free(z);
-        error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
+        error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
         return -1;
     }
     p->data = z;
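
Sizing the staging buffer with zlib's documented worst-case bound replaces the earlier "twice the packet size" guess, which reserved more than the deflate worst case actually requires. A self-contained sketch of the sizing (the `MULTIFD_PACKET_SIZE` value here is hypothetical; only the macro name comes from the hunk):

```c
#include <stdio.h>
#include <zlib.h>

#define MULTIFD_PACKET_SIZE (512 * 4096)  /* hypothetical: 512 pages of 4 KiB */

int main(void)
{
    /* compressBound() returns the largest possible deflate output for a
     * given input length, so a buffer of this size can never overflow. */
    uLong bound = compressBound(MULTIFD_PACKET_SIZE);
    printf("input %lu bytes -> worst case %lu bytes\n",
           (uLong)MULTIFD_PACKET_SIZE, bound);
    return 0;
}
```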
@@ -106,16 +106,16 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
     int ret;
     uint32_t i;

-    for (i = 0; i < p->pages->num; i++) {
+    for (i = 0; i < p->normal_num; i++) {
         uint32_t available = z->zbuff_len - out_size;
         int flush = Z_NO_FLUSH;

-        if (i == p->pages->num - 1) {
+        if (i == p->normal_num - 1) {
             flush = Z_SYNC_FLUSH;
         }

         zs->avail_in = page_size;
-        zs->next_in = p->pages->block->host + p->pages->offset[i];
+        zs->next_in = p->pages->block->host + p->normal[i];

         zs->avail_out = available;
         zs->next_out = z->zbuff + out_size;

@@ -132,17 +132,20 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
             ret = deflate(zs, flush);
         } while (ret == Z_OK && zs->avail_in && zs->avail_out);
         if (ret == Z_OK && zs->avail_in) {
-            error_setg(errp, "multifd %d: deflate failed to compress all input",
+            error_setg(errp, "multifd %u: deflate failed to compress all input",
                        p->id);
             return -1;
         }
         if (ret != Z_OK) {
-            error_setg(errp, "multifd %d: deflate returned %d instead of Z_OK",
+            error_setg(errp, "multifd %u: deflate returned %d instead of Z_OK",
                        p->id, ret);
             return -1;
         }
         out_size += available - zs->avail_out;
     }
+    p->iov[p->iovs_num].iov_base = z->zbuff;
+    p->iov[p->iovs_num].iov_len = out_size;
+    p->iovs_num++;
     p->next_packet_size = out_size;
     p->flags |= MULTIFD_FLAG_ZLIB;

@@ -150,25 +153,6 @@ static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
     return 0;
 }

 /**
- * zlib_send_write: do the actual write of the data
- *
- * Do the actual write of the comprresed buffer.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @used: number of pages used
- * @errp: pointer to an error
- */
-static int zlib_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
-{
-    struct zlib_data *z = p->data;
-
-    return qio_channel_write_all(p->c, (void *)z->zbuff, p->next_packet_size,
-                                 errp);
-}
-
-/**
  * zlib_recv_setup: setup receive side
  *
  * Create the compressed channel and buffer.

@@ -190,7 +174,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
     zs->avail_in = 0;
     zs->next_in = Z_NULL;
     if (inflateInit(zs) != Z_OK) {
-        error_setg(errp, "multifd %d: inflate init failed", p->id);
+        error_setg(errp, "multifd %u: inflate init failed", p->id);
         return -1;
     }
     /* To be safe, we reserve twice the size of the packet */

@@ -198,7 +182,7 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
     z->zbuff = g_try_malloc(z->zbuff_len);
     if (!z->zbuff) {
         inflateEnd(zs);
-        error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
+        error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
         return -1;
     }
     return 0;

@@ -241,13 +225,13 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
     uint32_t in_size = p->next_packet_size;
     /* we measure the change of total_out */
     uint32_t out_size = zs->total_out;
-    uint32_t expected_size = p->pages->num * qemu_target_page_size();
+    uint32_t expected_size = p->normal_num * page_size;
     uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
     int ret;
     int i;

     if (flags != MULTIFD_FLAG_ZLIB) {
-        error_setg(errp, "multifd %d: flags received %x flags expected %x",
+        error_setg(errp, "multifd %u: flags received %x flags expected %x",
                    p->id, flags, MULTIFD_FLAG_ZLIB);
         return -1;
     }

@@ -260,16 +244,16 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
     zs->avail_in = in_size;
     zs->next_in = z->zbuff;

-    for (i = 0; i < p->pages->num; i++) {
+    for (i = 0; i < p->normal_num; i++) {
         int flush = Z_NO_FLUSH;
         unsigned long start = zs->total_out;

-        if (i == p->pages->num - 1) {
+        if (i == p->normal_num - 1) {
             flush = Z_SYNC_FLUSH;
         }

         zs->avail_out = page_size;
-        zs->next_out = p->pages->block->host + p->pages->offset[i];
+        zs->next_out = p->host + p->normal[i];

         /*
          * Welcome to inflate semantics

@@ -284,19 +268,19 @@ static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
         } while (ret == Z_OK && zs->avail_in
                  && (zs->total_out - start) < page_size);
         if (ret == Z_OK && (zs->total_out - start) < page_size) {
-            error_setg(errp, "multifd %d: inflate generated too few output",
+            error_setg(errp, "multifd %u: inflate generated too few output",
                        p->id);
             return -1;
         }
         if (ret != Z_OK) {
-            error_setg(errp, "multifd %d: inflate returned %d instead of Z_OK",
+            error_setg(errp, "multifd %u: inflate returned %d instead of Z_OK",
                        p->id, ret);
             return -1;
         }
     }
     out_size = zs->total_out - out_size;
     if (out_size != expected_size) {
-        error_setg(errp, "multifd %d: packet size received %d size expected %d",
+        error_setg(errp, "multifd %u: packet size received %u size expected %u",
                    p->id, out_size, expected_size);
         return -1;
     }

@@ -307,7 +291,6 @@ static MultiFDMethods multifd_zlib_ops = {
     .send_setup = zlib_send_setup,
     .send_cleanup = zlib_send_cleanup,
     .send_prepare = zlib_send_prepare,
-    .send_write = zlib_send_write,
     .recv_setup = zlib_recv_setup,
     .recv_cleanup = zlib_recv_cleanup,
     .recv_pages = zlib_recv_pages
@@ -55,7 +55,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
     z->zcs = ZSTD_createCStream();
     if (!z->zcs) {
         g_free(z);
-        error_setg(errp, "multifd %d: zstd createCStream failed", p->id);
+        error_setg(errp, "multifd %u: zstd createCStream failed", p->id);
         return -1;
     }

@@ -63,17 +63,17 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
     if (ZSTD_isError(res)) {
         ZSTD_freeCStream(z->zcs);
         g_free(z);
-        error_setg(errp, "multifd %d: initCStream failed with error %s",
+        error_setg(errp, "multifd %u: initCStream failed with error %s",
                    p->id, ZSTD_getErrorName(res));
         return -1;
     }
-    /* To be safe, we reserve twice the size of the packet */
-    z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
+    /* This is the maxium size of the compressed buffer */
+    z->zbuff_len = ZSTD_compressBound(MULTIFD_PACKET_SIZE);
     z->zbuff = g_try_malloc(z->zbuff_len);
     if (!z->zbuff) {
         ZSTD_freeCStream(z->zcs);
         g_free(z);
-        error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
+        error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
         return -1;
     }
     return 0;
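
The zstd setup gets the same treatment: `ZSTD_compressBound()` plays the role `compressBound()` plays for zlib, returning the worst-case compressed size for a given input length, so `ZSTD_compressStream()` can never run out of output space in this buffer.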
@@ -121,13 +121,13 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
     z->out.size = z->zbuff_len;
     z->out.pos = 0;

-    for (i = 0; i < p->pages->num; i++) {
+    for (i = 0; i < p->normal_num; i++) {
         ZSTD_EndDirective flush = ZSTD_e_continue;

-        if (i == p->pages->num - 1) {
+        if (i == p->normal_num - 1) {
             flush = ZSTD_e_flush;
         }
-        z->in.src = p->pages->block->host + p->pages->offset[i];
+        z->in.src = p->pages->block->host + p->normal[i];
         z->in.size = page_size;
         z->in.pos = 0;

@@ -144,16 +144,19 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
         } while (ret > 0 && (z->in.size - z->in.pos > 0)
                  && (z->out.size - z->out.pos > 0));
         if (ret > 0 && (z->in.size - z->in.pos > 0)) {
-            error_setg(errp, "multifd %d: compressStream buffer too small",
+            error_setg(errp, "multifd %u: compressStream buffer too small",
                        p->id);
             return -1;
         }
         if (ZSTD_isError(ret)) {
-            error_setg(errp, "multifd %d: compressStream error %s",
+            error_setg(errp, "multifd %u: compressStream error %s",
                        p->id, ZSTD_getErrorName(ret));
             return -1;
         }
     }
+    p->iov[p->iovs_num].iov_base = z->zbuff;
+    p->iov[p->iovs_num].iov_len = z->out.pos;
+    p->iovs_num++;
     p->next_packet_size = z->out.pos;
     p->flags |= MULTIFD_FLAG_ZSTD;

@@ -161,25 +164,6 @@ static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
 }

 /**
- * zstd_send_write: do the actual write of the data
- *
- * Do the actual write of the comprresed buffer.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @used: number of pages used
- * @errp: pointer to an error
- */
-static int zstd_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
-{
-    struct zstd_data *z = p->data;
-
-    return qio_channel_write_all(p->c, (void *)z->zbuff, p->next_packet_size,
-                                 errp);
-}
-
-/**
  * zstd_recv_setup: setup receive side
  *
  * Create the compressed channel and buffer.

@@ -198,7 +182,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
     z->zds = ZSTD_createDStream();
     if (!z->zds) {
         g_free(z);
-        error_setg(errp, "multifd %d: zstd createDStream failed", p->id);
+        error_setg(errp, "multifd %u: zstd createDStream failed", p->id);
         return -1;
     }

@@ -206,7 +190,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
     if (ZSTD_isError(ret)) {
         ZSTD_freeDStream(z->zds);
         g_free(z);
-        error_setg(errp, "multifd %d: initDStream failed with error %s",
+        error_setg(errp, "multifd %u: initDStream failed with error %s",
                    p->id, ZSTD_getErrorName(ret));
         return -1;
     }

@@ -217,7 +201,7 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
     if (!z->zbuff) {
         ZSTD_freeDStream(z->zds);
         g_free(z);
-        error_setg(errp, "multifd %d: out of memory for zbuff", p->id);
+        error_setg(errp, "multifd %u: out of memory for zbuff", p->id);
         return -1;
     }
     return 0;

@@ -258,14 +242,14 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
     uint32_t in_size = p->next_packet_size;
     uint32_t out_size = 0;
     size_t page_size = qemu_target_page_size();
-    uint32_t expected_size = p->pages->num * page_size;
+    uint32_t expected_size = p->normal_num * page_size;
     uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
     struct zstd_data *z = p->data;
     int ret;
     int i;

     if (flags != MULTIFD_FLAG_ZSTD) {
-        error_setg(errp, "multifd %d: flags received %x flags expected %x",
+        error_setg(errp, "multifd %u: flags received %x flags expected %x",
                    p->id, flags, MULTIFD_FLAG_ZSTD);
         return -1;
     }

@@ -279,8 +263,8 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
     z->in.size = in_size;
     z->in.pos = 0;

-    for (i = 0; i < p->pages->num; i++) {
-        z->out.dst = p->pages->block->host + p->pages->offset[i];
+    for (i = 0; i < p->normal_num; i++) {
+        z->out.dst = p->host + p->normal[i];
         z->out.size = page_size;
         z->out.pos = 0;

@@ -297,19 +281,19 @@ static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
         } while (ret > 0 && (z->in.size - z->in.pos > 0)
                  && (z->out.pos < page_size));
         if (ret > 0 && (z->out.pos < page_size)) {
-            error_setg(errp, "multifd %d: decompressStream buffer too small",
+            error_setg(errp, "multifd %u: decompressStream buffer too small",
                        p->id);
             return -1;
         }
         if (ZSTD_isError(ret)) {
-            error_setg(errp, "multifd %d: decompressStream returned %s",
+            error_setg(errp, "multifd %u: decompressStream returned %s",
                        p->id, ZSTD_getErrorName(ret));
             return ret;
         }
         out_size += z->out.pos;
     }
     if (out_size != expected_size) {
-        error_setg(errp, "multifd %d: packet size received %d size expected %d",
+        error_setg(errp, "multifd %u: packet size received %u size expected %u",
                    p->id, out_size, expected_size);
         return -1;
     }

@@ -320,7 +304,6 @@ static MultiFDMethods multifd_zstd_ops = {
     .send_setup = zstd_send_setup,
     .send_cleanup = zstd_send_cleanup,
     .send_prepare = zstd_send_prepare,
-    .send_write = zstd_send_write,
     .recv_setup = zstd_recv_setup,
     .recv_cleanup = zstd_recv_cleanup,
     .recv_pages = zstd_recv_pages
@@ -86,28 +86,21 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
  */
 static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
 {
-    p->next_packet_size = p->pages->num * qemu_target_page_size();
+    MultiFDPages_t *pages = p->pages;
+    size_t page_size = qemu_target_page_size();
+
+    for (int i = 0; i < p->normal_num; i++) {
+        p->iov[p->iovs_num].iov_base = pages->block->host + p->normal[i];
+        p->iov[p->iovs_num].iov_len = page_size;
+        p->iovs_num++;
+    }
+
+    p->next_packet_size = p->normal_num * page_size;
     p->flags |= MULTIFD_FLAG_NOCOMP;
     return 0;
 }

 /**
- * nocomp_send_write: do the actual write of the data
- *
- * For no compression we just have to write the data.
- *
- * Returns 0 for success or -1 for error
- *
- * @p: Params for the channel that we are using
- * @used: number of pages used
- * @errp: pointer to an error
- */
-static int nocomp_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
-{
-    return qio_channel_writev_all(p->c, p->pages->iov, used, errp);
-}
-
-/**
  * nocomp_recv_setup: setup receive side
 *
  * For no compression this function does nothing.

@@ -146,20 +139,24 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
 static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
 {
     uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
+    size_t page_size = qemu_target_page_size();

     if (flags != MULTIFD_FLAG_NOCOMP) {
-        error_setg(errp, "multifd %d: flags received %x flags expected %x",
+        error_setg(errp, "multifd %u: flags received %x flags expected %x",
                    p->id, flags, MULTIFD_FLAG_NOCOMP);
         return -1;
     }
-    return qio_channel_readv_all(p->c, p->pages->iov, p->pages->num, errp);
+    for (int i = 0; i < p->normal_num; i++) {
+        p->iov[i].iov_base = p->host + p->normal[i];
+        p->iov[i].iov_len = page_size;
+    }
+    return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
 }

 static MultiFDMethods multifd_nocomp_ops = {
     .send_setup = nocomp_send_setup,
     .send_cleanup = nocomp_send_cleanup,
     .send_prepare = nocomp_send_prepare,
-    .send_write = nocomp_send_write,
     .recv_setup = nocomp_recv_setup,
     .recv_cleanup = nocomp_recv_cleanup,
     .recv_pages = nocomp_recv_pages
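
With this, the iovec array moves out of `MultiFDPages_t` and into the per-channel parameters: `nocomp_send_prepare()` points `p->iov` entries straight at guest pages in the RAMBlock, and `nocomp_recv_pages()` rebuilds `p->iov` from `p->host + p->normal[i]` before a single `qio_channel_readv_all()`, mirroring the send side's single gather write shown further down.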
@@ -212,8 +209,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
     }

     if (msg.version != MULTIFD_VERSION) {
-        error_setg(errp, "multifd: received packet version %d "
-                   "expected %d", msg.version, MULTIFD_VERSION);
+        error_setg(errp, "multifd: received packet version %u "
+                   "expected %u", msg.version, MULTIFD_VERSION);
         return -1;
     }

@@ -229,8 +226,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
     }

     if (msg.id > migrate_multifd_channels()) {
-        error_setg(errp, "multifd: received channel version %d "
-                   "expected %d", msg.version, MULTIFD_VERSION);
+        error_setg(errp, "multifd: received channel version %u "
+                   "expected %u", msg.version, MULTIFD_VERSION);
         return -1;
     }

@@ -242,7 +239,6 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
     MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

     pages->allocated = size;
-    pages->iov = g_new0(struct iovec, size);
     pages->offset = g_new0(ram_addr_t, size);

     return pages;

@@ -254,8 +250,6 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
     pages->allocated = 0;
     pages->packet_num = 0;
     pages->block = NULL;
-    g_free(pages->iov);
-    pages->iov = NULL;
     g_free(pages->offset);
     pages->offset = NULL;
     g_free(pages);

@@ -268,7 +262,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)

     packet->flags = cpu_to_be32(p->flags);
     packet->pages_alloc = cpu_to_be32(p->pages->allocated);
-    packet->pages_used = cpu_to_be32(p->pages->num);
+    packet->normal_pages = cpu_to_be32(p->normal_num);
     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
     packet->packet_num = cpu_to_be64(p->packet_num);

@@ -276,9 +270,9 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
         strncpy(packet->ramblock, p->pages->block->idstr, 256);
     }

-    for (i = 0; i < p->pages->num; i++) {
+    for (i = 0; i < p->normal_num; i++) {
         /* there are architectures where ram_addr_t is 32 bit */
-        uint64_t temp = p->pages->offset[i];
+        uint64_t temp = p->normal[i];

         packet->offset[i] = cpu_to_be64(temp);
     }

@@ -288,7 +282,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
 {
     MultiFDPacket_t *packet = p->packet;
     size_t page_size = qemu_target_page_size();
-    uint32_t pages_max = MULTIFD_PACKET_SIZE / page_size;
+    uint32_t page_count = MULTIFD_PACKET_SIZE / page_size;
     RAMBlock *block;
     int i;

@@ -303,7 +297,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
     packet->version = be32_to_cpu(packet->version);
     if (packet->version != MULTIFD_VERSION) {
         error_setg(errp, "multifd: received packet "
-                   "version %d and expected version %d",
+                   "version %u and expected version %u",
                    packet->version, MULTIFD_VERSION);
         return -1;
     }

@@ -315,33 +309,25 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
      * If we received a packet that is 100 times bigger than expected
      * just stop migration.  It is a magic number.
      */
-    if (packet->pages_alloc > pages_max * 100) {
+    if (packet->pages_alloc > page_count) {
         error_setg(errp, "multifd: received packet "
-                   "with size %d and expected a maximum size of %d",
-                   packet->pages_alloc, pages_max * 100) ;
+                   "with size %u and expected a size of %u",
+                   packet->pages_alloc, page_count) ;
         return -1;
     }
-    /*
-     * We received a packet that is bigger than expected but inside
-     * reasonable limits (see previous comment). Just reallocate.
-     */
-    if (packet->pages_alloc > p->pages->allocated) {
-        multifd_pages_clear(p->pages);
-        p->pages = multifd_pages_init(packet->pages_alloc);
-    }

-    p->pages->num = be32_to_cpu(packet->pages_used);
-    if (p->pages->num > packet->pages_alloc) {
+    p->normal_num = be32_to_cpu(packet->normal_pages);
+    if (p->normal_num > packet->pages_alloc) {
         error_setg(errp, "multifd: received packet "
-                   "with %d pages and expected maximum pages are %d",
-                   p->pages->num, packet->pages_alloc) ;
+                   "with %u pages and expected maximum pages are %u",
+                   p->normal_num, packet->pages_alloc) ;
         return -1;
     }

     p->next_packet_size = be32_to_cpu(packet->next_packet_size);
     p->packet_num = be64_to_cpu(packet->packet_num);

-    if (p->pages->num == 0) {
+    if (p->normal_num == 0) {
         return 0;
     }

@@ -354,8 +340,8 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
         return -1;
     }

-    p->pages->block = block;
-    for (i = 0; i < p->pages->num; i++) {
+    p->host = block->host;
+    for (i = 0; i < p->normal_num; i++) {
         uint64_t offset = be64_to_cpu(packet->offset[i]);

         if (offset > (block->used_length - page_size)) {

@@ -364,9 +350,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
                        offset, block->used_length);
             return -1;
         }
-        p->pages->offset[i] = offset;
-        p->pages->iov[i].iov_base = block->host + offset;
-        p->pages->iov[i].iov_len = page_size;
+        p->normal[i] = offset;
     }

     return 0;

@@ -470,8 +454,6 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)

     if (pages->block == block) {
         pages->offset[pages->num] = offset;
-        pages->iov[pages->num].iov_base = block->host + offset;
-        pages->iov[pages->num].iov_len = qemu_target_page_size();
         pages->num++;

         if (pages->num < pages->allocated) {

@@ -567,6 +549,10 @@ void multifd_save_cleanup(void)
         p->packet_len = 0;
         g_free(p->packet);
         p->packet = NULL;
+        g_free(p->iov);
+        p->iov = NULL;
+        g_free(p->normal);
+        p->normal = NULL;
         multifd_send_state->ops->send_cleanup(p, &local_err);
         if (local_err) {
             migrate_set_error(migrate_get_current(), local_err);
@@ -651,11 +637,17 @@ static void *multifd_send_thread(void *opaque)
         qemu_mutex_lock(&p->mutex);

         if (p->pending_job) {
-            uint32_t used = p->pages->num;
             uint64_t packet_num = p->packet_num;
             uint32_t flags = p->flags;
+            p->iovs_num = 1;
+            p->normal_num = 0;
+
+            for (int i = 0; i < p->pages->num; i++) {
+                p->normal[p->normal_num] = p->pages->offset[i];
+                p->normal_num++;
+            }

-            if (used) {
+            if (p->normal_num) {
                 ret = multifd_send_state->ops->send_prepare(p, &local_err);
                 if (ret != 0) {
                     qemu_mutex_unlock(&p->mutex);

@@ -665,27 +657,23 @@ static void *multifd_send_thread(void *opaque)
             multifd_send_fill_packet(p);
             p->flags = 0;
             p->num_packets++;
-            p->num_pages += used;
+            p->total_normal_pages += p->normal_num;
             p->pages->num = 0;
             p->pages->block = NULL;
             qemu_mutex_unlock(&p->mutex);

-            trace_multifd_send(p->id, packet_num, used, flags,
+            trace_multifd_send(p->id, packet_num, p->normal_num, flags,
                                p->next_packet_size);

-            ret = qio_channel_write_all(p->c, (void *)p->packet,
-                                        p->packet_len, &local_err);
+            p->iov[0].iov_len = p->packet_len;
+            p->iov[0].iov_base = p->packet;
+
+            ret = qio_channel_writev_all(p->c, p->iov, p->iovs_num,
+                                         &local_err);
             if (ret != 0) {
                 break;
             }

-            if (used) {
-                ret = multifd_send_state->ops->send_write(p, used, &local_err);
-                if (ret != 0) {
-                    break;
-                }
-            }
-
             qemu_mutex_lock(&p->mutex);
             p->pending_job--;
             qemu_mutex_unlock(&p->mutex);
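
The send thread now issues one gather write per packet: slot 0 of `p->iov` is reserved for the packet header (hence `p->iovs_num = 1` before `send_prepare()` runs), the prepared page or compressed buffers fill the remaining slots, and the separate per-method `send_write()` call disappears. A stand-alone illustration of the same idea with plain POSIX `writev(2)` (hypothetical helper; QEMU goes through `qio_channel_writev_all()`, which additionally loops on short writes):

```c
#include <sys/uio.h>
#include <unistd.h>

/* Send a header followed by one payload buffer in a single syscall. */
static ssize_t send_packet(int fd, void *hdr, size_t hdr_len,
                           void *payload, size_t payload_len)
{
    struct iovec iov[2] = {
        { .iov_base = hdr,     .iov_len = hdr_len     },
        { .iov_base = payload, .iov_len = payload_len },
    };
    return writev(fd, iov, 2);  /* gather write: one syscall, no copying */
}
```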
@@ -724,7 +712,7 @@ out:
     qemu_mutex_unlock(&p->mutex);

     rcu_unregister_thread();
-    trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
+    trace_multifd_send_thread_end(p->id, p->num_packets, p->total_normal_pages);

     return NULL;
 }

@@ -922,6 +910,9 @@ int multifd_save_setup(Error **errp)
         p->packet->version = cpu_to_be32(MULTIFD_VERSION);
         p->name = g_strdup_printf("multifdsend_%d", i);
         p->tls_hostname = g_strdup(s->hostname);
+        /* We need one extra place for the packet header */
+        p->iov = g_new0(struct iovec, page_count + 1);
+        p->normal = g_new0(ram_addr_t, page_count);
         socket_send_channel_create(multifd_new_send_channel_async, p);
     }

@@ -1016,11 +1007,13 @@ int multifd_load_cleanup(Error **errp)
         qemu_sem_destroy(&p->sem_sync);
         g_free(p->name);
         p->name = NULL;
-        multifd_pages_clear(p->pages);
-        p->pages = NULL;
         p->packet_len = 0;
         g_free(p->packet);
         p->packet = NULL;
+        g_free(p->iov);
+        p->iov = NULL;
+        g_free(p->normal);
+        p->normal = NULL;
         multifd_recv_state->ops->recv_cleanup(p);
     }
     qemu_sem_destroy(&multifd_recv_state->sem_sync);

@@ -1069,7 +1062,6 @@ static void *multifd_recv_thread(void *opaque)
     rcu_register_thread();

     while (true) {
-        uint32_t used;
         uint32_t flags;

         if (p->quit) {

@@ -1092,17 +1084,16 @@ static void *multifd_recv_thread(void *opaque)
             break;
         }

-        used = p->pages->num;
         flags = p->flags;
         /* recv methods don't know how to handle the SYNC flag */
         p->flags &= ~MULTIFD_FLAG_SYNC;
-        trace_multifd_recv(p->id, p->packet_num, used, flags,
+        trace_multifd_recv(p->id, p->packet_num, p->normal_num, flags,
                            p->next_packet_size);
         p->num_packets++;
-        p->num_pages += used;
+        p->total_normal_pages += p->normal_num;
         qemu_mutex_unlock(&p->mutex);

-        if (used) {
+        if (p->normal_num) {
             ret = multifd_recv_state->ops->recv_pages(p, &local_err);
             if (ret != 0) {
                 break;

@@ -1124,7 +1115,7 @@ static void *multifd_recv_thread(void *opaque)
     qemu_mutex_unlock(&p->mutex);

     rcu_unregister_thread();
-    trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
+    trace_multifd_recv_thread_end(p->id, p->num_packets, p->total_normal_pages);

     return NULL;
 }

@@ -1156,11 +1147,12 @@ int multifd_load_setup(Error **errp)
         qemu_sem_init(&p->sem_sync, 0);
         p->quit = false;
         p->id = i;
-        p->pages = multifd_pages_init(page_count);
         p->packet_len = sizeof(MultiFDPacket_t)
                       + sizeof(uint64_t) * page_count;
         p->packet = g_malloc0(p->packet_len);
         p->name = g_strdup_printf("multifdrecv_%d", i);
+        p->iov = g_new0(struct iovec, page_count);
+        p->normal = g_new0(ram_addr_t, page_count);
     }

     for (i = 0; i < thread_count; i++) {
@@ -44,7 +44,8 @@ typedef struct {
     uint32_t flags;
     /* maximum number of allocated pages */
     uint32_t pages_alloc;
-    uint32_t pages_used;
+    /* non zero pages */
+    uint32_t normal_pages;
     /* size of the next packet that contains pages */
     uint32_t next_packet_size;
     uint64_t packet_num;

@@ -62,8 +63,6 @@ typedef struct {
     uint64_t packet_num;
     /* offset of each page */
     ram_addr_t *offset;
-    /* pointer to each page */
-    struct iovec *iov;
     RAMBlock *block;
 } MultiFDPages_t;

@@ -106,10 +105,18 @@ typedef struct {
     /* thread local variables */
     /* packets sent through this channel */
     uint64_t num_packets;
-    /* pages sent through this channel */
-    uint64_t num_pages;
+    /* non zero pages sent through this channel */
+    uint64_t total_normal_pages;
     /* syncs main thread and channels */
     QemuSemaphore sem_sync;
+    /* buffers to send */
+    struct iovec *iov;
+    /* number of iovs used */
+    uint32_t iovs_num;
+    /* Pages that are not zero */
+    ram_addr_t *normal;
+    /* num of non zero pages */
+    uint32_t normal_num;
     /* used for compression methods */
     void *data;
 } MultiFDSendParams;

@@ -130,8 +137,8 @@ typedef struct {
     bool running;
     /* should this thread finish */
     bool quit;
-    /* array of pages to receive */
-    MultiFDPages_t *pages;
+    /* ramblock host address */
+    uint8_t *host;
     /* packet allocated len */
     uint32_t packet_len;
     /* pointer to the packet */

@@ -145,10 +152,16 @@ typedef struct {
     uint32_t next_packet_size;
     /* packets sent through this channel */
     uint64_t num_packets;
-    /* pages sent through this channel */
-    uint64_t num_pages;
+    /* non zero pages recv through this channel */
+    uint64_t total_normal_pages;
     /* syncs main thread and channels */
     QemuSemaphore sem_sync;
+    /* buffers to recv */
+    struct iovec *iov;
+    /* Pages that are not zero */
+    ram_addr_t *normal;
+    /* num of non zero pages */
+    uint32_t normal_num;
     /* used for de-compression methods */
     void *data;
 } MultiFDRecvParams;

@@ -160,8 +173,6 @@ typedef struct {
     void (*send_cleanup)(MultiFDSendParams *p, Error **errp);
     /* Prepare the send packet */
     int (*send_prepare)(MultiFDSendParams *p, Error **errp);
-    /* Write the send packet */
-    int (*send_write)(MultiFDSendParams *p, uint32_t used, Error **errp);
     /* Setup for receiving side */
     int (*recv_setup)(MultiFDRecvParams *p, Error **errp);
     /* Cleanup for receiving side */
@@ -283,15 +283,13 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
     }

 #ifdef UFFD_FEATURE_THREAD_ID
-    if (migrate_postcopy_blocktime() && mis &&
-        UFFD_FEATURE_THREAD_ID & supported_features) {
-        /* kernel supports that feature */
-        /* don't create blocktime_context if it exists */
-        if (!mis->blocktime_ctx) {
-            mis->blocktime_ctx = blocktime_context_new();
-        }
-
+    if (UFFD_FEATURE_THREAD_ID & supported_features) {
         asked_features |= UFFD_FEATURE_THREAD_ID;
+        if (migrate_postcopy_blocktime()) {
+            if (!mis->blocktime_ctx) {
+                mis->blocktime_ctx = blocktime_context_new();
+            }
+        }
     }
 #endif

@@ -525,6 +523,19 @@ int postcopy_ram_incoming_init(MigrationIncomingState *mis)
     return 0;
 }

+static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
+{
+    if (mis->postcopy_tmp_page) {
+        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
+        mis->postcopy_tmp_page = NULL;
+    }
+
+    if (mis->postcopy_tmp_zero_page) {
+        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
+        mis->postcopy_tmp_zero_page = NULL;
+    }
+}
+
 /*
  * At the end of a migration where postcopy_ram_incoming_init was called.
  */

@@ -566,14 +577,8 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
         }
     }

-    if (mis->postcopy_tmp_page) {
-        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
-        mis->postcopy_tmp_page = NULL;
-    }
-    if (mis->postcopy_tmp_zero_page) {
-        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
-        mis->postcopy_tmp_zero_page = NULL;
-    }
+    postcopy_temp_pages_cleanup(mis);
+
     trace_postcopy_ram_incoming_cleanup_blocktime(
         get_postcopy_total_blocktime());

@@ -1084,6 +1089,40 @@ retry:
     return NULL;
 }

+static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
+{
+    int err;
+
+    mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
+                                  PROT_READ | PROT_WRITE,
+                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if (mis->postcopy_tmp_page == MAP_FAILED) {
+        err = errno;
+        mis->postcopy_tmp_page = NULL;
+        error_report("%s: Failed to map postcopy_tmp_page %s",
+                     __func__, strerror(err));
+        return -err;
+    }
+
+    /*
+     * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
+     */
+    mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
+                                       PROT_READ | PROT_WRITE,
+                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
+        err = errno;
+        mis->postcopy_tmp_zero_page = NULL;
+        error_report("%s: Failed to map large zero page %s",
+                     __func__, strerror(err));
+        return -err;
+    }
+
+    memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
+
+    return 0;
+}
+
 int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
 {
     /* Open the fd for the kernel to give us userfaults */

@@ -1124,32 +1163,11 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
         return -1;
     }

-    mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
-                                  PROT_READ | PROT_WRITE, MAP_PRIVATE |
-                                  MAP_ANONYMOUS, -1, 0);
-    if (mis->postcopy_tmp_page == MAP_FAILED) {
-        mis->postcopy_tmp_page = NULL;
-        error_report("%s: Failed to map postcopy_tmp_page %s",
-                     __func__, strerror(errno));
+    if (postcopy_temp_pages_setup(mis)) {
+        /* Error dumped in the sub-function */
         return -1;
     }

-    /*
-     * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
-     */
-    mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
-                                       PROT_READ | PROT_WRITE,
-                                       MAP_PRIVATE | MAP_ANONYMOUS,
-                                       -1, 0);
-    if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
-        int e = errno;
-        mis->postcopy_tmp_zero_page = NULL;
-        error_report("%s: Failed to map large zero page %s",
-                     __func__, strerror(e));
-        return -e;
-    }
-    memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
-
     trace_postcopy_ram_enable_notify();

     return 0;
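
Both temporary pages are now mapped by a single helper, and both failure paths capture `errno` before reporting and return a negative errno (the old `postcopy_tmp_page` path read `errno` inline and returned a bare `-1`). A self-contained sketch of the pattern (hypothetical helper; QEMU reports through `error_report()` rather than `fprintf()`):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static int map_temp_page(void **out, size_t len)
{
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        int err = errno;                /* capture before any other call */
        fprintf(stderr, "mmap: %s\n", strerror(err));
        return -err;                    /* negative errno, as in the hunk */
    }
    *out = p;
    return 0;
}
```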
@@ -325,7 +325,8 @@ struct RAMState {
     uint64_t xbzrle_bytes_prev;
     /* Start using XBZRLE (e.g., after the first round). */
     bool xbzrle_enabled;
-
+    /* Are we on the last stage of migration */
+    bool last_stage;
     /* compression statistics since the beginning of the period */
     /* amount of count that no free thread to compress data */
     uint64_t compress_thread_busy_prev;

@@ -354,6 +355,12 @@ static RAMState *ram_state;

 static NotifierWithReturnList precopy_notifier_list;

+/* Whether postcopy has queued requests? */
+static bool postcopy_has_request(RAMState *rs)
+{
+    return !QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests);
+}
+
 void precopy_infrastructure_init(void)
 {
     notifier_with_return_list_init(&precopy_notifier_list);

@@ -386,6 +393,18 @@ uint64_t ram_bytes_remaining(void)

 MigrationStats ram_counters;

+static void ram_transferred_add(uint64_t bytes)
+{
+    if (runstate_is_running()) {
+        ram_counters.precopy_bytes += bytes;
+    } else if (migration_in_postcopy()) {
+        ram_counters.postcopy_bytes += bytes;
+    } else {
+        ram_counters.downtime_bytes += bytes;
+    }
+    ram_counters.transferred += bytes;
+}
+
 /* used by the search for pages to send */
 struct PageSearchStatus {
     /* Current block being searched */
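
`ram_transferred_add()` becomes the single classification point for transfer accounting: bytes sent while the VM still runs count as precopy, bytes sent once `migration_in_postcopy()` holds count as postcopy, and the remaining stopped-VM window counts as downtime; each call also feeds the pre-existing `transferred` total.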
@@ -683,11 +702,10 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
  * @current_addr: addr of the page
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
- * @last_stage: if we are at the completion stage
  */
 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                             ram_addr_t current_addr, RAMBlock *block,
-                            ram_addr_t offset, bool last_stage)
+                            ram_addr_t offset)
 {
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;

@@ -695,7 +713,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
     if (!cache_is_cached(XBZRLE.cache, current_addr,
                          ram_counters.dirty_sync_count)) {
         xbzrle_counters.cache_miss++;
-        if (!last_stage) {
+        if (!rs->last_stage) {
             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                              ram_counters.dirty_sync_count) == -1) {
                 return -1;

@@ -734,7 +752,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
      * Update the cache contents, so that it corresponds to the data
      * sent, in all cases except where we skip the page.
      */
-    if (!last_stage && encoded_len != 0) {
+    if (!rs->last_stage && encoded_len != 0) {
         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
         /*
          * In the case where we couldn't compress, ensure that the caller

@@ -767,7 +785,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
      * RAM_SAVE_FLAG_CONTINUE.
      */
     xbzrle_counters.bytes += bytes_xbzrle - 8;
-    ram_counters.transferred += bytes_xbzrle;
+    ram_transferred_add(bytes_xbzrle);

     return 1;
 }
@@ -1158,6 +1176,15 @@ static void migration_bitmap_sync_precopy(RAMState *rs) | ||
1158 | 1176 | } |
1159 | 1177 | } |
1160 | 1178 | |
1179 | +static void ram_release_page(const char *rbname, uint64_t offset) | |
1180 | +{ | |
1181 | + if (!migrate_release_ram() || !migration_in_postcopy()) { | |
1182 | + return; | |
1183 | + } | |
1184 | + | |
1185 | + ram_discard_range(rbname, offset, TARGET_PAGE_SIZE); | |
1186 | +} | |
1187 | + | |
1161 | 1188 | /** |
1162 | 1189 | * save_zero_page_to_file: send the zero page to the file |
1163 | 1190 | * |
@@ -1179,6 +1206,7 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file, | ||
1179 | 1206 | len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO); |
1180 | 1207 | qemu_put_byte(file, 0); |
1181 | 1208 | len += 1; |
1209 | + ram_release_page(block->idstr, offset); | |
1182 | 1210 | } |
1183 | 1211 | return len; |
1184 | 1212 | } |
@@ -1198,21 +1226,12 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) | ||
1198 | 1226 | |
1199 | 1227 | if (len) { |
1200 | 1228 | ram_counters.duplicate++; |
1201 | - ram_counters.transferred += len; | |
1229 | + ram_transferred_add(len); | |
1202 | 1230 | return 1; |
1203 | 1231 | } |
1204 | 1232 | return -1; |
1205 | 1233 | } |
1206 | 1234 | |
1207 | -static void ram_release_pages(const char *rbname, uint64_t offset, int pages) | |
1208 | -{ | |
1209 | - if (!migrate_release_ram() || !migration_in_postcopy()) { | |
1210 | - return; | |
1211 | - } | |
1212 | - | |
1213 | - ram_discard_range(rbname, offset, ((ram_addr_t)pages) << TARGET_PAGE_BITS); | |
1214 | -} | |
1215 | - | |
1216 | 1235 | /* |
1217 | 1236 | * @pages: the number of pages written by the control path, |
1218 | 1237 | * < 0 - error |
@@ -1234,7 +1253,7 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, | ||
1234 | 1253 | } |
1235 | 1254 | |
1236 | 1255 | if (bytes_xmit) { |
1237 | - ram_counters.transferred += bytes_xmit; | |
1256 | + ram_transferred_add(bytes_xmit); | |
1238 | 1257 | *pages = 1; |
1239 | 1258 | } |
1240 | 1259 |
@@ -1265,8 +1284,8 @@ static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, | ||
1265 | 1284 | static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, |
1266 | 1285 | uint8_t *buf, bool async) |
1267 | 1286 | { |
1268 | - ram_counters.transferred += save_page_header(rs, rs->f, block, | |
1269 | - offset | RAM_SAVE_FLAG_PAGE); | |
1287 | + ram_transferred_add(save_page_header(rs, rs->f, block, | |
1288 | + offset | RAM_SAVE_FLAG_PAGE)); | |
1270 | 1289 | if (async) { |
1271 | 1290 | qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE, |
1272 | 1291 | migrate_release_ram() & |
@@ -1274,7 +1293,7 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, | ||
1274 | 1293 | } else { |
1275 | 1294 | qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE); |
1276 | 1295 | } |
1277 | - ram_counters.transferred += TARGET_PAGE_SIZE; | |
1296 | + ram_transferred_add(TARGET_PAGE_SIZE); | |
1278 | 1297 | ram_counters.normal++; |
1279 | 1298 | return 1; |
1280 | 1299 | } |
@@ -1290,9 +1309,8 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, | ||
1290 | 1309 | * @rs: current RAM state |
1291 | 1310 | * @block: block that contains the page we want to send |
1292 | 1311 | * @offset: offset inside the block for the page |
1293 | - * @last_stage: if we are at the completion stage | |
1294 | 1312 | */ |
1295 | -static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) | |
1313 | +static int ram_save_page(RAMState *rs, PageSearchStatus *pss) | |
1296 | 1314 | { |
1297 | 1315 | int pages = -1; |
1298 | 1316 | uint8_t *p; |
@@ -1307,8 +1325,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) | ||
1307 | 1325 | XBZRLE_cache_lock(); |
1308 | 1326 | if (rs->xbzrle_enabled && !migration_in_postcopy()) { |
1309 | 1327 | pages = save_xbzrle_page(rs, &p, current_addr, block, |
1310 | - offset, last_stage); | |
1311 | - if (!last_stage) { | |
1328 | + offset); | |
1329 | + if (!rs->last_stage) { | |
1312 | 1330 | /* Can't send this cached data async, since the cache page |
1313 | 1331 | * might get updated before it gets to the wire |
1314 | 1332 | */ |
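The comment above is the heart of the rs->last_stage logic: before the final pass, the page just inserted into the XBZRLE cache can still be rewritten, so it must not be handed to an async send that merely queues the buffer pointer. A simplified sketch of the hazard and its avoidance (the actual code threads this through save_normal_page() via a send_async flag):

```c
/*
 * Simplified sketch: qemu_put_buffer() copies the page into the file
 * buffer immediately, so a later cache_insert() rewriting the XBZRLE
 * cache page cannot corrupt what goes on the wire.  An async send only
 * records the pointer and flushes later, which is only safe once
 * nothing can dirty the cache any more, i.e. on the last stage.
 */
if (!rs->last_stage) {
    qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
} else {
    qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE, false);
}
```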
@@ -1341,13 +1359,11 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, | ||
1341 | 1359 | ram_addr_t offset, uint8_t *source_buf) |
1342 | 1360 | { |
1343 | 1361 | RAMState *rs = ram_state; |
1344 | - uint8_t *p = block->host + (offset & TARGET_PAGE_MASK); | |
1345 | - bool zero_page = false; | |
1362 | + uint8_t *p = block->host + offset; | |
1346 | 1363 | int ret; |
1347 | 1364 | |
1348 | 1365 | if (save_zero_page_to_file(rs, f, block, offset)) { |
1349 | - zero_page = true; | |
1350 | - goto exit; | |
1366 | + return true; | |
1351 | 1367 | } |
1352 | 1368 | |
1353 | 1369 | save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); |
@@ -1362,18 +1378,14 @@ static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, | ||
1362 | 1378 | if (ret < 0) { |
1363 | 1379 | qemu_file_set_error(migrate_get_current()->to_dst_file, ret); |
1364 | 1380 | error_report("compressed data failed!"); |
1365 | - return false; | |
1366 | 1381 | } |
1367 | - | |
1368 | -exit: | |
1369 | - ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1); | |
1370 | - return zero_page; | |
1382 | + return false; | |
1371 | 1383 | } |
1372 | 1384 | |
1373 | 1385 | static void |
1374 | 1386 | update_compress_thread_counts(const CompressParam *param, int bytes_xmit) |
1375 | 1387 | { |
1376 | - ram_counters.transferred += bytes_xmit; | |
1388 | + ram_transferred_add(bytes_xmit); | |
1377 | 1389 | |
1378 | 1390 | if (param->zero_page) { |
1379 | 1391 | ram_counters.duplicate++; |
@@ -1533,30 +1545,42 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again) | ||
1533 | 1545 | */ |
1534 | 1546 | static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset) |
1535 | 1547 | { |
1548 | + struct RAMSrcPageRequest *entry; | |
1536 | 1549 | RAMBlock *block = NULL; |
1550 | + size_t page_size; | |
1537 | 1551 | |
1538 | - if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) { | |
1552 | + if (!postcopy_has_request(rs)) { | |
1539 | 1553 | return NULL; |
1540 | 1554 | } |
1541 | 1555 | |
1542 | 1556 | QEMU_LOCK_GUARD(&rs->src_page_req_mutex); |
1543 | - if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) { | |
1544 | - struct RAMSrcPageRequest *entry = | |
1545 | - QSIMPLEQ_FIRST(&rs->src_page_requests); | |
1546 | - block = entry->rb; | |
1547 | - *offset = entry->offset; | |
1548 | - | |
1549 | - if (entry->len > TARGET_PAGE_SIZE) { | |
1550 | - entry->len -= TARGET_PAGE_SIZE; | |
1551 | - entry->offset += TARGET_PAGE_SIZE; | |
1552 | - } else { | |
1553 | - memory_region_unref(block->mr); | |
1554 | - QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); | |
1555 | - g_free(entry); | |
1556 | - migration_consume_urgent_request(); | |
1557 | - } | |
1557 | + | |
1558 | + /* | |
1559 | + * This should _never_ change even after we take the lock, because no one | |
1560 | + * should be taking anything off the request list other than us. | |
1561 | + */ | |
1562 | + assert(postcopy_has_request(rs)); | |
1563 | + | |
1564 | + entry = QSIMPLEQ_FIRST(&rs->src_page_requests); | |
1565 | + block = entry->rb; | |
1566 | + *offset = entry->offset; | |
1567 | + page_size = qemu_ram_pagesize(block); | |
1568 | + /* Each page request should be a multiple of the ramblock's page size */ | 
1569 | + assert((entry->len % page_size) == 0); | |
1570 | + | |
1571 | + if (entry->len > page_size) { | |
1572 | + entry->len -= page_size; | |
1573 | + entry->offset += page_size; | |
1574 | + } else { | |
1575 | + memory_region_unref(block->mr); | |
1576 | + QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); | |
1577 | + g_free(entry); | |
1578 | + migration_consume_urgent_request(); | |
1558 | 1579 | } |
1559 | 1580 | |
1581 | + trace_unqueue_page(block->idstr, *offset, | |
1582 | + test_bit((*offset >> TARGET_PAGE_BITS), block->bmap)); | |
1583 | + | |
1560 | 1584 | return block; |
1561 | 1585 | } |
1562 | 1586 |
@@ -1611,7 +1635,7 @@ static int ram_save_release_protection(RAMState *rs, PageSearchStatus *pss, | ||
1611 | 1635 | /* Check if page is from UFFD-managed region. */ |
1612 | 1636 | if (pss->block->flags & RAM_UF_WRITEPROTECT) { |
1613 | 1637 | void *page_address = pss->block->host + (start_page << TARGET_PAGE_BITS); |
1614 | - uint64_t run_length = (pss->page - start_page + 1) << TARGET_PAGE_BITS; | |
1638 | + uint64_t run_length = (pss->page - start_page) << TARGET_PAGE_BITS; | |
1615 | 1639 | |
1616 | 1640 | /* Flush async buffers before un-protect. */ |
1617 | 1641 | qemu_fflush(rs->f); |
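Dropping the "+ 1" here pairs with the ram_save_host_page() hunk further down, where pss->page is now left at the host page boundary itself (one past the last page handled) rather than boundary - 1. A worked example with hypothetical values:

```c
/* Hypothetical values: start_page == 0x10 and ram_save_host_page() left
 * pss->page == 0x18, one past the last target page it saved.  The
 * unprotect run then covers exactly the 8 pages that were written: */
unsigned long start_page = 0x10, page = 0x18;
uint64_t run_length = (page - start_page) << TARGET_PAGE_BITS; /* 8 pages */
```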
@@ -1931,30 +1955,8 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) | ||
1931 | 1955 | { |
1932 | 1956 | RAMBlock *block; |
1933 | 1957 | ram_addr_t offset; |
1934 | - bool dirty; | |
1935 | - | |
1936 | - do { | |
1937 | - block = unqueue_page(rs, &offset); | |
1938 | - /* | |
1939 | - * We're sending this page, and since it's postcopy nothing else | |
1940 | - * will dirty it, and we must make sure it doesn't get sent again | |
1941 | - * even if this queue request was received after the background | |
1942 | - * search already sent it. | |
1943 | - */ | |
1944 | - if (block) { | |
1945 | - unsigned long page; | |
1946 | 1958 | |
1947 | - page = offset >> TARGET_PAGE_BITS; | |
1948 | - dirty = test_bit(page, block->bmap); | |
1949 | - if (!dirty) { | |
1950 | - trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, | |
1951 | - page); | |
1952 | - } else { | |
1953 | - trace_get_queued_page(block->idstr, (uint64_t)offset, page); | |
1954 | - } | |
1955 | - } | |
1956 | - | |
1957 | - } while (block && !dirty); | |
1959 | + block = unqueue_page(rs, &offset); | |
1958 | 1960 | |
1959 | 1961 | if (!block) { |
1960 | 1962 | /* |
@@ -2129,10 +2131,8 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) | ||
2129 | 2131 | * |
2130 | 2132 | * @rs: current RAM state |
2131 | 2133 | * @pss: data about the page we want to send |
2132 | - * @last_stage: if we are at the completion stage | |
2133 | 2134 | */ |
2134 | -static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, | |
2135 | - bool last_stage) | |
2135 | +static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss) | |
2136 | 2136 | { |
2137 | 2137 | RAMBlock *block = pss->block; |
2138 | 2138 | ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS; |
@@ -2156,7 +2156,6 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, | ||
2156 | 2156 | xbzrle_cache_zero_page(rs, block->offset + offset); |
2157 | 2157 | XBZRLE_cache_unlock(); |
2158 | 2158 | } |
2159 | - ram_release_pages(block->idstr, offset, res); | |
2160 | 2159 | return res; |
2161 | 2160 | } |
2162 | 2161 |
@@ -2171,7 +2170,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, | ||
2171 | 2170 | return ram_save_multifd_page(rs, block, offset); |
2172 | 2171 | } |
2173 | 2172 | |
2174 | - return ram_save_page(rs, pss, last_stage); | |
2173 | + return ram_save_page(rs, pss); | |
2175 | 2174 | } |
2176 | 2175 | |
2177 | 2176 | /** |
@@ -2188,12 +2187,9 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, | ||
2188 | 2187 | * Returns the number of pages written or negative on error |
2189 | 2188 | * |
2190 | 2189 | * @rs: current RAM state |
2191 | - * @ms: current migration state | |
2192 | 2190 | * @pss: data about the page we want to send |
2193 | - * @last_stage: if we are at the completion stage | |
2194 | 2191 | */ |
2195 | -static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, | |
2196 | - bool last_stage) | |
2192 | +static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) | |
2197 | 2193 | { |
2198 | 2194 | int tmppages, pages = 0; |
2199 | 2195 | size_t pagesize_bits = |
@@ -2211,7 +2207,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, | ||
2211 | 2207 | do { |
2212 | 2208 | /* Check if the page is dirty, and send it if so */ |
2213 | 2209 | if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) { |
2214 | - tmppages = ram_save_target_page(rs, pss, last_stage); | |
2210 | + tmppages = ram_save_target_page(rs, pss); | |
2215 | 2211 | if (tmppages < 0) { |
2216 | 2212 | return tmppages; |
2217 | 2213 | } |
@@ -2230,7 +2226,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, | ||
2230 | 2226 | offset_in_ramblock(pss->block, |
2231 | 2227 | ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)); |
2232 | 2228 | /* The offset we leave with is the min of the host page and block boundaries */ |
2233 | - pss->page = MIN(pss->page, hostpage_boundary) - 1; | |
2229 | + pss->page = MIN(pss->page, hostpage_boundary); | |
2234 | 2230 | |
2235 | 2231 | res = ram_save_release_protection(rs, pss, start_page); |
2236 | 2232 | return (res < 0 ? res : pages); |
@@ -2245,13 +2241,11 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, | ||
2245 | 2241 | * or negative on error |
2246 | 2242 | * |
2247 | 2243 | * @rs: current RAM state |
2248 | - * @last_stage: if we are at the completion stage | |
2249 | 2244 | * |
2250 | 2245 | * On systems where host-page-size > target-page-size it will send all the |
2251 | 2246 | * pages in a host page that are dirty. |
2252 | 2247 | */ |
2253 | - | |
2254 | -static int ram_find_and_save_block(RAMState *rs, bool last_stage) | |
2248 | +static int ram_find_and_save_block(RAMState *rs) | |
2255 | 2249 | { |
2256 | 2250 | PageSearchStatus pss; |
2257 | 2251 | int pages = 0; |
@@ -2280,7 +2274,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) | ||
2280 | 2274 | } |
2281 | 2275 | |
2282 | 2276 | if (found) { |
2283 | - pages = ram_save_host_page(rs, &pss, last_stage); | |
2277 | + pages = ram_save_host_page(rs, &pss); | |
2284 | 2278 | } |
2285 | 2279 | } while (!pages && again); |
2286 | 2280 |
@@ -2298,7 +2292,7 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero) | ||
2298 | 2292 | ram_counters.duplicate += pages; |
2299 | 2293 | } else { |
2300 | 2294 | ram_counters.normal += pages; |
2301 | - ram_counters.transferred += size; | |
2295 | + ram_transferred_add(size); | |
2302 | 2296 | qemu_update_position(f, size); |
2303 | 2297 | } |
2304 | 2298 | } |
@@ -2408,40 +2402,6 @@ static void ram_state_reset(RAMState *rs) | ||
2408 | 2402 | |
2409 | 2403 | #define MAX_WAIT 50 /* ms, half buffered_file limit */ |
2410 | 2404 | |
2411 | -/* | |
2412 | - * 'expected' is the value you expect the bitmap mostly to be full | |
2413 | - * of; it won't bother printing lines that are all this value. | |
2414 | - * If 'todump' is null the migration bitmap is dumped. | |
2415 | - */ | |
2416 | -void ram_debug_dump_bitmap(unsigned long *todump, bool expected, | |
2417 | - unsigned long pages) | |
2418 | -{ | |
2419 | - int64_t cur; | |
2420 | - int64_t linelen = 128; | |
2421 | - char linebuf[129]; | |
2422 | - | |
2423 | - for (cur = 0; cur < pages; cur += linelen) { | |
2424 | - int64_t curb; | |
2425 | - bool found = false; | |
2426 | - /* | |
2427 | - * Last line; catch the case where the line length | |
2428 | - * is longer than remaining ram | |
2429 | - */ | |
2430 | - if (cur + linelen > pages) { | |
2431 | - linelen = pages - cur; | |
2432 | - } | |
2433 | - for (curb = 0; curb < linelen; curb++) { | |
2434 | - bool thisbit = test_bit(cur + curb, todump); | |
2435 | - linebuf[curb] = thisbit ? '1' : '.'; | |
2436 | - found = found || (thisbit != expected); | |
2437 | - } | |
2438 | - if (found) { | |
2439 | - linebuf[curb] = '\0'; | |
2440 | - fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf); | |
2441 | - } | |
2442 | - } | |
2443 | -} | |
2444 | - | |
2445 | 2405 | /* **** functions for postcopy ***** */ |
2446 | 2406 | |
2447 | 2407 | void ram_postcopy_migrated_memory_release(MigrationState *ms) |
@@ -2467,14 +2427,12 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms) | ||
2467 | 2427 | /** |
2468 | 2428 | * postcopy_send_discard_bm_ram: discard a RAMBlock |
2469 | 2429 | * |
2470 | - * Returns zero on success | |
2471 | - * | |
2472 | 2430 | * Callback from postcopy_each_ram_send_discard for each RAMBlock |
2473 | 2431 | * |
2474 | 2432 | * @ms: current migration state |
2475 | 2433 | * @block: RAMBlock to discard |
2476 | 2434 | */ |
2477 | -static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) | |
2435 | +static void postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) | |
2478 | 2436 | { |
2479 | 2437 | unsigned long end = block->used_length >> TARGET_PAGE_BITS; |
2480 | 2438 | unsigned long current; |
@@ -2498,15 +2456,13 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) | ||
2498 | 2456 | postcopy_discard_send_range(ms, one, discard_length); |
2499 | 2457 | current = one + discard_length; |
2500 | 2458 | } |
2501 | - | |
2502 | - return 0; | |
2503 | 2459 | } |
2504 | 2460 | |
2461 | +static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block); | |
2462 | + | |
2505 | 2463 | /** |
2506 | 2464 | * postcopy_each_ram_send_discard: discard all RAMBlocks |
2507 | 2465 | * |
2508 | - * Returns 0 for success or negative for error | |
2509 | - * | |
2510 | 2466 | * Utility for the outgoing postcopy code. |
2511 | 2467 | * Calls postcopy_send_discard_bm_ram for each RAMBlock |
2512 | 2468 | * passing it bitmap indexes and name. |
@@ -2515,27 +2471,29 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block) | ||
2515 | 2471 | * |
2516 | 2472 | * @ms: current migration state |
2517 | 2473 | */ |
2518 | -static int postcopy_each_ram_send_discard(MigrationState *ms) | |
2474 | +static void postcopy_each_ram_send_discard(MigrationState *ms) | |
2519 | 2475 | { |
2520 | 2476 | struct RAMBlock *block; |
2521 | - int ret; | |
2522 | 2477 | |
2523 | 2478 | RAMBLOCK_FOREACH_NOT_IGNORED(block) { |
2524 | 2479 | postcopy_discard_send_init(ms, block->idstr); |
2525 | 2480 | |
2526 | 2481 | /* |
2482 | + * Deal with TPS != HPS and huge pages. It discards any partially sent | 
2483 | + * host-page size chunks and marks any partially dirty host-page size | 
2484 | + * chunks as all dirty. In this case the host page is the host page | 
2485 | + * for the particular RAMBlock, i.e. it might be a huge page. | 
2486 | + */ | |
2487 | + postcopy_chunk_hostpages_pass(ms, block); | |
2488 | + | |
2489 | + /* | |
2527 | 2490 | * Postcopy sends chunks of bitmap over the wire, but it |
2528 | 2491 | * just needs indexes at this point, which avoids it having |
2529 | 2492 | * target page specific code. |
2530 | 2493 | */ |
2531 | - ret = postcopy_send_discard_bm_ram(ms, block); | |
2494 | + postcopy_send_discard_bm_ram(ms, block); | |
2532 | 2495 | postcopy_discard_send_finish(ms); |
2533 | - if (ret) { | |
2534 | - return ret; | |
2535 | - } | |
2536 | 2496 | } |
2537 | - | |
2538 | - return 0; | |
2539 | 2497 | } |
2540 | 2498 | |
2541 | 2499 | /** |
@@ -2606,37 +2564,8 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block) | ||
2606 | 2564 | } |
2607 | 2565 | |
2608 | 2566 | /** |
2609 | - * postcopy_chunk_hostpages: discard any partially sent host page | |
2610 | - * | |
2611 | - * Utility for the outgoing postcopy code. | |
2612 | - * | |
2613 | - * Discard any partially sent host-page size chunks, mark any partially | |
2614 | - * dirty host-page size chunks as all dirty. In this case the host-page | |
2615 | - * is the host-page for the particular RAMBlock, i.e. it might be a huge page | |
2616 | - * | |
2617 | - * Returns zero on success | |
2618 | - * | |
2619 | - * @ms: current migration state | |
2620 | - * @block: block we want to work with | |
2621 | - */ | |
2622 | -static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block) | |
2623 | -{ | |
2624 | - postcopy_discard_send_init(ms, block->idstr); | |
2625 | - | |
2626 | - /* | |
2627 | - * Ensure that all partially dirty host pages are made fully dirty. | |
2628 | - */ | |
2629 | - postcopy_chunk_hostpages_pass(ms, block); | |
2630 | - | |
2631 | - postcopy_discard_send_finish(ms); | |
2632 | - return 0; | |
2633 | -} | |
2634 | - | |
2635 | -/** | |
2636 | 2567 | * ram_postcopy_send_discard_bitmap: transmit the discard bitmap |
2637 | 2568 | * |
2638 | - * Returns zero on success | |
2639 | - * | |
2640 | 2569 | * Transmit the set of pages to be discarded after precopy to the target; |
2641 | 2570 | * these are pages that: |
2642 | 2571 | * a) Have been previously transmitted but are now dirty again |
@@ -2647,11 +2576,9 @@ static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block) | ||
2647 | 2576 | * |
2648 | 2577 | * @ms: current migration state |
2649 | 2578 | */ |
2650 | -int ram_postcopy_send_discard_bitmap(MigrationState *ms) | |
2579 | +void ram_postcopy_send_discard_bitmap(MigrationState *ms) | |
2651 | 2580 | { |
2652 | 2581 | RAMState *rs = ram_state; |
2653 | - RAMBlock *block; | |
2654 | - int ret; | |
2655 | 2582 | |
2656 | 2583 | RCU_READ_LOCK_GUARD(); |
2657 | 2584 |
@@ -2663,21 +2590,9 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms) | ||
2663 | 2590 | rs->last_sent_block = NULL; |
2664 | 2591 | rs->last_page = 0; |
2665 | 2592 | |
2666 | - RAMBLOCK_FOREACH_NOT_IGNORED(block) { | |
2667 | - /* Deal with TPS != HPS and huge pages */ | |
2668 | - ret = postcopy_chunk_hostpages(ms, block); | |
2669 | - if (ret) { | |
2670 | - return ret; | |
2671 | - } | |
2593 | + postcopy_each_ram_send_discard(ms); | |
2672 | 2594 | |
2673 | -#ifdef DEBUG_POSTCOPY | |
2674 | - ram_debug_dump_bitmap(block->bmap, true, | |
2675 | - block->used_length >> TARGET_PAGE_BITS); | |
2676 | -#endif | |
2677 | - } | |
2678 | 2595 | trace_ram_postcopy_send_discard_bitmap(); |
2679 | - | |
2680 | - return postcopy_each_ram_send_discard(ms); | |
2681 | 2596 | } |
2682 | 2597 | |
2683 | 2598 | /** |
@@ -3073,14 +2988,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) | ||
3073 | 2988 | t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
3074 | 2989 | i = 0; |
3075 | 2990 | while ((ret = qemu_file_rate_limit(f)) == 0 || |
3076 | - !QSIMPLEQ_EMPTY(&rs->src_page_requests)) { | |
2991 | + postcopy_has_request(rs)) { | |
3077 | 2992 | int pages; |
3078 | 2993 | |
3079 | 2994 | if (qemu_file_get_error(f)) { |
3080 | 2995 | break; |
3081 | 2996 | } |
3082 | 2997 | |
3083 | - pages = ram_find_and_save_block(rs, false); | |
2998 | + pages = ram_find_and_save_block(rs); | |
3084 | 2999 | /* no more pages to send */ |
3085 | 3000 | if (pages == 0) { |
3086 | 3001 | done = 1; |
@@ -3133,7 +3048,7 @@ out: | ||
3133 | 3048 | multifd_send_sync_main(rs->f); |
3134 | 3049 | qemu_put_be64(f, RAM_SAVE_FLAG_EOS); |
3135 | 3050 | qemu_fflush(f); |
3136 | - ram_counters.transferred += 8; | |
3051 | + ram_transferred_add(8); | |
3137 | 3052 | |
3138 | 3053 | ret = qemu_file_get_error(f); |
3139 | 3054 | } |
@@ -3160,6 +3075,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque) | ||
3160 | 3075 | RAMState *rs = *temp; |
3161 | 3076 | int ret = 0; |
3162 | 3077 | |
3078 | + rs->last_stage = !migration_in_colo_state(); | |
3079 | + | |
3163 | 3080 | WITH_RCU_READ_LOCK_GUARD() { |
3164 | 3081 | if (!migration_in_postcopy()) { |
3165 | 3082 | migration_bitmap_sync_precopy(rs); |
@@ -3173,7 +3090,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) | ||
3173 | 3090 | while (true) { |
3174 | 3091 | int pages; |
3175 | 3092 | |
3176 | - pages = ram_find_and_save_block(rs, !migration_in_colo_state()); | |
3093 | + pages = ram_find_and_save_block(rs); | |
3177 | 3094 | /* no more blocks to send */ |
3178 | 3095 | if (pages == 0) { |
3179 | 3096 | break; |
@@ -55,11 +55,9 @@ void mig_throttle_counter_reset(void); | ||
55 | 55 | uint64_t ram_pagesize_summary(void); |
56 | 56 | int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len); |
57 | 57 | void acct_update_position(QEMUFile *f, size_t size, bool zero); |
58 | -void ram_debug_dump_bitmap(unsigned long *todump, bool expected, | |
59 | - unsigned long pages); | |
60 | 58 | void ram_postcopy_migrated_memory_release(MigrationState *ms); |
61 | 59 | /* For outgoing discard bitmap */ |
62 | -int ram_postcopy_send_discard_bitmap(MigrationState *ms); | |
60 | +void ram_postcopy_send_discard_bitmap(MigrationState *ms); | |
63 | 61 | /* For incoming postcopy discard */ |
64 | 62 | int ram_discard_range(const char *block_name, uint64_t start, size_t length); |
65 | 63 | int ram_postcopy_incoming_init(MigrationIncomingState *mis); |
@@ -1298,8 +1298,9 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) | ||
1298 | 1298 | save_section_footer(f, se); |
1299 | 1299 | |
1300 | 1300 | if (ret < 0) { |
1301 | - error_report("failed to save SaveStateEntry with id(name): %d(%s)", | |
1302 | - se->section_id, se->idstr); | |
1301 | + error_report("failed to save SaveStateEntry with id(name): " | |
1302 | + "%d(%s): %d", | |
1303 | + se->section_id, se->idstr, ret); | |
1303 | 1304 | qemu_file_set_error(f, ret); |
1304 | 1305 | } |
1305 | 1306 | if (ret <= 0) { |
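With the return value appended, a failed iteration now identifies the errno-style code as well as the device. An illustrative message, formatted per the string above (section id, name and code are hypothetical):

```
failed to save SaveStateEntry with id(name): 2(ram): -5
```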
@@ -86,8 +86,6 @@ put_qlist_end(const char *field_name, const char *vmsd_name) "%s(%s)" | ||
86 | 86 | qemu_file_fclose(void) "" |
87 | 87 | |
88 | 88 | # ram.c |
89 | -get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx" | |
90 | -get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/0x%" PRIx64 " page_abs=0x%lx" | |
91 | 89 | migration_bitmap_sync_start(void) "" |
92 | 90 | migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64 |
93 | 91 | migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx" |
@@ -113,25 +111,26 @@ ram_save_iterate_big_wait(uint64_t milliconds, int iterations) "big wait: %" PRI | ||
113 | 111 | ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64 |
114 | 112 | ram_write_tracking_ramblock_start(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" |
115 | 113 | ram_write_tracking_ramblock_stop(const char *block_id, size_t page_size, void *addr, size_t length) "%s: page_size: %zu addr: %p length: %zu" |
114 | +unqueue_page(char *block, uint64_t offset, bool dirty) "ramblock '%s' offset 0x%"PRIx64" dirty %d" | |
116 | 115 | |
117 | 116 | # multifd.c |
118 | -multifd_new_send_channel_async(uint8_t id) "channel %d" | |
119 | -multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d" | |
120 | -multifd_recv_new_channel(uint8_t id) "channel %d" | |
117 | +multifd_new_send_channel_async(uint8_t id) "channel %u" | |
118 | +multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " pages %u flags 0x%x next packet size %u" | |
119 | +multifd_recv_new_channel(uint8_t id) "channel %u" | |
121 | 120 | multifd_recv_sync_main(long packet_num) "packet num %ld" |
122 | -multifd_recv_sync_main_signal(uint8_t id) "channel %d" | |
123 | -multifd_recv_sync_main_wait(uint8_t id) "channel %d" | |
121 | +multifd_recv_sync_main_signal(uint8_t id) "channel %u" | |
122 | +multifd_recv_sync_main_wait(uint8_t id) "channel %u" | |
124 | 123 | multifd_recv_terminate_threads(bool error) "error %d" |
125 | -multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64 | |
126 | -multifd_recv_thread_start(uint8_t id) "%d" | |
127 | -multifd_send(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d" | |
128 | -multifd_send_error(uint8_t id) "channel %d" | |
124 | +multifd_recv_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %u packets %" PRIu64 " pages %" PRIu64 | |
125 | +multifd_recv_thread_start(uint8_t id) "%u" | |
126 | +multifd_send(uint8_t id, uint64_t packet_num, uint32_t normal, uint32_t flags, uint32_t next_packet_size) "channel %u packet_num %" PRIu64 " normal pages %u flags 0x%x next packet size %u" | |
127 | +multifd_send_error(uint8_t id) "channel %u" | |
129 | 128 | multifd_send_sync_main(long packet_num) "packet num %ld" |
130 | -multifd_send_sync_main_signal(uint8_t id) "channel %d" | |
131 | -multifd_send_sync_main_wait(uint8_t id) "channel %d" | |
129 | +multifd_send_sync_main_signal(uint8_t id) "channel %u" | |
130 | +multifd_send_sync_main_wait(uint8_t id) "channel %u" | |
132 | 131 | multifd_send_terminate_threads(bool error) "error %d" |
133 | -multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t pages) "channel %d packets %" PRIu64 " pages %" PRIu64 | |
134 | -multifd_send_thread_start(uint8_t id) "%d" | |
132 | +multifd_send_thread_end(uint8_t id, uint64_t packets, uint64_t normal_pages) "channel %u packets %" PRIu64 " normal pages %" PRIu64 | |
133 | +multifd_send_thread_start(uint8_t id) "%u" | |
135 | 134 | multifd_tls_outgoing_handshake_start(void *ioc, void *tioc, const char *hostname) "ioc=%p tioc=%p hostname=%s" |
136 | 135 | multifd_tls_outgoing_handshake_error(void *ioc, const char *err) "ioc=%p err=%s" |
137 | 136 | multifd_tls_outgoing_handshake_complete(void *ioc) "ioc=%p" |
@@ -293,6 +293,18 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) | ||
293 | 293 | monitor_printf(mon, "postcopy request count: %" PRIu64 "\n", |
294 | 294 | info->ram->postcopy_requests); |
295 | 295 | } |
296 | + if (info->ram->precopy_bytes) { | |
297 | + monitor_printf(mon, "precopy ram: %" PRIu64 " kbytes\n", | |
298 | + info->ram->precopy_bytes >> 10); | |
299 | + } | |
300 | + if (info->ram->downtime_bytes) { | |
301 | + monitor_printf(mon, "downtime ram: %" PRIu64 " kbytes\n", | |
302 | + info->ram->downtime_bytes >> 10); | |
303 | + } | |
304 | + if (info->ram->postcopy_bytes) { | |
305 | + monitor_printf(mon, "postcopy ram: %" PRIu64 " kbytes\n", | |
306 | + info->ram->postcopy_bytes >> 10); | |
307 | + } | |
296 | 308 | } |
297 | 309 | |
298 | 310 | if (info->has_disk) { |
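The three new counters are printed only once non-zero, matching the style of the surrounding fields. An illustrative `info migrate` excerpt (numbers are hypothetical):

```
precopy ram: 1048576 kbytes
downtime ram: 2048 kbytes
postcopy ram: 524288 kbytes
```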
@@ -46,6 +46,15 @@ | ||
46 | 46 | # @pages-per-second: the number of memory pages transferred per second |
47 | 47 | # (Since 4.0) |
48 | 48 | # |
49 | +# @precopy-bytes: The number of bytes sent in the pre-copy phase | |
50 | +# (since 7.0). | |
51 | +# | |
52 | +# @downtime-bytes: The number of bytes sent while the guest is paused | |
53 | +# (since 7.0). | |
54 | +# | |
55 | +# @postcopy-bytes: The number of bytes sent during the post-copy phase | |
56 | +# (since 7.0). | |
57 | +# | |
49 | 58 | # Since: 0.14 |
50 | 59 | ## |
51 | 60 | { 'struct': 'MigrationStats', |
@@ -54,7 +63,9 @@ | ||
54 | 63 | 'normal-bytes': 'int', 'dirty-pages-rate' : 'int', |
55 | 64 | 'mbps' : 'number', 'dirty-sync-count' : 'int', |
56 | 65 | 'postcopy-requests' : 'int', 'page-size' : 'int', |
57 | - 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64' } } | |
66 | + 'multifd-bytes' : 'uint64', 'pages-per-second' : 'uint64', | |
67 | + 'precopy-bytes' : 'uint64', 'downtime-bytes' : 'uint64', | |
68 | + 'postcopy-bytes' : 'uint64' } } | |
58 | 69 | |
59 | 70 | ## |
60 | 71 | # @XBZRLECacheStats: |
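The same three counters surface in QMP through query-migrate. A trimmed, hypothetical response fragment (values invented, most other MigrationStats fields omitted):

```json
{ "return": { "ram": {
      "transferred": 1612645888,
      "precopy-bytes": 1073741824,
      "downtime-bytes": 2097152,
      "postcopy-bytes": 536870912 } } }
```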
@@ -25,7 +25,6 @@ static const VMStateDescription vmstate_tlb_entry = { | ||
25 | 25 | .name = "tlb_entry", |
26 | 26 | .version_id = 1, |
27 | 27 | .minimum_version_id = 1, |
28 | - .minimum_version_id_old = 1, | |
29 | 28 | .fields = (VMStateField[]) { |
30 | 29 | VMSTATE_UINTTL(mr, OpenRISCTLBEntry), |
31 | 30 | VMSTATE_UINTTL(tr, OpenRISCTLBEntry), |
@@ -421,7 +421,6 @@ static const VMStateDescription vmstate_tm = { | ||
421 | 421 | .name = "cpu/tm", |
422 | 422 | .version_id = 1, |
423 | 423 | .minimum_version_id = 1, |
424 | - .minimum_version_id_old = 1, | |
425 | 424 | .needed = tm_needed, |
426 | 425 | .fields = (VMStateField []) { |
427 | 426 | VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32), |
@@ -672,7 +671,6 @@ const VMStateDescription vmstate_ppc_cpu = { | ||
672 | 671 | .name = "cpu", |
673 | 672 | .version_id = 5, |
674 | 673 | .minimum_version_id = 5, |
675 | - .minimum_version_id_old = 4, | |
676 | 674 | .pre_save = cpu_pre_save, |
677 | 675 | .post_load = cpu_post_load, |
678 | 676 | .fields = (VMStateField[]) { |
@@ -10,7 +10,6 @@ static const VMStateDescription vmstate_cpu_timer = { | ||
10 | 10 | .name = "cpu_timer", |
11 | 11 | .version_id = 1, |
12 | 12 | .minimum_version_id = 1, |
13 | - .minimum_version_id_old = 1, | |
14 | 13 | .fields = (VMStateField[]) { |
15 | 14 | VMSTATE_UINT32(frequency, CPUTimer), |
16 | 15 | VMSTATE_UINT32(disabled, CPUTimer), |
@@ -30,7 +29,6 @@ static const VMStateDescription vmstate_trap_state = { | ||
30 | 29 | .name = "trap_state", |
31 | 30 | .version_id = 1, |
32 | 31 | .minimum_version_id = 1, |
33 | - .minimum_version_id_old = 1, | |
34 | 32 | .fields = (VMStateField[]) { |
35 | 33 | VMSTATE_UINT64(tpc, trap_state), |
36 | 34 | VMSTATE_UINT64(tnpc, trap_state), |
@@ -44,7 +42,6 @@ static const VMStateDescription vmstate_tlb_entry = { | ||
44 | 42 | .name = "tlb_entry", |
45 | 43 | .version_id = 1, |
46 | 44 | .minimum_version_id = 1, |
47 | - .minimum_version_id_old = 1, | |
48 | 45 | .fields = (VMStateField[]) { |
49 | 46 | VMSTATE_UINT64(tag, SparcTLBEntry), |
50 | 47 | VMSTATE_UINT64(tte, SparcTLBEntry), |
@@ -113,7 +110,6 @@ const VMStateDescription vmstate_sparc_cpu = { | ||
113 | 110 | .name = "cpu", |
114 | 111 | .version_id = SPARC_VMSTATE_VER, |
115 | 112 | .minimum_version_id = SPARC_VMSTATE_VER, |
116 | - .minimum_version_id_old = SPARC_VMSTATE_VER, | |
117 | 113 | .pre_save = cpu_pre_save, |
118 | 114 | .fields = (VMStateField[]) { |
119 | 115 | VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8), |
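These .minimum_version_id_old removals are dead-code cleanup: vmstate_load_state() only consults that field when a .load_state_old callback is set, which none of these descriptions have, so even the ppc case, where the old value (4) differed from .minimum_version_id (5), loses nothing. A minimal post-cleanup shape, with hypothetical names:

```c
/* Hypothetical device state illustrating the post-cleanup minimal shape:
 * without a .load_state_old callback, .minimum_version_id_old is dead. */
static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(bar, FooState),
        VMSTATE_END_OF_LIST()
    }
};
```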