Revision | 61c265f0660ee476985808c8aa7915617c44fd53 (tree)
---|---
Time | 2020-03-13 19:33:04
Author | Peter Maydell <peter.maydell@linaro.org>
Committer | Peter Maydell
Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20200313a' into staging
Migration pull 2020-03-13
zstd build fix
A new auto-converge parameter
Some COLO improvements
# gpg: Signature made Fri 13 Mar 2020 10:29:34 GMT
# gpg: using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7
* remotes/dgilbert/tags/pull-migration-20200313a:
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
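
The headline feature of this pull is the new `throttle-trigger-threshold` migration parameter added below. As a quick orientation, it can be inspected and tuned like any other migration parameter; this is an illustrative HMP session based on the handlers added in this pull, not part of the commit itself:

```
(qemu) migrate_set_parameter throttle-trigger-threshold 60
(qemu) info migrate_parameters
```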
--- a/configure
+++ b/configure
@@ -2475,7 +2475,8 @@ fi
 # zstd check
 
 if test "$zstd" != "no" ; then
-    if $pkg_config --exist libzstd ; then
+    libzstd_minver="1.4.0"
+    if $pkg_config --atleast-version=$libzstd_minver libzstd ; then
         zstd_cflags="$($pkg_config --cflags libzstd)"
         zstd_libs="$($pkg_config --libs libzstd)"
         LIBS="$zstd_libs $LIBS"
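
The check above now requires libzstd >= 1.4.0 instead of mere presence. The same gate can be reproduced by hand when debugging a failed probe (a sketch, assuming pkg-config and libzstd's .pc file are installed):

```sh
# Same test configure now performs:
pkg-config --atleast-version=1.4.0 libzstd && echo "libzstd >= 1.4.0 found"
pkg-config --modversion libzstd    # show which version is actually installed
```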
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -26,6 +26,7 @@
 #include "qemu/main-loop.h"
 #include "qemu/rcu.h"
 #include "migration/failover.h"
+#include "migration/ram.h"
 #ifdef CONFIG_REPLICATION
 #include "replication.h"
 #endif
@@ -845,6 +846,8 @@ void *colo_process_incoming_thread(void *opaque)
      */
     qemu_file_set_blocking(mis->from_src_file, true);
 
+    colo_incoming_start_dirty_log();
+
     bioc = qio_channel_buffer_new(COLO_BUFFER_BASE_SIZE);
     fb = qemu_fopen_channel_input(QIO_CHANNEL(bioc));
     object_unref(OBJECT(bioc));
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -78,6 +78,7 @@
 /*0: means nocompress, 1: best speed, ... 9: best compress ratio */
 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
 /* Define default autoconverge cpu throttle migration parameters */
+#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
 #define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99
@@ -778,6 +779,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
     params->compress_wait_thread = s->parameters.compress_wait_thread;
     params->has_decompress_threads = true;
     params->decompress_threads = s->parameters.decompress_threads;
+    params->has_throttle_trigger_threshold = true;
+    params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
     params->has_cpu_throttle_initial = true;
     params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
     params->has_cpu_throttle_increment = true;
@@ -851,6 +854,7 @@ bool migration_is_setup_or_active(int state)
     case MIGRATION_STATUS_PRE_SWITCHOVER:
     case MIGRATION_STATUS_DEVICE:
     case MIGRATION_STATUS_WAIT_UNPLUG:
+    case MIGRATION_STATUS_COLO:
         return true;
 
     default:
@@ -1169,6 +1173,15 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp)
         return false;
     }
 
+    if (params->has_throttle_trigger_threshold &&
+        (params->throttle_trigger_threshold < 1 ||
+         params->throttle_trigger_threshold > 100)) {
+        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
+                   "throttle_trigger_threshold",
+                   "an integer in the range of 1 to 100");
+        return false;
+    }
+
     if (params->has_cpu_throttle_initial &&
         (params->cpu_throttle_initial < 1 ||
          params->cpu_throttle_initial > 99)) {
@@ -1298,6 +1311,10 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
         dest->decompress_threads = params->decompress_threads;
     }
 
+    if (params->has_throttle_trigger_threshold) {
+        dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
+    }
+
     if (params->has_cpu_throttle_initial) {
         dest->cpu_throttle_initial = params->cpu_throttle_initial;
     }
@@ -1382,6 +1399,10 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
         s->parameters.decompress_threads = params->decompress_threads;
     }
 
+    if (params->has_throttle_trigger_threshold) {
+        s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
+    }
+
     if (params->has_cpu_throttle_initial) {
         s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
     }
@@ -3558,6 +3579,9 @@ static Property migration_properties[] = {
     DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
                       parameters.decompress_threads,
                       DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
+    DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
+                      parameters.throttle_trigger_threshold,
+                      DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
     DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
                       parameters.cpu_throttle_initial,
                       DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
@@ -3667,6 +3691,7 @@ static void migration_instance_init(Object *obj)
     params->has_compress_level = true;
     params->has_compress_threads = true;
     params->has_decompress_threads = true;
+    params->has_throttle_trigger_threshold = true;
     params->has_cpu_throttle_initial = true;
     params->has_cpu_throttle_increment = true;
     params->has_max_bandwidth = true;
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -896,11 +896,38 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
     }
 }
 
+static void migration_trigger_throttle(RAMState *rs)
+{
+    MigrationState *s = migrate_get_current();
+    uint64_t threshold = s->parameters.throttle_trigger_threshold;
+
+    uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
+    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
+    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
+
+    /* During block migration the auto-converge logic incorrectly detects
+     * that ram migration makes no progress. Avoid this by disabling the
+     * throttling logic during the bulk phase of block migration. */
+    if (migrate_auto_converge() && !blk_mig_bulk_active()) {
+        /* The following detection logic can be refined later. For now:
+           Check to see if the ratio between dirtied bytes and the approx.
+           amount of bytes that just got transferred since the last time
+           we were in this routine reaches the threshold. If that happens
+           twice, start or increase throttling. */
+
+        if ((bytes_dirty_period > bytes_dirty_threshold) &&
+            (++rs->dirty_rate_high_cnt >= 2)) {
+            trace_migration_throttle();
+            rs->dirty_rate_high_cnt = 0;
+            mig_throttle_guest_down();
+        }
+    }
+}
+
 static void migration_bitmap_sync(RAMState *rs)
 {
     RAMBlock *block;
     int64_t end_time;
-    uint64_t bytes_xfer_now;
 
     ram_counters.dirty_sync_count++;
 
@@ -927,26 +954,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
     /* more than 1 second = 1000 milliseconds */
     if (end_time > rs->time_last_bitmap_sync + 1000) {
-        bytes_xfer_now = ram_counters.transferred;
-
-        /* During block migration the auto-converge logic incorrectly detects
-         * that ram migration makes no progress. Avoid this by disabling the
-         * throttling logic during the bulk phase of block migration. */
-        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
-            /* The following detection logic can be refined later. For now:
-               Check to see if the dirtied bytes is 50% more than the approx.
-               amount of bytes that just got transferred since the last time we
-               were in this routine. If that happens twice, start or increase
-               throttling */
-
-            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
-                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
-                (++rs->dirty_rate_high_cnt >= 2)) {
-                trace_migration_throttle();
-                rs->dirty_rate_high_cnt = 0;
-                mig_throttle_guest_down();
-            }
-        }
+        migration_trigger_throttle(rs);
 
         migration_update_rates(rs, end_time);
 
@@ -955,7 +963,7 @@ static void migration_bitmap_sync(RAMState *rs)
         /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
         rs->num_dirty_pages_period = 0;
-        rs->bytes_xfer_prev = bytes_xfer_now;
+        rs->bytes_xfer_prev = ram_counters.transferred;
     }
     if (migrate_use_events()) {
         qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
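
To make the new trigger arithmetic concrete: with the default threshold of 50, `bytes_dirty_period > bytes_xfer_period * 50 / 100` reproduces the old hard-coded "dirtied more than half of what was transferred" test. A minimal standalone sketch of the comparison (plain C with illustrative names, not QEMU code):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Same comparison migration_trigger_throttle() performs each sync period. */
static bool throttle_would_trigger(uint64_t bytes_dirty_period,
                                   uint64_t bytes_xfer_period,
                                   uint64_t threshold /* 1..100 */)
{
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
    return bytes_dirty_period > bytes_dirty_threshold;
}

int main(void)
{
    uint64_t xfer  = 800ULL << 20;  /* 800 MiB transferred this period */
    uint64_t dirty = 500ULL << 20;  /* 500 MiB dirtied this period */

    /* 500 MiB > 400 MiB (50% of 800 MiB): prints 1, throttling would start */
    printf("threshold 50: %d\n", throttle_would_trigger(dirty, xfer, 50));
    /* 500 MiB <= 560 MiB (70% of 800 MiB): prints 0 */
    printf("threshold 70: %d\n", throttle_would_trigger(dirty, xfer, 70));
    return 0;
}
```

Raising the threshold above 50 makes auto-converge slower to kick in; lowering it makes throttling more aggressive. In QEMU itself, throttling additionally only starts once the condition has held for two consecutive sync periods.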
@@ -2734,7 +2742,7 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
 }
 
 static inline void *colo_cache_from_block_offset(RAMBlock *block,
-                                                 ram_addr_t offset)
+                                                 ram_addr_t offset, bool record_bitmap)
 {
     if (!offset_in_ramblock(block, offset)) {
         return NULL;
@@ -2750,7 +2758,8 @@ static inline void *colo_cache_from_block_offset(RAMBlock *block,
      * It helps us to decide which pages in ram cache should be flushed
      * into VM's RAM later.
      */
-    if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
+    if (record_bitmap &&
+        !test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
         ram_state->migration_dirty_pages++;
     }
     return block->colo_cache + offset;
@@ -2986,7 +2995,6 @@ int colo_init_ram_cache(void)
                 }
                 return -errno;
             }
-            memcpy(block->colo_cache, block->host, block->used_length);
         }
     }
 
@@ -3000,19 +3008,36 @@ int colo_init_ram_cache(void)
 
         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
-
             block->bmap = bitmap_new(pages);
-            bitmap_set(block->bmap, 0, pages);
         }
     }
-    ram_state = g_new0(RAMState, 1);
-    ram_state->migration_dirty_pages = 0;
-    qemu_mutex_init(&ram_state->bitmap_mutex);
-    memory_global_dirty_log_start();
 
+    ram_state_init(&ram_state);
     return 0;
 }
 
+/* TODO: duplicated with ram_init_bitmaps */
+void colo_incoming_start_dirty_log(void)
+{
+    RAMBlock *block = NULL;
+    /* For memory_global_dirty_log_start below. */
+    qemu_mutex_lock_iothread();
+    qemu_mutex_lock_ramlist();
+
+    memory_global_dirty_log_sync();
+    WITH_RCU_READ_LOCK_GUARD() {
+        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+            ramblock_sync_dirty_bitmap(ram_state, block);
+            /* Discard this dirty bitmap record */
+            bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
+        }
+        memory_global_dirty_log_start();
+    }
+    ram_state->migration_dirty_pages = 0;
+    qemu_mutex_unlock_ramlist();
+    qemu_mutex_unlock_iothread();
+}
+
 /* It is necessary to hold the global lock to call this helper */
 void colo_release_ram_cache(void)
 {
@@ -3032,9 +3057,7 @@ void colo_release_ram_cache(void)
             }
         }
     }
-    qemu_mutex_destroy(&ram_state->bitmap_mutex);
-    g_free(ram_state);
-    ram_state = NULL;
+    ram_state_cleanup(&ram_state);
 }
 
 /**
@@ -3348,7 +3371,7 @@ static int ram_load_precopy(QEMUFile *f)
 
     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
         ram_addr_t addr, total_ram_bytes;
-        void *host = NULL;
+        void *host = NULL, *host_bak = NULL;
         uint8_t ch;
 
         /*
@@ -3379,20 +3402,35 @@ static int ram_load_precopy(QEMUFile *f)
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
             RAMBlock *block = ram_block_from_stream(f, flags);
 
+            host = host_from_ram_block_offset(block, addr);
             /*
-             * After going into COLO, we should load the Page into colo_cache.
+             * After going into COLO stage, we should not load the page
+             * into SVM's memory directly; put it into colo_cache first.
+             * NOTE: We need to keep a copy of SVM's ram in colo_cache.
+             * Previously we copied all this memory in the COLO preparing
+             * stage while the VM was stopped, which is time-consuming.
+             * Here we optimize it with a trick: back up every page while
+             * in the migration process when COLO is enabled. This slows
+             * migration slightly, but clearly reduces the downtime of
+             * backing up all SVM's memory in the COLO preparing stage.
              */
-            if (migration_incoming_in_colo_state()) {
-                host = colo_cache_from_block_offset(block, addr);
-            } else {
-                host = host_from_ram_block_offset(block, addr);
+            if (migration_incoming_colo_enabled()) {
+                if (migration_incoming_in_colo_state()) {
+                    /* In COLO stage, put all pages into cache temporarily */
+                    host = colo_cache_from_block_offset(block, addr, true);
+                } else {
+                    /*
+                     * In migration stage but before COLO stage,
+                     * put all pages into both cache and SVM's memory.
+                     */
+                    host_bak = colo_cache_from_block_offset(block, addr, false);
+                }
             }
             if (!host) {
                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                 ret = -EINVAL;
                 break;
             }
-
             if (!migration_incoming_in_colo_state()) {
                 ramblock_recv_bitmap_set(block, host);
             }
@@ -3506,6 +3544,9 @@ static int ram_load_precopy(QEMUFile *f)
         if (!ret) {
             ret = qemu_file_get_error(f);
         }
+        if (!ret && host_bak) {
+            memcpy(host_bak, host, TARGET_PAGE_SIZE);
+        }
     }
 
     ret |= wait_for_decompress_done();
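
The net effect of the `host`/`host_bak` changes above: in the COLO stage pages are written only into the cache, while during the initial migration each page is written into the guest's RAM and then lazily copied into the cache. A simplified sketch of that per-page flow, with hypothetical helpers standing in for host_from_ram_block_offset()/colo_cache_from_block_offset():

```c
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-ins for QEMU's stream/block types and lookups. */
typedef struct Stream Stream;
typedef struct Block Block;
uint8_t *guest_ram_ptr(Block *block, uint64_t addr);
uint8_t *colo_cache_ptr(Block *block, uint64_t addr, int record_bitmap);
void read_page(Stream *f, uint8_t *dst, size_t len);
int colo_enabled(void);
int in_colo_state(void);

void load_one_page(Stream *f, Block *block, uint64_t addr)
{
    uint8_t *host = guest_ram_ptr(block, addr);   /* SVM's real RAM */
    uint8_t *host_bak = NULL;

    if (colo_enabled()) {
        if (in_colo_state()) {
            /* COLO stage: load into the cache only, record dirty bit. */
            host = colo_cache_ptr(block, addr, 1);
        } else {
            /* Migration stage: load into RAM, back up into the cache. */
            host_bak = colo_cache_ptr(block, addr, 0);
        }
    }

    read_page(f, host, PAGE_SIZE);            /* stream fills the target */
    if (host_bak) {
        memcpy(host_bak, host, PAGE_SIZE);    /* lazy per-page backup */
    }
}
```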
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -66,5 +66,6 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb);
 /* ram cache */
 int colo_init_ram_cache(void);
 void colo_release_ram_cache(void);
+void colo_incoming_start_dirty_log(void);
 
 #endif
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -407,6 +407,10 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "%s: %u\n",
             MigrationParameter_str(MIGRATION_PARAMETER_DECOMPRESS_THREADS),
             params->decompress_threads);
+        assert(params->has_throttle_trigger_threshold);
+        monitor_printf(mon, "%s: %u\n",
+            MigrationParameter_str(MIGRATION_PARAMETER_THROTTLE_TRIGGER_THRESHOLD),
+            params->throttle_trigger_threshold);
         assert(params->has_cpu_throttle_initial);
         monitor_printf(mon, "%s: %u\n",
             MigrationParameter_str(MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL),
@@ -1254,6 +1258,10 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
         p->has_decompress_threads = true;
         visit_type_int(v, param, &p->decompress_threads, &err);
         break;
+    case MIGRATION_PARAMETER_THROTTLE_TRIGGER_THRESHOLD:
+        p->has_throttle_trigger_threshold = true;
+        visit_type_int(v, param, &p->throttle_trigger_threshold, &err);
+        break;
     case MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL:
         p->has_cpu_throttle_initial = true;
        visit_type_int(v, param, &p->cpu_throttle_initial, &err);
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -540,6 +540,10 @@
 #                      compression, so set the decompress-threads to the number about 1/4
 #                      of compress-threads is adequate.
 #
+# @throttle-trigger-threshold: The ratio of bytes_dirty_period to
+#                              bytes_xfer_period that triggers throttling,
+#                              expressed as a percentage. The default is 50. (Since 5.0)
+#
 # @cpu-throttle-initial: Initial percentage of time guest cpus are throttled
 #                        when migration auto-converge is activated. The
 #                        default value is 20. (Since 2.7)
@@ -625,7 +629,7 @@
   'data': ['announce-initial', 'announce-max',
            'announce-rounds', 'announce-step',
            'compress-level', 'compress-threads', 'decompress-threads',
-           'compress-wait-thread',
+           'compress-wait-thread', 'throttle-trigger-threshold',
            'cpu-throttle-initial', 'cpu-throttle-increment',
            'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth',
            'downtime-limit', 'x-checkpoint-delay', 'block-incremental',
@@ -660,6 +664,10 @@
 #
 # @decompress-threads: decompression thread count
 #
+# @throttle-trigger-threshold: The ratio of bytes_dirty_period to
+#                              bytes_xfer_period that triggers throttling,
+#                              expressed as a percentage. The default is 50. (Since 5.0)
+#
 # @cpu-throttle-initial: Initial percentage of time guest cpus are
 #                        throttled when migration auto-converge is activated.
 #                        The default value is 20. (Since 2.7)
@@ -752,6 +760,7 @@
             '*compress-threads': 'int',
             '*compress-wait-thread': 'bool',
             '*decompress-threads': 'int',
+            '*throttle-trigger-threshold': 'int',
             '*cpu-throttle-initial': 'int',
             '*cpu-throttle-increment': 'int',
             '*tls-creds': 'StrOrNull',
@@ -813,6 +822,10 @@
 #
 # @decompress-threads: decompression thread count
 #
+# @throttle-trigger-threshold: The ratio of bytes_dirty_period to
+#                              bytes_xfer_period that triggers throttling,
+#                              expressed as a percentage. The default is 50. (Since 5.0)
+#
 # @cpu-throttle-initial: Initial percentage of time guest cpus are
 #                        throttled when migration auto-converge is activated.
 #                        (Since 2.7)
@@ -905,6 +918,7 @@
             '*compress-threads': 'uint8',
             '*compress-wait-thread': 'bool',
             '*decompress-threads': 'uint8',
+            '*throttle-trigger-threshold': 'uint8',
             '*cpu-throttle-initial': 'uint8',
             '*cpu-throttle-increment': 'uint8',
             '*tls-creds': 'str',