
Commit 1af4a96

Ben Gardon authored and bonzini committed
KVM: x86/mmu: Yield in TDP MMU iter even if no SPTEs changed
Given certain conditions, some TDP MMU functions may not yield reliably / frequently enough. For example, if a paging structure was very large but had few, if any, writable entries, wrprot_gfn_range could traverse many entries before finding a writable entry and yielding, because the check for yielding only happens after an SPTE is modified. Fix this issue by moving the yield to the beginning of the loop.

Fixes: a6a0b05 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
Reviewed-by: Peter Feiner <[email protected]>
Signed-off-by: Ben Gardon <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
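The shape of the fix is the same in every hunk below: the tdp_mmu_iter_cond_resched() call moves from the tail of the loop body, which is reached only after an SPTE has actually been changed, to the head, which runs on every iteration. A minimal stand-alone C sketch of the two loop shapes; all names here (entry_needs_work, modify_entry, maybe_yield) are hypothetical stand-ins, not KVM APIs:

#include <sched.h>
#include <stdbool.h>

/* Hypothetical stand-ins, only to make the control flow concrete. */
static bool entry_needs_work(int i) { return (i % 512) == 0; }
static void modify_entry(int i) { (void)i; }
static void maybe_yield(void) { sched_yield(); }	/* ~cond_resched() */

/* Before the fix: a long run of entries that fail the filter never
 * reaches the yield call, so the walk can run unchecked. */
static void walk_before(int nents)
{
	for (int i = 0; i < nents; i++) {
		if (!entry_needs_work(i))
			continue;	/* skips the yield below */
		modify_entry(i);
		maybe_yield();		/* reached only after a change */
	}
}

/* After the fix: the yield check runs on every iteration, whether or
 * not anything below it modifies an entry. */
static void walk_after(int nents)
{
	for (int i = 0; i < nents; i++) {
		maybe_yield();		/* reached on every iteration */
		if (!entry_needs_work(i))
			continue;
		modify_entry(i);
	}
}

int main(void)
{
	walk_before(1 << 20);
	walk_after(1 << 20);
	return 0;
}

In the real code the helper also returns whether it yielded, so the callers restart the loop body with continue; the hunks below show that detail.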
1 parent ed5e484 commit 1af4a96


arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 22 additions & 10 deletions
@@ -501,6 +501,12 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool flush_needed = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (can_yield &&
+		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
+			flush_needed = false;
+			continue;
+		}
+
 		if (!is_shadow_present_pte(iter.old_spte))
			continue;
 
@@ -515,9 +521,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
-
-		flush_needed = !(can_yield &&
-				 tdp_mmu_iter_cond_resched(kvm, &iter, true));
+		flush_needed = true;
 	}
 	return flush_needed;
 }
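The third argument to tdp_mmu_iter_cond_resched() tells the helper whether TLB invalidations are still pending, so it can flush them before dropping the MMU lock; that is why zap_gfn_range() hands it the running flush_needed and clears the flag after a yield. A sketch of that assumed contract; every helper here is a hypothetical stand-in, not the kernel's implementation:

#include <stdbool.h>

static bool need_resched_now(void)
{
	static int calls;
	return (++calls % 64) == 0;	/* pretend: occasionally must yield */
}
static void flush_remote_tlbs(void) { }	/* stand-in for the real flush */
static void yield_and_retake_mmu_lock(void) { }

/* Assumed contract of the three-argument cond-resched helper. */
static bool iter_cond_resched_sketch(bool flush_pending)
{
	if (!need_resched_now())
		return false;

	if (flush_pending)
		flush_remote_tlbs();	/* make pending zaps visible before
					 * other vCPUs run again */
	yield_and_retake_mmu_lock();
	return true;	/* caller resets its flush flag and continues */
}

int main(void)
{
	bool flush_needed = false;

	for (int i = 0; i < 4096; i++) {	/* mimics zap_gfn_range() */
		if (iter_cond_resched_sketch(flush_needed)) {
			flush_needed = false;	/* helper flushed for us */
			continue;
		}
		/* ... zap an entry ... */
		flush_needed = true;
	}
	if (flush_needed)
		flush_remote_tlbs();	/* zap_gfn_range() instead returns
					 * the flag to its caller */
	return 0;
}

The wrprot_gfn_range(), clear_dirty_gfn_range() and set_dirty_gfn_range() hunks below pass false instead, presumably because their callers take care of any required TLB flush.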
@@ -880,6 +884,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;
@@ -888,8 +895,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -933,6 +938,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool spte_set = false;
 
 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
@@ -947,8 +955,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -1056,15 +1062,16 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool spte_set = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (!is_shadow_present_pte(iter.old_spte))
			continue;
 
 		new_spte = iter.old_spte | shadow_dirty_mask;
 
 		tdp_mmu_set_spte(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 
 	return spte_set;
@@ -1105,6 +1112,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 	bool spte_set = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
+			spte_set = false;
+			continue;
+		}
+
 		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;
@@ -1116,7 +1128,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
-		spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true);
+		spte_set = true;
 	}
 
 	if (spte_set)
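Note that zap_collapsible_spte_range() reuses its existing spte_set flag as the pending-flush indicator, visible in the trailing if (spte_set) that guards the flush after the loop; it therefore follows the same pattern as zap_gfn_range(), passing the flag to the resched helper and clearing it after a yield rather than passing false as the other walks do.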
