Commit ed5e484

Ben Gardon authored and bonzini committed
KVM: x86/mmu: Ensure forward progress when yielding in TDP MMU iter
In some functions the TDP iter risks not making forward progress if two threads livelock yielding to one another. This is possible if two threads are trying to execute wrprot_gfn_range. Each could write protect an entry and then yield. This would reset the tdp_iter's walk over the paging structure and the loop would end up repeating the same entry over and over, preventing either thread from making forward progress. Fix this issue by only yielding if the loop has made forward progress since the last yield.

Fixes: a6a0b05 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
Reviewed-by: Peter Feiner <[email protected]>
Signed-off-by: Ben Gardon <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
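The shape of the fix, as a minimal standalone sketch (illustrative names only, not the actual KVM identifiers): a thread may yield only if the walk has advanced since its last yield, so two contending threads can no longer keep resetting each other's walks over the same entry.

```c
/*
 * Minimal sketch of the forward-progress guard described above.
 * struct walk_state and maybe_yield() are illustrative, not KVM code.
 */
struct walk_state {
	unsigned long next_gfn;    /* next GFN the walk will visit */
	unsigned long yielded_gfn; /* next_gfn recorded at the last yield */
};

/* Returns 1 if the caller yielded (and must restart its walk), 0 otherwise. */
static int maybe_yield(struct walk_state *ws)
{
	/* No progress since the last yield: refuse, breaking the livelock. */
	if (ws->next_gfn == ws->yielded_gfn)
		return 0;

	ws->yielded_gfn = ws->next_gfn;	/* record the new high-water mark */
	/* ... drop the lock, reschedule, restart the walk ... */
	return 1;
}
```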
1 parent 74953d3 commit ed5e484

File tree

3 files changed: +23, -23 lines


arch/x86/kvm/mmu/tdp_iter.c

Lines changed: 1 addition & 17 deletions
```diff
@@ -31,6 +31,7 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
 	WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
 
 	iter->next_last_level_gfn = next_last_level_gfn;
+	iter->yielded_gfn = iter->next_last_level_gfn;
 	iter->root_level = root_level;
 	iter->min_level = min_level;
 	iter->level = root_level;
@@ -158,23 +159,6 @@ void tdp_iter_next(struct tdp_iter *iter)
 	iter->valid = false;
 }
 
-/*
- * Restart the walk over the paging structure from the root, starting from the
- * highest gfn the iterator had previously reached. Assumes that the entire
- * paging structure, except the root page, may have been completely torn down
- * and rebuilt.
- */
-void tdp_iter_refresh_walk(struct tdp_iter *iter)
-{
-	gfn_t next_last_level_gfn = iter->next_last_level_gfn;
-
-	if (iter->gfn > next_last_level_gfn)
-		next_last_level_gfn = iter->gfn;
-
-	tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
-		       iter->root_level, iter->min_level, next_last_level_gfn);
-}
-
 u64 *tdp_iter_root_pt(struct tdp_iter *iter)
 {
 	return iter->pt_path[iter->root_level - 1];
```
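Note on behavior (a reading of the diff above together with the tdp_mmu.c change below): the removed tdp_iter_refresh_walk() resumed from whichever of iter->gfn and iter->next_last_level_gfn was larger, while its open-coded replacement in tdp_mmu_iter_cond_resched() always restarts from next_last_level_gfn and WARNs if the walk somehow got ahead of it. Side by side:

```c
/* Old resume point (removed tdp_iter_refresh_walk()): */
gfn_t resume = iter->next_last_level_gfn;
if (iter->gfn > resume)
	resume = iter->gfn;

/* New resume point (open-coded in tdp_mmu_iter_cond_resched()): */
WARN_ON(iter->gfn > iter->next_last_level_gfn);
resume = iter->next_last_level_gfn;
```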

arch/x86/kvm/mmu/tdp_iter.h

Lines changed: 6 additions & 1 deletion
```diff
@@ -16,6 +16,12 @@ struct tdp_iter {
 	 * for this GFN.
 	 */
 	gfn_t next_last_level_gfn;
+	/*
+	 * The next_last_level_gfn at the time when the thread last
+	 * yielded. Only yielding when the next_last_level_gfn !=
+	 * yielded_gfn helps ensure forward progress.
+	 */
+	gfn_t yielded_gfn;
 	/* Pointers to the page tables traversed to reach the current SPTE */
 	u64 *pt_path[PT64_ROOT_MAX_LEVEL];
 	/* A pointer to the current SPTE */
@@ -54,7 +60,6 @@ u64 *spte_to_child_pt(u64 pte, int level);
 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
 		    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
-void tdp_iter_refresh_walk(struct tdp_iter *iter);
 u64 *tdp_iter_root_pt(struct tdp_iter *iter);
 
 #endif /* __KVM_X86_MMU_TDP_ITER_H */
```
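The new field's comment states the forward-progress condition as an inequality. A hypothetical helper (not part of the patch) that names that predicate explicitly:

```c
/* Hypothetical helper, not in the patch: true once the walk has
 * advanced past the point recorded at the last yield. */
static inline bool tdp_iter_made_progress(const struct tdp_iter *iter)
{
	return iter->next_last_level_gfn != iter->yielded_gfn;
}
```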

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 16 additions & 5 deletions
```diff
@@ -451,21 +451,32 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
  * TLB flush before yielding.
  *
  * If this function yields, it will also reset the tdp_iter's walk over the
- * paging structure and the calling function should allow the iterator to
- * continue its traversal from the paging structure root.
+ * paging structure and the calling function should skip to the next
+ * iteration to allow the iterator to continue its traversal from the
+ * paging structure root.
  *
  * Return true if this function yielded and the iterator's traversal was reset.
  * Return false if a yield was not needed.
  */
 static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
 					     struct tdp_iter *iter, bool flush)
 {
+	/* Ensure forward progress has been made before yielding. */
+	if (iter->next_last_level_gfn == iter->yielded_gfn)
+		return false;
+
 	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
 		if (flush)
 			kvm_flush_remote_tlbs(kvm);
 
 		cond_resched_lock(&kvm->mmu_lock);
-		tdp_iter_refresh_walk(iter);
+
+		WARN_ON(iter->gfn > iter->next_last_level_gfn);
+
+		tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
+			       iter->root_level, iter->min_level,
+			       iter->next_last_level_gfn);
+
 		return true;
 	}
 
@@ -505,8 +516,8 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
-		flush_needed = !can_yield ||
-			!tdp_mmu_iter_cond_resched(kvm, &iter, true);
+		flush_needed = !(can_yield &&
+				 tdp_mmu_iter_cond_resched(kvm, &iter, true));
 	}
 	return flush_needed;
 }
```
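The zap_gfn_range() hunk is logically equivalent to the old expression (De Morgan, with identical short-circuiting), but the new form reads more directly: a flush is still owed at the end of the loop unless yielding was both allowed and actually happened, since tdp_mmu_iter_cond_resched() flushes remote TLBs itself when called with flush set. An expanded, purely illustrative form:

```c
/* Illustrative expansion of the new flush_needed assignment: */
if (can_yield && tdp_mmu_iter_cond_resched(kvm, &iter, true))
	flush_needed = false;	/* the yield path already flushed remote TLBs */
else
	flush_needed = true;	/* a flush is still owed after the loop */
```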
