
Commit 244893f

sean-jc authored and bonzini committed
KVM: Dynamically allocate "new" memslots from the get-go
Allocate the "new" memslot for !DELETE memslot updates straight away instead of filling an intermediate on-stack object and forcing kvm_set_memslot() to juggle the allocation and do weird things like reuse the old memslot object in MOVE. In the MOVE case, this results in an "extra" memslot allocation due to allocating both the "new" slot and the "invalid" slot, but that's a temporary and not-huge allocation, and MOVE is a relatively rare memslot operation. Regarding MOVE, drop the open-coded management of the gfn tree with a call to kvm_replace_memslot(), which already handles the case where new->base_gfn != old->base_gfn. This is made possible by virtue of not having to copy the "new" memslot data after erasing the old memslot from the gfn tree. Using kvm_replace_memslot(), and more specifically not reusing the old memslot, means the MOVE case now does hva tree and hash list updates, but that's a small price to pay for simplifying the code and making MOVE align with all the other flavors of updates. The "extra" updates are firmly in the noise from a performance perspective, e.g. the "move (in)active area" selfttests show a (very, very) slight improvement. Signed-off-by: Sean Christopherson <[email protected]> Reviewed-by: Maciej S. Szmigiero <[email protected]> Signed-off-by: Maciej S. Szmigiero <[email protected]> Message-Id: <f0d8c72727aa825cf682bd4e3da4b3fa68215dd4.1638817641.git.maciej.szmigiero@oracle.com>
1 parent 0f9bdef commit 244893f
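To make the allocation scheme concrete, here is a minimal, self-contained userspace C sketch of the lifecycle the commit message describes; it is not the kernel code: the struct fields and helper names are simplified stand-ins, and calloc()/free() stand in for kzalloc()/kfree(). The caller allocates the persistent "new" slot up front, while the temporary "invalid" slot is allocated only for DELETE/MOVE and always freed before returning, which is why MOVE briefly holds two small allocations.

#include <stdio.h>
#include <stdlib.h>

enum mr_change { MR_CREATE, MR_DELETE, MR_MOVE, MR_FLAGS_ONLY };

struct memslot {
	unsigned long base_gfn;
	unsigned long npages;
	unsigned int flags;
};

/*
 * Stand-in for kvm_set_memslot(): allocate the temporary INVALID
 * placeholder only for DELETE/MOVE, and always free it before
 * returning.  The persistent "new" slot is owned by the caller.
 */
static int set_memslot(struct memslot *old, struct memslot *new,
		       enum mr_change change)
{
	struct memslot *invalid = NULL;

	if (change == MR_DELETE || change == MR_MOVE) {
		invalid = calloc(1, sizeof(*invalid));
		if (!invalid)
			return -1;
		*invalid = *old;	/* mimics kvm_invalidate_memslot() */
	}

	/* ... prepare/replace/activate steps would run here ... */

	free(invalid);		/* temporary slot never outlives the call */
	return 0;
}

int main(void)
{
	struct memslot old = { .base_gfn = 0x100, .npages = 16 };

	/* The "new" slot is heap-allocated from the get-go. */
	struct memslot *new = calloc(1, sizeof(*new));
	if (!new)
		return 1;
	new->base_gfn = 0x200;
	new->npages = 16;

	if (set_memslot(&old, new, MR_MOVE)) {
		free(new);	/* on failure, the caller frees "new" */
		return 1;
	}
	printf("MOVE done: slot now at gfn 0x%lx\n", new->base_gfn);
	free(new);		/* userspace cleanup; the kernel would keep it */
	return 0;
}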

File tree

1 file changed (+77, −101 lines)


virt/kvm/kvm_main.c

Lines changed: 77 additions & 101 deletions
@@ -1503,23 +1503,25 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
 	 * new and KVM isn't using a ring buffer, allocate and initialize a
 	 * new bitmap.
 	 */
-	if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
-		new->dirty_bitmap = NULL;
-	else if (old->dirty_bitmap)
-		new->dirty_bitmap = old->dirty_bitmap;
-	else if (!kvm->dirty_ring_size) {
-		r = kvm_alloc_dirty_bitmap(new);
-		if (r)
-			return r;
+	if (change != KVM_MR_DELETE) {
+		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+			new->dirty_bitmap = NULL;
+		else if (old && old->dirty_bitmap)
+			new->dirty_bitmap = old->dirty_bitmap;
+		else if (!kvm->dirty_ring_size) {
+			r = kvm_alloc_dirty_bitmap(new);
+			if (r)
+				return r;
 
-		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
-			bitmap_set(new->dirty_bitmap, 0, new->npages);
+			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+				bitmap_set(new->dirty_bitmap, 0, new->npages);
+		}
 	}
 
 	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
 
 	/* Free the bitmap on failure if it was allocated above. */
-	if (r && new->dirty_bitmap && !old->dirty_bitmap)
+	if (r && new && new->dirty_bitmap && old && !old->dirty_bitmap)
 		kvm_destroy_dirty_bitmap(new);
 
 	return r;
@@ -1606,16 +1608,16 @@ static void kvm_copy_memslot(struct kvm_memory_slot *dest,
 
 static void kvm_invalidate_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *old,
-				   struct kvm_memory_slot *working_slot)
+				   struct kvm_memory_slot *invalid_slot)
 {
 	/*
 	 * Mark the current slot INVALID. As with all memslot modifications,
 	 * this must be done on an unreachable slot to avoid modifying the
 	 * current slot in the active tree.
 	 */
-	kvm_copy_memslot(working_slot, old);
-	working_slot->flags |= KVM_MEMSLOT_INVALID;
-	kvm_replace_memslot(kvm, old, working_slot);
+	kvm_copy_memslot(invalid_slot, old);
+	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
+	kvm_replace_memslot(kvm, old, invalid_slot);
 
 	/*
 	 * Activate the slot that is now marked INVALID, but don't propagate
@@ -1642,20 +1644,15 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
 	 * above. Writers are required to retrieve memslots *after* acquiring
 	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
 	 */
-	old->arch = working_slot->arch;
+	old->arch = invalid_slot->arch;
 }
 
 static void kvm_create_memslot(struct kvm *kvm,
-			       const struct kvm_memory_slot *new,
-			       struct kvm_memory_slot *working)
+			       struct kvm_memory_slot *new)
 {
-	/*
-	 * Add the new memslot to the inactive set as a copy of the
-	 * new memslot data provided by userspace.
-	 */
-	kvm_copy_memslot(working, new);
-	kvm_replace_memslot(kvm, NULL, working);
-	kvm_activate_memslot(kvm, NULL, working);
+	/* Add the new memslot to the inactive set and activate. */
+	kvm_replace_memslot(kvm, NULL, new);
+	kvm_activate_memslot(kvm, NULL, new);
 }
 
 static void kvm_delete_memslot(struct kvm *kvm,
@@ -1664,85 +1661,46 @@ static void kvm_delete_memslot(struct kvm *kvm,
 {
 	/*
 	 * Remove the old memslot (in the inactive memslots) by passing NULL as
-	 * the "new" slot.
+	 * the "new" slot, and for the invalid version in the active slots.
 	 */
 	kvm_replace_memslot(kvm, old, NULL);
-
-	/* And do the same for the invalid version in the active slot. */
 	kvm_activate_memslot(kvm, invalid_slot, NULL);
-
-	/* Free the invalid slot, the caller will clean up the old slot. */
-	kfree(invalid_slot);
 }
 
-static struct kvm_memory_slot *kvm_move_memslot(struct kvm *kvm,
-						struct kvm_memory_slot *old,
-						const struct kvm_memory_slot *new,
-						struct kvm_memory_slot *invalid_slot)
+static void kvm_move_memslot(struct kvm *kvm,
+			     struct kvm_memory_slot *old,
+			     struct kvm_memory_slot *new,
+			     struct kvm_memory_slot *invalid_slot)
 {
-	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, old->as_id);
-
-	/*
-	 * The memslot's gfn is changing, remove it from the inactive tree, it
-	 * will be re-added with its updated gfn. Because its range is
-	 * changing, an in-place replace is not possible.
-	 */
-	kvm_erase_gfn_node(slots, old);
-
-	/*
-	 * The old slot is now fully disconnected, reuse its memory for the
-	 * persistent copy of "new".
-	 */
-	kvm_copy_memslot(old, new);
-
-	/* Re-add to the gfn tree with the updated gfn */
-	kvm_insert_gfn_node(slots, old);
-
-	/* Replace the current INVALID slot with the updated memslot. */
-	kvm_activate_memslot(kvm, invalid_slot, old);
-
 	/*
-	 * Clear the INVALID flag so that the invalid_slot is now a perfect
-	 * copy of the old slot. Return it for cleanup in the caller.
+	 * Replace the old memslot in the inactive slots, and then swap slots
+	 * and replace the current INVALID with the new as well.
 	 */
-	WARN_ON_ONCE(!(invalid_slot->flags & KVM_MEMSLOT_INVALID));
-	invalid_slot->flags &= ~KVM_MEMSLOT_INVALID;
-	return invalid_slot;
+	kvm_replace_memslot(kvm, old, new);
+	kvm_activate_memslot(kvm, invalid_slot, new);
 }
 
 static void kvm_update_flags_memslot(struct kvm *kvm,
 				     struct kvm_memory_slot *old,
-				     const struct kvm_memory_slot *new,
-				     struct kvm_memory_slot *working_slot)
+				     struct kvm_memory_slot *new)
 {
 	/*
 	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
 	 * an intermediate step. Instead, the old memslot is simply replaced
 	 * with a new, updated copy in both memslot sets.
 	 */
-	kvm_copy_memslot(working_slot, new);
-	kvm_replace_memslot(kvm, old, working_slot);
-	kvm_activate_memslot(kvm, old, working_slot);
+	kvm_replace_memslot(kvm, old, new);
+	kvm_activate_memslot(kvm, old, new);
 }
 
 static int kvm_set_memslot(struct kvm *kvm,
 			   struct kvm_memory_slot *old,
 			   struct kvm_memory_slot *new,
 			   enum kvm_mr_change change)
 {
-	struct kvm_memory_slot *working;
+	struct kvm_memory_slot *invalid_slot;
 	int r;
 
-	/*
-	 * Modifications are done on an unreachable slot. Any changes are then
-	 * (eventually) propagated to both the active and inactive slots. This
-	 * allocation would ideally be on-demand (in helpers), but is done here
-	 * to avoid having to handle failure after kvm_prepare_memory_region().
-	 */
-	working = kzalloc(sizeof(*working), GFP_KERNEL_ACCOUNT);
-	if (!working)
-		return -ENOMEM;
-
 	/*
 	 * Released in kvm_swap_active_memslots.
 	 *
@@ -1767,9 +1725,19 @@ static int kvm_set_memslot(struct kvm *kvm,
 	 * (and without a lock), a window would exist between effecting the
 	 * delete/move and committing the changes in arch code where KVM or a
 	 * guest could access a non-existent memslot.
+	 *
+	 * Modifications are done on a temporary, unreachable slot. The old
+	 * slot needs to be preserved in case a later step fails and the
+	 * invalidation needs to be reverted.
 	 */
-	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
-		kvm_invalidate_memslot(kvm, old, working);
+	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
+		if (!invalid_slot) {
+			mutex_unlock(&kvm->slots_arch_lock);
+			return -ENOMEM;
+		}
+		kvm_invalidate_memslot(kvm, old, invalid_slot);
+	}
 
 	r = kvm_prepare_memory_region(kvm, old, new, change);
 	if (r) {
@@ -1779,11 +1747,12 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * in the inactive slots. Changing the active memslots also
 		 * release slots_arch_lock.
 		 */
-		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
-			kvm_activate_memslot(kvm, working, old);
-		else
+		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+			kvm_activate_memslot(kvm, invalid_slot, old);
+			kfree(invalid_slot);
+		} else {
 			mutex_unlock(&kvm->slots_arch_lock);
-		kfree(working);
+		}
 		return r;
 	}

@@ -1795,16 +1764,20 @@ static int kvm_set_memslot(struct kvm *kvm,
 	 * old slot is detached but otherwise preserved.
 	 */
 	if (change == KVM_MR_CREATE)
-		kvm_create_memslot(kvm, new, working);
+		kvm_create_memslot(kvm, new);
 	else if (change == KVM_MR_DELETE)
-		kvm_delete_memslot(kvm, old, working);
+		kvm_delete_memslot(kvm, old, invalid_slot);
 	else if (change == KVM_MR_MOVE)
-		old = kvm_move_memslot(kvm, old, new, working);
+		kvm_move_memslot(kvm, old, new, invalid_slot);
 	else if (change == KVM_MR_FLAGS_ONLY)
-		kvm_update_flags_memslot(kvm, old, new, working);
+		kvm_update_flags_memslot(kvm, old, new);
 	else
 		BUG();
 
+	/* Free the temporary INVALID slot used for DELETE and MOVE. */
+	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
+		kfree(invalid_slot);
+
 	/*
 	 * No need to refresh new->arch, changes after dropping slots_arch_lock
 	 * will directly hit the final, active memsot. Architectures are
@@ -1839,8 +1812,7 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
 int __kvm_set_memory_region(struct kvm *kvm,
 			    const struct kvm_userspace_memory_region *mem)
 {
-	struct kvm_memory_slot *old;
-	struct kvm_memory_slot new;
+	struct kvm_memory_slot *old, *new;
 	struct kvm_memslots *slots;
 	enum kvm_mr_change change;
 	unsigned long npages;
@@ -1889,11 +1861,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
 			return -EIO;
 
-		memset(&new, 0, sizeof(new));
-		new.id = id;
-		new.as_id = as_id;
-
-		return kvm_set_memslot(kvm, old, &new, KVM_MR_DELETE);
+		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
 	}
 
 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
@@ -1926,14 +1894,22 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
 		return -EEXIST;
 
-	new.as_id = as_id;
-	new.id = id;
-	new.base_gfn = base_gfn;
-	new.npages = npages;
-	new.flags = mem->flags;
-	new.userspace_addr = mem->userspace_addr;
+	/* Allocate a slot that will persist in the memslot. */
+	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
+	if (!new)
+		return -ENOMEM;
+
+	new->as_id = as_id;
+	new->id = id;
+	new->base_gfn = base_gfn;
+	new->npages = npages;
+	new->flags = mem->flags;
+	new->userspace_addr = mem->userspace_addr;
 
-	return kvm_set_memslot(kvm, old, &new, change);
+	r = kvm_set_memslot(kvm, old, new, change);
+	if (r)
+		kfree(new);
+	return r;
 }
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
