KVM: arm64: Rename free_removed to free_unlinked
Normalize on referring to tables outside of an active paging structure
as 'unlinked'.

A subsequent change to KVM will add support for building page tables
that are not part of an active paging structure. The existing
'removed_table' terminology is quite clunky when applied in this
context.

Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20230426172330.1439644-2-ricarkol@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent f1fcbaa18b
commit c14d08c5ad
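For readers without the KVM sources at hand, the standalone C sketch below condenses the pattern this rename touches: a table page that has been detached ("unlinked") from the live paging structure is handed back to the memory-management backend through a dedicated callback. The demo_* names, the simplified struct, and main() are hypothetical and exist only for illustration; the real callback is the free_unlinked_table member of struct kvm_pgtable_mm_ops shown in the diff below.

/*
 * Illustration only: a userspace analogue of the renamed callback, using
 * hypothetical demo_* names.  Not the KVM implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct kvm_pgtable_mm_ops after the rename. */
struct demo_mm_ops {
	void *(*zalloc_page)(void *arg);
	/* Called free_removed_table before this commit. */
	void (*free_unlinked_table)(void *addr, uint32_t level);
};

static void *demo_zalloc_page(void *arg)
{
	(void)arg;
	return calloc(1, 4096);	/* stand-in for one zeroed table page */
}

static void demo_free_unlinked_table(void *addr, uint32_t level)
{
	/* The real helper also walks and frees any child tables. */
	printf("freeing unlinked level-%u table at %p\n",
	       (unsigned int)level, addr);
	free(addr);
}

int main(void)
{
	struct demo_mm_ops mm_ops = {
		.zalloc_page = demo_zalloc_page,
		.free_unlinked_table = demo_free_unlinked_table,
	};

	/* Allocate a table page and pretend it was unlinked from a live table... */
	void *childp = mm_ops.zalloc_page(NULL);

	/* ...then release it through the renamed callback. */
	mm_ops.free_unlinked_table(childp, 2);
	return 0;
}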
arch/arm64/include/asm/kvm_pgtable.h

@@ -104,7 +104,7 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
  * allocation is physically contiguous.
  * @free_pages_exact: Free an exact number of memory pages previously
  * allocated by zalloc_pages_exact.
- * @free_removed_table: Free a removed paging structure by unlinking and
+ * @free_unlinked_table: Free an unlinked paging structure by unlinking and
  * dropping references.
  * @get_page: Increment the refcount on a page.
  * @put_page: Decrement the refcount on a page. When the
@@ -124,7 +124,7 @@ struct kvm_pgtable_mm_ops {
 	void* (*zalloc_page)(void *arg);
 	void* (*zalloc_pages_exact)(size_t size);
 	void (*free_pages_exact)(void *addr, size_t size);
-	void (*free_removed_table)(void *addr, u32 level);
+	void (*free_unlinked_table)(void *addr, u32 level);
 	void (*get_page)(void *addr);
 	void (*put_page)(void *addr);
 	int (*page_count)(void *addr);
@@ -440,7 +440,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
 
 /**
- * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
+ * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
  * @mm_ops: Memory management callbacks.
  * @pgtable: Unlinked stage-2 paging structure to be freed.
  * @level: Level of the stage-2 paging structure to be freed.
@@ -448,7 +448,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  * The page-table is assumed to be unreachable by any hardware walkers prior to
  * freeing and therefore no TLB invalidation is performed.
  */
-void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
 
 /**
  * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
arch/arm64/kvm/hyp/nvhe/mem_protect.c

@@ -91,9 +91,9 @@ static void host_s2_put_page(void *addr)
 	hyp_put_page(&host_s2_pool, addr);
 }
 
-static void host_s2_free_removed_table(void *addr, u32 level)
+static void host_s2_free_unlinked_table(void *addr, u32 level)
 {
-	kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level);
+	kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
 }
 
 static int prepare_s2_pool(void *pgt_pool_base)
@@ -110,7 +110,7 @@ static int prepare_s2_pool(void *pgt_pool_base)
 	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
 		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
 		.zalloc_page = host_s2_zalloc_page,
-		.free_removed_table = host_s2_free_removed_table,
+		.free_unlinked_table = host_s2_free_unlinked_table,
 		.phys_to_virt = hyp_phys_to_virt,
 		.virt_to_phys = hyp_virt_to_phys,
 		.page_count = hyp_page_count,
arch/arm64/kvm/hyp/pgtable.c

@@ -860,7 +860,7 @@ static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
 	if (ret)
 		return ret;
 
-	mm_ops->free_removed_table(childp, ctx->level);
+	mm_ops->free_unlinked_table(childp, ctx->level);
 	return 0;
 }
 
@@ -905,7 +905,7 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
  * The TABLE_PRE callback runs for table entries on the way down, looking
  * for table entries which we could conceivably replace with a block entry
  * for this mapping. If it finds one it replaces the entry and calls
- * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
+ * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
  *
 * Otherwise, the LEAF callback performs the mapping at the existing leaves
 * instead.
@@ -1276,7 +1276,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
 	pgt->pgd = NULL;
 }
 
-void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
 {
 	kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
 	struct kvm_pgtable_walker walker = {
arch/arm64/kvm/mmu.c

@@ -131,21 +131,21 @@ static void kvm_s2_free_pages_exact(void *virt, size_t size)
 
 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;
 
-static void stage2_free_removed_table_rcu_cb(struct rcu_head *head)
+static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
 {
 	struct page *page = container_of(head, struct page, rcu_head);
 	void *pgtable = page_to_virt(page);
 	u32 level = page_private(page);
 
-	kvm_pgtable_stage2_free_removed(&kvm_s2_mm_ops, pgtable, level);
+	kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
 }
 
-static void stage2_free_removed_table(void *addr, u32 level)
+static void stage2_free_unlinked_table(void *addr, u32 level)
 {
 	struct page *page = virt_to_page(addr);
 
 	set_page_private(page, (unsigned long)level);
-	call_rcu(&page->rcu_head, stage2_free_removed_table_rcu_cb);
+	call_rcu(&page->rcu_head, stage2_free_unlinked_table_rcu_cb);
 }
 
 static void kvm_host_get_page(void *addr)
@@ -701,7 +701,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
 	.zalloc_page = stage2_memcache_zalloc_page,
 	.zalloc_pages_exact = kvm_s2_zalloc_pages_exact,
 	.free_pages_exact = kvm_s2_free_pages_exact,
-	.free_removed_table = stage2_free_removed_table,
+	.free_unlinked_table = stage2_free_unlinked_table,
 	.get_page = kvm_host_get_page,
 	.put_page = kvm_s2_put_page,
 	.page_count = kvm_host_page_count,