ANDROID: virt: geniezone: Align the gzvm driver with mainline v11
Change list:
- Resolve low memory issue by using only one API to get pages for guest VM.
- Fix coding style from reviewer suggestions and checking tools.
- Separate the GenieZone dt-binding into its own patch to resolve the
  `DT_SPLIT_BINDING_PATCH` warning from checkpatch.pl.

Change-Id: I9c7cb862cc5a926c152efa145cd1c5b364b5049e
Signed-off-by: Yingshiuan Pan <yingshiuan.pan@mediatek.com>
Signed-off-by: Jerry Wang <ze-yu.wang@mediatek.com>
Signed-off-by: Kevenny Hsieh <kevenny.hsieh@mediatek.com>
Signed-off-by: Liju Chen <liju-clr.chen@mediatek.com>
Bug: 343838587
Link: https://lore.kernel.org/all/20240529084239.11478-1-liju-clr.chen@mediatek.com/
parent f6cbf65f3f
commit 77828e5a7d
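The first item in the change list is the main functional change: instead of translating an hva to a pfn through a chain of lookups and then pinning the page separately, the driver makes a single pin_user_pages() call and derives the pfn from the returned struct page. A minimal sketch of that pattern, assuming a kernel context with the usual mm headers (the helper name sketch_pin_and_translate is hypothetical, not part of the commit):

	static int sketch_pin_and_translate(unsigned long hva, u64 *pfn,
					    struct page **out_page)
	{
		unsigned int flags = FOLL_HWPOISON | FOLL_LONGTERM | FOLL_WRITE;
		struct page *page = NULL;
		long ret;

		mmap_read_lock(current->mm);
		ret = pin_user_pages(hva, 1, flags, &page);
		mmap_read_unlock(current->mm);

		if (ret != 1 || !page)
			return -EFAULT;

		/*
		 * One call pins the page and yields its struct page; the pfn
		 * follows directly, with no separate hva->pa walk. The caller
		 * must release the pin later with unpin_user_page(page).
		 */
		*pfn = page_to_pfn(page);
		*out_page = page;
		return 0;
	}
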
@@ -8796,7 +8796,7 @@ F: lib/vdso/
 GENIEZONE HYPERVISOR DRIVER
 M: Yingshiuan Pan <yingshiuan.pan@mediatek.com>
 M: Ze-Yu Wang <ze-yu.wang@mediatek.com>
-M: Yi-De Wu <yi-de.wu@mediatek.com>
+M: Liju Chen <liju-clr.chen@mediatek.com>
 F: Documentation/devicetree/bindings/firmware/mediatek,geniezone.yaml
 F: Documentation/virt/geniezone/
 F: arch/arm64/geniezone/

@@ -394,31 +394,6 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
 		return -EINVAL;
 }
 
-/**
- * gzvm_hva_to_pa_arch() - converts hva to pa with arch-specific way
- * @hva: Host virtual address.
- *
- * Return: GZVM_PA_ERR_BAD for translation error
- */
-u64 gzvm_hva_to_pa_arch(u64 hva)
-{
-	unsigned long flags;
-	u64 par;
-
-	local_irq_save(flags);
-	asm volatile("at s1e1r, %0" :: "r" (hva));
-	isb();
-	par = read_sysreg_par();
-	local_irq_restore(flags);
-
-	if (par & SYS_PAR_EL1_F)
-		return GZVM_PA_ERR_BAD;
-	par = par & PAR_PA47_MASK;
-	if (!par)
-		return GZVM_PA_ERR_BAD;
-	return par;
-}
-
 int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
 			u64 nr_pages)
 {

@@ -7,6 +7,6 @@
 GZVM_DIR ?= ../../../drivers/virt/geniezone
 
 gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o \
-	  $(GZVM_DIR)/gzvm_mmu.o $(GZVM_DIR)/gzvm_vcpu.o \
-	  $(GZVM_DIR)/gzvm_irqfd.o $(GZVM_DIR)/gzvm_ioeventfd.o \
+	  $(GZVM_DIR)/gzvm_vcpu.o $(GZVM_DIR)/gzvm_irqfd.o \
+	  $(GZVM_DIR)/gzvm_ioeventfd.o $(GZVM_DIR)/gzvm_mmu.o \
 	  $(GZVM_DIR)/gzvm_exception.o

@@ -19,7 +19,7 @@ bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu)
 
 	for (int i = 0; i < ARRAY_SIZE(vcpu->run->exception.reserved); i++) {
 		if (vcpu->run->exception.reserved[i])
-			return -EINVAL;
+			return false;
 	}
 
 	switch (vcpu->run->exception.exception) {

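Note the type here: gzvm_handle_guest_exception() returns bool, so the old return -EINVAL was a nonzero value that read as true ("handled") to callers; returning false is the correct way to report an invalid exception record.
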
@@ -23,7 +23,7 @@ struct gzvm_ioevent {
 };
 
 /**
- * ioeventfd_check_collision() - Check collison assumes gzvm->slots_lock held.
+ * ioeventfd_check_collision() - Check collison assumes gzvm->ioevent_lock held.
  * @gzvm: Pointer to gzvm.
  * @p: Pointer to gzvm_ioevent.
  *

@@ -115,8 +115,7 @@ static int gzvm_deassign_ioeventfd(struct gzvm *gzvm,
 
 	wildcard = !(args->flags & GZVM_IOEVENTFD_FLAG_DATAMATCH);
 
-	mutex_lock(&gzvm->lock);
-
+	mutex_lock(&gzvm->ioevent_lock);
 	list_for_each_entry_safe(p, tmp, &gzvm->ioevents, list) {
 		if (p->evt_ctx != evt_ctx ||
 		    p->addr != args->addr ||

@@ -132,7 +131,7 @@ static int gzvm_deassign_ioeventfd(struct gzvm *gzvm,
 			break;
 	}
 
-	mutex_unlock(&gzvm->lock);
+	mutex_unlock(&gzvm->ioevent_lock);
 
 	/* got in the front of this function */
 	eventfd_ctx_put(evt_ctx);

@@ -165,14 +164,15 @@ static int gzvm_assign_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args)
 		evt->wildcard = true;
 	}
 
+	mutex_lock(&gzvm->ioevent_lock);
 	if (ioeventfd_check_collision(gzvm, evt)) {
 		ret = -EEXIST;
+		mutex_unlock(&gzvm->ioevent_lock);
 		goto err_free;
 	}
 
-	mutex_lock(&gzvm->lock);
 	list_add_tail(&evt->list, &gzvm->ioevents);
-	mutex_unlock(&gzvm->lock);
+	mutex_unlock(&gzvm->ioevent_lock);
 
 	return 0;
 

@@ -259,18 +259,23 @@ bool gzvm_ioevent_write(struct gzvm_vcpu *vcpu, __u64 addr, int len,
 {
 	struct gzvm_ioevent *e;
 
+	mutex_lock(&vcpu->gzvm->ioevent_lock);
 	list_for_each_entry(e, &vcpu->gzvm->ioevents, list) {
 		if (gzvm_ioevent_in_range(e, addr, len, val)) {
 			eventfd_signal(e->evt_ctx, 1);
+			mutex_unlock(&vcpu->gzvm->ioevent_lock);
 			return true;
 		}
 	}
 
+	mutex_unlock(&vcpu->gzvm->ioevent_lock);
 	return false;
 }
 
 int gzvm_init_ioeventfd(struct gzvm *gzvm)
 {
 	INIT_LIST_HEAD(&gzvm->ioevents);
+	mutex_init(&gzvm->ioevent_lock);
 
 	return 0;
 }

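The ioeventfd hunks above replace the coarse gzvm->lock with a dedicated gzvm->ioevent_lock whose single invariant is that any traversal or mutation of the gzvm->ioevents list holds it; note how the early returns in gzvm_ioevent_write() and the error path in gzvm_assign_ioeventfd() each drop the lock before leaving. A minimal sketch of the invariant, using the names from the diff (the walker function itself is hypothetical):

	static void sketch_ioevents_walk(struct gzvm *gzvm)
	{
		struct gzvm_ioevent *e;

		mutex_lock(&gzvm->ioevent_lock);
		list_for_each_entry(e, &gzvm->ioevents, list) {
			/* inspect or signal e only while holding ioevent_lock */
		}
		mutex_unlock(&gzvm->ioevent_lock);
	}
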
@@ -127,7 +127,7 @@ static int gzvm_drv_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id gzvm_of_match[] = {
-	{ .compatible = "mediatek,geniezone-hyp" },
+	{ .compatible = "mediatek,geniezone" },
 	{/* sentinel */},
 };
 

@@ -136,7 +136,6 @@ static struct platform_driver gzvm_driver = {
 	.remove = gzvm_drv_remove,
 	.driver = {
 		.name = KBUILD_MODNAME,
-		.owner = THIS_MODULE,
 		.of_match_table = gzvm_of_match,
 	},
 };

@@ -5,109 +5,6 @@
 
 #include <linux/soc/mediatek/gzvm_drv.h>
 
-/**
- * hva_to_pa_fast() - converts hva to pa in generic fast way
- * @hva: Host virtual address.
- *
- * Return: GZVM_PA_ERR_BAD for translation error
- */
-u64 hva_to_pa_fast(u64 hva)
-{
-	struct page *page[1];
-	u64 pfn;
-
-	if (get_user_page_fast_only(hva, 0, page)) {
-		pfn = page_to_phys(page[0]);
-		put_page(page[0]);
-		return pfn;
-	}
-	return GZVM_PA_ERR_BAD;
-}
-
-/**
- * hva_to_pa_slow() - converts hva to pa in a slow way
- * @hva: Host virtual address
- *
- * This function converts HVA to PA in a slow way because the target hva is not
- * yet allocated and mapped in the host stage1 page table, we cannot find it
- * directly from current page table.
- * Thus, we have to allocate it and this operation is much slower than directly
- * find via current page table.
- *
- * Context: This function may sleep
- * Return: PA or GZVM_PA_ERR_BAD for translation error
- */
-u64 hva_to_pa_slow(u64 hva)
-{
-	struct page *page = NULL;
-	u64 pfn = 0;
-	int npages;
-
-	npages = get_user_pages_unlocked(hva, 1, &page, 0);
-	if (npages != 1)
-		return GZVM_PA_ERR_BAD;
-
-	if (page) {
-		pfn = page_to_phys(page);
-		put_page(page);
-		return pfn;
-	}
-
-	return GZVM_PA_ERR_BAD;
-}
-
-static u64 __gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn)
-{
-	u64 hva, pa;
-
-	if (gzvm_gfn_to_hva_memslot(memslot, gfn, &hva) != 0)
-		return GZVM_PA_ERR_BAD;
-
-	pa = gzvm_hva_to_pa_arch(hva);
-	if (pa != GZVM_PA_ERR_BAD)
-		return PHYS_PFN(pa);
-
-	pa = hva_to_pa_fast(hva);
-	if (pa != GZVM_PA_ERR_BAD)
-		return PHYS_PFN(pa);
-
-	pa = hva_to_pa_slow(hva);
-	if (pa != GZVM_PA_ERR_BAD)
-		return PHYS_PFN(pa);
-
-	return GZVM_PA_ERR_BAD;
-}
-
-/**
- * gzvm_gfn_to_pfn_memslot() - Translate gfn (guest ipa) to pfn (host pa),
- *			       result is in @pfn
- * @memslot: Pointer to struct gzvm_memslot.
- * @gfn: Guest frame number.
- * @pfn: Host page frame number.
- *
- * Return:
- * * 0		- Succeed
- * * -EFAULT	- Failed to convert
- */
-int gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn,
-			    u64 *pfn)
-{
-	u64 __pfn;
-
-	if (!memslot)
-		return -EFAULT;
-
-	__pfn = __gzvm_gfn_to_pfn_memslot(memslot, gfn);
-	if (__pfn == GZVM_PA_ERR_BAD) {
-		*pfn = 0;
-		return -EFAULT;
-	}
-
-	*pfn = __pfn;
-
-	return 0;
-}
-
 static int cmp_ppages(struct rb_node *node, const struct rb_node *parent)
 {
 	struct gzvm_pinned_page *a = container_of(node,

@@ -162,7 +59,8 @@ static int gzvm_remove_ppage(struct gzvm *vm, phys_addr_t ipa)
 	return 0;
 }
 
-static int pin_one_page(struct gzvm *vm, unsigned long hva, u64 gpa)
+static int pin_one_page(struct gzvm *vm, unsigned long hva, u64 gpa,
+			struct page **out_page)
 {
 	unsigned int flags = FOLL_HWPOISON | FOLL_LONGTERM | FOLL_WRITE;
 	struct gzvm_pinned_page *ppage = NULL;

@@ -175,10 +73,10 @@ static int pin_one_page(struct gzvm *vm, unsigned long hva, u64 gpa,
 		return -ENOMEM;
 
 	mmap_read_lock(mm);
-	pin_user_pages(hva, 1, flags, &page);
+	ret = pin_user_pages(hva, 1, flags, &page);
 	mmap_read_unlock(mm);
 
-	if (!page) {
+	if (ret != 1 || !page) {
 		kfree(ppage);
 		return -EFAULT;
 	}

@@ -204,6 +102,7 @@ static int pin_one_page(struct gzvm *vm, unsigned long hva, u64 gpa,
 		ret = 0;
 	}
 	mutex_unlock(&vm->mem_lock);
+	*out_page = page;
 
 	return ret;
 }

@@ -230,15 +129,42 @@ int gzvm_handle_relinquish(struct gzvm_vcpu *vcpu, phys_addr_t ipa)
 int gzvm_vm_allocate_guest_page(struct gzvm *vm, struct gzvm_memslot *slot,
 				u64 gfn, u64 *pfn)
 {
+	struct page *page = NULL;
 	unsigned long hva;
+	int ret;
 
-	if (gzvm_gfn_to_pfn_memslot(slot, gfn, pfn) != 0)
-		return -EFAULT;
-
 	if (gzvm_gfn_to_hva_memslot(slot, gfn, (u64 *)&hva) != 0)
 		return -EINVAL;
 
-	return pin_one_page(vm, hva, PFN_PHYS(gfn));
+	ret = pin_one_page(vm, hva, PFN_PHYS(gfn), &page);
+	if (ret != 0)
+		return ret;
+
+	if (page == NULL)
+		return -EFAULT;
+	/**
+	 * As `pin_user_pages` already gets the page struct, we don't need to
+	 * call other APIs to reduce function call overhead.
+	 */
+	*pfn = page_to_pfn(page);
+
+	return 0;
+}
+
+static int handle_single_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
+{
+	int ret;
+	u64 pfn;
+
+	ret = gzvm_vm_allocate_guest_page(vm, &vm->memslot[memslot_id], gfn, &pfn);
+	if (unlikely(ret))
+		return -EFAULT;
+
+	ret = gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1);
+	if (unlikely(ret))
+		return -EFAULT;
+
+	return ret;
 }
 
 static int handle_block_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)

@@ -266,6 +192,8 @@ static int handle_block_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
 	for (i = 0, __gfn = start_gfn; i < nr_entries; i++, __gfn++) {
 		ret = gzvm_vm_allocate_guest_page(vm, memslot, __gfn, &pfn);
 		if (unlikely(ret)) {
+			pr_notice("VM-%u failed to allocate page for GFN 0x%llx (%d)\n",
+				  vm->vm_id, __gfn, ret);
 			ret = -ERR_FAULT;
 			goto err_unlock;
 		}

@@ -285,21 +213,6 @@ err_unlock:
 	return ret;
 }
 
-static int handle_single_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
-{
-	int ret;
-	u64 pfn;
-
-	ret = gzvm_vm_allocate_guest_page(vm, &vm->memslot[memslot_id], gfn, &pfn);
-	if (unlikely(ret))
-		return -EFAULT;
-
-	ret = gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1);
-	if (unlikely(ret))
-		return -EFAULT;
-	return ret;
-}
-
 /**
  * gzvm_handle_page_fault() - Handle guest page fault, find corresponding page
  *			      for the faulting gpa

@@ -94,6 +94,33 @@ register_memslot_addr_range(struct gzvm *gzvm, struct gzvm_memslot *memslot)
 	return gzvm_vm_populate_mem_region(gzvm, memslot->slot_id);
 }
 
+/**
+ * memory_region_pre_check() - Preliminary check for userspace memory region
+ * @gzvm: Pointer to struct gzvm.
+ * @mem: Input memory region from user.
+ *
+ * Return: true for check passed, false for invalid input.
+ */
+static bool
+memory_region_pre_check(struct gzvm *gzvm,
+			struct gzvm_userspace_memory_region *mem)
+{
+	if (mem->slot >= GZVM_MAX_MEM_REGION)
+		return false;
+
+	if (!PAGE_ALIGNED(mem->guest_phys_addr) ||
+	    !PAGE_ALIGNED(mem->memory_size))
+		return false;
+
+	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
+		return false;
+
+	if ((mem->memory_size >> PAGE_SHIFT) > GZVM_MEM_MAX_NR_PAGES)
+		return false;
+
+	return true;
+}
+
 /**
  * gzvm_vm_ioctl_set_memory_region() - Set memory region of guest
  * @gzvm: Pointer to struct gzvm.

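The third test in memory_region_pre_check() is an unsigned wraparound guard. Worked example with 64-bit fields: guest_phys_addr = 0xffffffffffff0000 and memory_size = 0x20000 sum to 0x10000 after wrapping past 2^64, which is smaller than guest_phys_addr, so a region whose end overflows the address space is rejected before any slot state is touched.
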
@@ -114,8 +141,8 @@ gzvm_vm_ioctl_set_memory_region(struct gzvm *gzvm,
 	struct gzvm_memslot *memslot;
 	unsigned long size;
 
-	if (mem->slot >= GZVM_MAX_MEM_REGION)
-		return -ENXIO;
+	if (memory_region_pre_check(gzvm, mem) != true)
+		return -EINVAL;
 
 	memslot = &gzvm->memslot[mem->slot];
 

@@ -409,12 +436,6 @@ static void setup_vm_demand_paging(struct gzvm *vm)
 	}
 }
 
-static int debugfs_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-	return 0;
-}
-
 /**
  * hyp_mem_read() - Get size of hypervisor-allocated memory and stage 2 table
  * @file: Pointer to struct file

@@ -476,15 +497,13 @@ static ssize_t shared_mem_read(struct file *file, char __user *buf, size_t len,
 }
 
 static const struct file_operations hyp_mem_fops = {
-	.owner = THIS_MODULE,
-	.open = debugfs_open,
+	.open = simple_open,
 	.read = hyp_mem_read,
 	.llseek = no_llseek,
 };
 
 static const struct file_operations shared_mem_fops = {
-	.owner = THIS_MODULE,
-	.open = debugfs_open,
+	.open = simple_open,
 	.read = shared_mem_read,
 	.llseek = no_llseek,
 };

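simple_open() is the stock fs/libfs.c helper that the removed debugfs_open() duplicated, so this hunk drops redundant code without changing behavior. Its body is essentially:

	int simple_open(struct inode *inode, struct file *file)
	{
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}
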
@@ -494,6 +513,9 @@ static int gzvm_create_vm_debugfs(struct gzvm *vm)
 	struct dentry *dent;
 	char dir_name[GZVM_MAX_DEBUGFS_DIR_NAME_SIZE];
 
+	if (!gzvm_debugfs_dir)
+		return -EFAULT;
+
 	if (vm->debug_dir) {
 		pr_warn("VM debugfs directory is duplicated\n");
 		return 0;

@@ -575,7 +597,9 @@ static struct gzvm *gzvm_create_vm(unsigned long vm_type)
 	list_add(&gzvm->vm_list, &gzvm_list);
 	mutex_unlock(&gzvm_list_lock);
 
-	gzvm_create_vm_debugfs(gzvm);
+	ret = gzvm_create_vm_debugfs(gzvm);
+	if (ret)
+		pr_debug("Failed to create debugfs for VM-%u\n", gzvm->vm_id);
 
 	pr_debug("VM-%u is created\n", gzvm->vm_id);
 

@@ -75,6 +75,12 @@ struct gzvm_memory_region_ranges {
 	struct mem_region_addr_range constituents[];
 };
 
+/*
+ * A reasonable and large enough limit for the maximum number of pages a
+ * guest can use.
+ */
+#define GZVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+
 /**
  * struct gzvm_memslot: VM's memory slot descriptor
  * @base_gfn: begin of guest page frame

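For scale: (1UL << 31) - 1 is 2147483647 pages, so with the usual 4 KiB pages this caps a single guest at just under 2^31 * 2^12 = 2^43 bytes, i.e. 8 TiB, comfortably above any realistic configuration while keeping the memory_size >> PAGE_SHIFT comparison well inside 64-bit range.
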
@@ -123,19 +129,20 @@ struct gzvm_vm_stat {
  * @lock: lock for list_add
  * @irqfds: the data structure is used to keep irqfds's information
  * @ioevents: list head for ioevents
+ * @ioevent_lock: lock for ioevent list
  * @vm_list: list head for vm list
  * @vm_id: vm id
  * @irq_ack_notifier_list: list head for irq ack notifier
  * @irq_srcu: structure data for SRCU(sleepable rcu)
  * @irq_lock: lock for irq injection
+ * @pinned_pages: use rb-tree to record pin/unpin page
+ * @mem_lock: lock for memory operations
  * @mem_alloc_mode: memory allocation mode - fully allocated or demand paging
  * @demand_page_gran: demand page granularity: how much memory we allocate for
  *		      VM in a single page fault
  * @demand_page_buffer: the mailbox for transferring large portion pages
  * @demand_paging_lock: lock for preventing multiple cpu using the same demand
  *			page mailbox at the same time
- * @pinned_pages: use rb-tree to record pin/unpin page
- * @mem_lock: lock for memory operations
  * @stat: information for VM memory statistics
  * @debug_dir: debugfs directory node for VM memory statistics
  */

@@ -153,6 +160,7 @@ struct gzvm {
 	} irqfds;
 
 	struct list_head ioevents;
+	struct mutex ioevent_lock;
 
 	struct list_head vm_list;
 	u16 vm_id;

@@ -162,13 +170,13 @@ struct gzvm {
 	struct mutex irq_lock;
 	u32 mem_alloc_mode;
 
+	struct rb_root pinned_pages;
+	struct mutex mem_lock;
+
 	u32 demand_page_gran;
 	u64 *demand_page_buffer;
 	struct mutex demand_paging_lock;
 
-	struct rb_root pinned_pages;
-	struct mutex mem_lock;
-
 	struct gzvm_vm_stat stat;
 	struct dentry *debug_dir;
 };

@@ -197,10 +205,6 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
 				  struct gzvm_enable_cap *cap,
 				  void __user *argp);
 
-u64 gzvm_hva_to_pa_arch(u64 hva);
-u64 hva_to_pa_fast(u64 hva);
-u64 hva_to_pa_slow(u64 hva);
-int gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn, u64 *pfn);
 int gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn,
 			    u64 *hva_memslot);
 int gzvm_vm_populate_mem_region(struct gzvm *gzvm, int slot_id);