ANDROID: KVM: arm64: pkvm_checked_mem_transition for mem_protect share transitions

In preparation for handling ranges in guest HVCs, add a new structure
for mem_protect. While pkvm_mem_transition holds the original request,
pkvm_checked_mem_transition contains the actual range which has been
verified before a transition occurs.

We do not always want to serve the entire range at once, either to avoid
blocking a CPU at EL2 for too long or because we don't want to serve a
physically non-contiguous mapping in one go. When either happens, we
need the pkvm_checked_mem_transition structure to keep track of the
verified fragment.

Bug: 278749606
Bug: 243642516
Change-Id: I20538e46d8922e6c750372082a722237b798d17b
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
This commit is contained in:
Vincent Donnefort 2023-07-07 15:52:02 +01:00
parent 5b532f54e4
commit 20211228f8

View File

@ -842,6 +842,14 @@ struct pkvm_mem_transition {
} completer;
};
struct pkvm_checked_mem_transition {
const struct pkvm_mem_transition *tx;
u64 completer_addr;
/* Number of physically contiguous pages */
u64 nr_pages;
};
struct check_walk_data {
enum pkvm_page_state desired;
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
@ -950,33 +958,34 @@ static int host_request_owned_transition(u64 *completer_addr,
return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
static int host_request_unshare(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
static int host_request_unshare(struct pkvm_checked_mem_transition *checked_tx)
{
const struct pkvm_mem_transition *tx = checked_tx->tx;
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
*completer_addr = tx->initiator.host.completer_addr;
checked_tx->completer_addr = tx->initiator.host.completer_addr;
checked_tx->nr_pages = tx->nr_pages;
return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
static int host_initiate_share(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
static int host_initiate_share(const struct pkvm_checked_mem_transition *checked_tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
const struct pkvm_mem_transition *tx = checked_tx->tx;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
*completer_addr = tx->initiator.host.completer_addr;
return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
static int host_initiate_unshare(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
static int host_initiate_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
const struct pkvm_mem_transition *tx = checked_tx->tx;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
*completer_addr = tx->initiator.host.completer_addr;
return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
@ -996,61 +1005,69 @@ static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
tx->initiator.id != PKVM_ID_HYP);
}
static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
static int __host_ack_transition(u64 addr, u64 size,
const struct pkvm_mem_transition *tx,
enum pkvm_page_state state)
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (__host_ack_skip_pgtable_check(tx))
return 0;
return __host_check_page_state_range(addr, size, state);
}
static int host_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
static int host_ack_share(const struct pkvm_checked_mem_transition *checked_tx,
enum kvm_pgtable_prot perms)
{
const struct pkvm_mem_transition *tx = checked_tx->tx;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
if (perms != PKVM_HOST_MEM_PROT)
return -EPERM;
return __host_ack_transition(addr, tx, PKVM_NOPAGE);
return __host_ack_transition(checked_tx->completer_addr, size,
tx, PKVM_NOPAGE);
}
static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
static int host_ack_donation(u64 addr,
const struct pkvm_mem_transition *tx)
{
return __host_ack_transition(addr, tx, PKVM_NOPAGE);
return __host_ack_transition(addr, tx->nr_pages * PAGE_SIZE, tx, PKVM_NOPAGE);
}
static int host_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
static int host_ack_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
return __host_ack_transition(addr, tx, PKVM_PAGE_SHARED_BORROWED);
return __host_ack_transition(checked_tx->completer_addr,
checked_tx->nr_pages * PAGE_SIZE,
checked_tx->tx, PKVM_PAGE_SHARED_BORROWED);
}
static int host_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
static int host_complete_share(const struct pkvm_checked_mem_transition *checked_tx,
enum kvm_pgtable_prot perms)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
int err;
err = __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_BORROWED);
err = __host_set_page_state_range(checked_tx->completer_addr, size,
PKVM_PAGE_SHARED_BORROWED);
if (err)
return err;
if (tx->initiator.id == PKVM_ID_GUEST)
psci_mem_protect_dec(tx->nr_pages);
if (checked_tx->tx->initiator.id == PKVM_ID_GUEST)
psci_mem_protect_dec(checked_tx->nr_pages * PAGE_SIZE);
return 0;
}
static int host_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
static int host_complete_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
u8 owner_id = tx->initiator.id;
u64 size = tx->nr_pages * PAGE_SIZE;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
u8 owner_id = checked_tx->tx->initiator.id;
if (tx->initiator.id == PKVM_ID_GUEST)
psci_mem_protect_inc(tx->nr_pages);
if (checked_tx->tx->initiator.id == PKVM_ID_GUEST)
psci_mem_protect_inc(checked_tx->nr_pages * PAGE_SIZE);
return host_stage2_set_owner_locked(addr, size, owner_id);
return host_stage2_set_owner_locked(checked_tx->completer_addr, size,
owner_id);
}
static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
@ -1115,12 +1132,13 @@ static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
tx->initiator.id != PKVM_ID_HOST);
}
static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
static int hyp_ack_share(const struct pkvm_checked_mem_transition *checked_tx,
enum kvm_pgtable_prot perms)
{
u64 size = tx->nr_pages * PAGE_SIZE;
phys_addr_t phys = hyp_virt_to_phys((void *)addr);
phys_addr_t phys = hyp_virt_to_phys((void *)checked_tx->completer_addr);
enum kvm_pgtable_prot prot = default_hyp_prot(phys);
const struct pkvm_mem_transition *tx = checked_tx->tx;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
if (!addr_is_memory(phys) || perms != prot)
return -EPERM;
@ -1128,12 +1146,15 @@ static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
return __hyp_check_page_state_range(checked_tx->completer_addr,
size, PKVM_NOPAGE);
}
static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
static int hyp_ack_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
const struct pkvm_mem_transition *tx = checked_tx->tx;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
u64 addr = checked_tx->completer_addr;
if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
return -EBUSY;
@ -1155,22 +1176,25 @@ static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}
static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
static int hyp_complete_share(const struct pkvm_checked_mem_transition *checked_tx,
enum kvm_pgtable_prot perms)
{
void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
void *start = (void *)checked_tx->completer_addr;
void *end = start + (checked_tx->nr_pages * PAGE_SIZE);
enum kvm_pgtable_prot prot;
prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
return pkvm_create_mappings_locked(start, end, prot);
}
static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
static int hyp_complete_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
u64 unmapped, size = checked_tx->nr_pages * PAGE_SIZE;
return (ret != size) ? -EFAULT : 0;
unmapped = kvm_pgtable_hyp_unmap(&pkvm_pgtable, checked_tx->completer_addr,
size);
return (unmapped != size) ? -EFAULT : 0;
}
static int hyp_complete_donation(u64 addr,
@ -1210,22 +1234,26 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr,
return check_page_state_range(&vm->pgt, addr, size, &d);
}
static int guest_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
static int guest_ack_share(const struct pkvm_checked_mem_transition *checked_tx,
enum kvm_pgtable_prot perms)
{
u64 size = tx->nr_pages * PAGE_SIZE;
const struct pkvm_mem_transition *tx = checked_tx->tx;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
if (!addr_is_memory(tx->completer.guest.phys) || (perms & ~KVM_PGTABLE_PROT_RWX))
return -EPERM;
return __guest_check_page_state_range(tx->completer.guest.hyp_vm,
addr, size, PKVM_NOPAGE);
checked_tx->completer_addr, size,
PKVM_NOPAGE);
}
static int guest_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
static int guest_ack_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
enum pkvm_page_state state = PKVM_PAGE_SHARED_BORROWED;
u64 size = tx->nr_pages * PAGE_SIZE;
const struct pkvm_mem_transition *tx = checked_tx->tx;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
u64 addr = checked_tx->completer_addr;
int ret;
ret = __guest_check_page_state_range(tx->completer.guest.hyp_vm, addr, size, state);
@ -1249,12 +1277,14 @@ static int guest_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
addr, size, PKVM_NOPAGE);
}
static int guest_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
static int guest_complete_share(const struct pkvm_checked_mem_transition *checked_tx,
enum kvm_pgtable_prot perms)
{
const struct pkvm_mem_transition *tx = checked_tx->tx;
struct pkvm_hyp_vm *vm = tx->completer.guest.hyp_vm;
struct kvm_hyp_memcache *mc = tx->completer.guest.mc;
u64 size = tx->nr_pages * PAGE_SIZE;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
u64 addr = checked_tx->completer_addr;
enum kvm_pgtable_prot prot;
prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
@ -1262,10 +1292,12 @@ static int guest_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
prot, mc, 0);
}
static int guest_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
static int guest_complete_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
const struct pkvm_mem_transition *tx = checked_tx->tx;
struct pkvm_hyp_vm *vm = tx->completer.guest.hyp_vm;
u64 size = tx->nr_pages * PAGE_SIZE;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
u64 addr = checked_tx->completer_addr;
return kvm_pgtable_stage2_unmap(&vm->pgt, addr, size);
}
@ -1323,10 +1355,10 @@ static int __guest_get_completer_addr(u64 *completer_addr, phys_addr_t phys,
return 0;
}
static int __guest_request_page_transition(u64 *completer_addr,
const struct pkvm_mem_transition *tx,
static int __guest_request_page_transition(struct pkvm_checked_mem_transition *checked_tx,
enum pkvm_page_state desired)
{
const struct pkvm_mem_transition *tx = checked_tx->tx;
struct pkvm_hyp_vm *vm = tx->initiator.guest.hyp_vm;
enum pkvm_page_state state;
phys_addr_t phys;
@ -1361,30 +1393,29 @@ static int __guest_request_page_transition(u64 *completer_addr,
if (!addr_is_allowed_memory(phys))
return -EINVAL;
return __guest_get_completer_addr(completer_addr, phys, tx);
checked_tx->nr_pages = tx->nr_pages;
return __guest_get_completer_addr(&checked_tx->completer_addr, phys, tx);
}
static int guest_request_share(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
static int guest_request_share(struct pkvm_checked_mem_transition *checked_tx)
{
return __guest_request_page_transition(completer_addr, tx,
PKVM_PAGE_OWNED);
return __guest_request_page_transition(checked_tx, PKVM_PAGE_OWNED);
}
static int guest_request_unshare(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
static int guest_request_unshare(struct pkvm_checked_mem_transition *checked_tx)
{
return __guest_request_page_transition(completer_addr, tx,
return __guest_request_page_transition(checked_tx,
PKVM_PAGE_SHARED_OWNED);
}
static int __guest_initiate_page_transition(u64 *completer_addr,
const struct pkvm_mem_transition *tx,
static int __guest_initiate_page_transition(const struct pkvm_checked_mem_transition *checked_tx,
enum pkvm_page_state state)
{
struct pkvm_hyp_vm *vm = tx->initiator.guest.hyp_vm;
const struct pkvm_mem_transition *tx = checked_tx->tx;
struct kvm_hyp_memcache *mc = tx->initiator.guest.mc;
u64 size = tx->nr_pages * PAGE_SIZE;
struct pkvm_hyp_vm *vm = tx->initiator.guest.hyp_vm;
u64 size = checked_tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
enum kvm_pgtable_prot prot;
phys_addr_t phys;
@ -1397,38 +1428,31 @@ static int __guest_initiate_page_transition(u64 *completer_addr,
phys = kvm_pte_to_phys(pte);
prot = pkvm_mkstate(kvm_pgtable_stage2_pte_prot(pte), state);
ret = kvm_pgtable_stage2_map(&vm->pgt, addr, size, phys, prot, mc, 0);
if (ret)
return ret;
return __guest_get_completer_addr(completer_addr, phys, tx);
return kvm_pgtable_stage2_map(&vm->pgt, addr, size, phys, prot, mc, 0);
}
static int guest_initiate_share(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
static int guest_initiate_share(const struct pkvm_checked_mem_transition *checked_tx)
{
return __guest_initiate_page_transition(completer_addr, tx,
PKVM_PAGE_SHARED_OWNED);
return __guest_initiate_page_transition(checked_tx, PKVM_PAGE_SHARED_OWNED);
}
static int guest_initiate_unshare(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
static int guest_initiate_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
return __guest_initiate_page_transition(completer_addr, tx,
PKVM_PAGE_OWNED);
return __guest_initiate_page_transition(checked_tx, PKVM_PAGE_OWNED);
}
static int check_share(struct pkvm_mem_transition *tx)
static int check_share(struct pkvm_checked_mem_transition *checked_tx)
{
u64 completer_addr;
const struct pkvm_mem_transition *tx = checked_tx->tx;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
ret = host_request_owned_transition(&completer_addr, tx);
ret = host_request_owned_transition(&checked_tx->completer_addr, tx);
checked_tx->nr_pages = tx->nr_pages;
break;
case PKVM_ID_GUEST:
ret = guest_request_share(&completer_addr, tx);
ret = guest_request_share(checked_tx);
break;
default:
ret = -EINVAL;
@ -1439,10 +1463,10 @@ static int check_share(struct pkvm_mem_transition *tx)
switch (tx->completer.id) {
case PKVM_ID_HOST:
ret = host_ack_share(completer_addr, tx, tx->completer.prot);
ret = host_ack_share(checked_tx, tx->completer.prot);
break;
case PKVM_ID_HYP:
ret = hyp_ack_share(completer_addr, tx, tx->completer.prot);
ret = hyp_ack_share(checked_tx, tx->completer.prot);
break;
case PKVM_ID_FFA:
/*
@ -1452,7 +1476,7 @@ static int check_share(struct pkvm_mem_transition *tx)
ret = 0;
break;
case PKVM_ID_GUEST:
ret = guest_ack_share(completer_addr, tx, tx->completer.prot);
ret = guest_ack_share(checked_tx, tx->completer.prot);
break;
default:
ret = -EINVAL;
@ -1461,17 +1485,17 @@ static int check_share(struct pkvm_mem_transition *tx)
return ret;
}
static int __do_share(struct pkvm_mem_transition *tx)
static int __do_share(struct pkvm_mem_transition *tx,
const struct pkvm_checked_mem_transition *checked_tx)
{
u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
ret = host_initiate_share(&completer_addr, tx);
ret = host_initiate_share(checked_tx);
break;
case PKVM_ID_GUEST:
ret = guest_initiate_share(&completer_addr, tx);
ret = guest_initiate_share(checked_tx);
break;
default:
ret = -EINVAL;
@ -1482,10 +1506,10 @@ static int __do_share(struct pkvm_mem_transition *tx)
switch (tx->completer.id) {
case PKVM_ID_HOST:
ret = host_complete_share(completer_addr, tx, tx->completer.prot);
ret = host_complete_share(checked_tx, tx->completer.prot);
break;
case PKVM_ID_HYP:
ret = hyp_complete_share(completer_addr, tx, tx->completer.prot);
ret = hyp_complete_share(checked_tx, tx->completer.prot);
break;
case PKVM_ID_FFA:
/*
@ -1495,7 +1519,7 @@ static int __do_share(struct pkvm_mem_transition *tx)
ret = 0;
break;
case PKVM_ID_GUEST:
ret = guest_complete_share(completer_addr, tx, tx->completer.prot);
ret = guest_complete_share(checked_tx, tx->completer.prot);
break;
default:
ret = -EINVAL;
@ -1513,28 +1537,39 @@ static int __do_share(struct pkvm_mem_transition *tx)
* Initiator: OWNED => SHARED_OWNED
* Completer: NOPAGE => SHARED_BORROWED
*/
static int do_share(struct pkvm_mem_transition *share)
static int do_share(struct pkvm_mem_transition *share,
u64 *nr_shared)
{
struct pkvm_checked_mem_transition checked_tx = {
.tx = share,
.nr_pages = 0,
};
int ret;
ret = check_share(share);
ret = check_share(&checked_tx);
if (ret)
return ret;
return WARN_ON(__do_share(share));
ret = __do_share(share, &checked_tx);
if (WARN_ON(ret))
return ret;
*nr_shared = checked_tx.nr_pages;
return ret;
}
static int check_unshare(struct pkvm_mem_transition *tx)
static int check_unshare(struct pkvm_checked_mem_transition *checked_tx)
{
u64 completer_addr;
const struct pkvm_mem_transition *tx = checked_tx->tx;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
ret = host_request_unshare(&completer_addr, tx);
ret = host_request_unshare(checked_tx);
break;
case PKVM_ID_GUEST:
ret = guest_request_unshare(&completer_addr, tx);
ret = guest_request_unshare(checked_tx);
break;
default:
ret = -EINVAL;
@ -1545,13 +1580,13 @@ static int check_unshare(struct pkvm_mem_transition *tx)
switch (tx->completer.id) {
case PKVM_ID_HOST:
ret = host_ack_unshare(completer_addr, tx);
ret = host_ack_unshare(checked_tx);
break;
case PKVM_ID_GUEST:
ret = guest_ack_unshare(completer_addr, tx);
ret = guest_ack_unshare(checked_tx);
break;
case PKVM_ID_HYP:
ret = hyp_ack_unshare(completer_addr, tx);
ret = hyp_ack_unshare(checked_tx);
break;
case PKVM_ID_FFA:
/* See check_share() */
@ -1564,17 +1599,17 @@ static int check_unshare(struct pkvm_mem_transition *tx)
return ret;
}
static int __do_unshare(struct pkvm_mem_transition *tx)
static int __do_unshare(const struct pkvm_checked_mem_transition *checked_tx)
{
u64 completer_addr;
const struct pkvm_mem_transition *tx = checked_tx->tx;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
ret = host_initiate_unshare(&completer_addr, tx);
ret = host_initiate_unshare(checked_tx);
break;
case PKVM_ID_GUEST:
ret = guest_initiate_unshare(&completer_addr, tx);
ret = guest_initiate_unshare(checked_tx);
break;
default:
ret = -EINVAL;
@ -1585,13 +1620,13 @@ static int __do_unshare(struct pkvm_mem_transition *tx)
switch (tx->completer.id) {
case PKVM_ID_HOST:
ret = host_complete_unshare(completer_addr, tx);
ret = host_complete_unshare(checked_tx);
break;
case PKVM_ID_GUEST:
ret = guest_complete_unshare(completer_addr, tx);
ret = guest_complete_unshare(checked_tx);
break;
case PKVM_ID_HYP:
ret = hyp_complete_unshare(completer_addr, tx);
ret = hyp_complete_unshare(checked_tx);
break;
case PKVM_ID_FFA:
/* See __do_share() */
@ -1613,15 +1648,26 @@ static int __do_unshare(struct pkvm_mem_transition *tx)
* Initiator: SHARED_OWNED => OWNED
* Completer: SHARED_BORROWED => NOPAGE
*/
static int do_unshare(struct pkvm_mem_transition *share)
static int do_unshare(struct pkvm_mem_transition *share,
u64 *nr_unshared)
{
struct pkvm_checked_mem_transition checked_tx = {
.tx = share,
.nr_pages = 0,
};
int ret;
ret = check_unshare(share);
ret = check_unshare(&checked_tx);
if (ret)
return ret;
return WARN_ON(__do_unshare(share));
ret = __do_unshare(&checked_tx);
if (WARN_ON(ret))
return ret;
*nr_unshared = checked_tx.nr_pages;
return 0;
}
static int check_donation(struct pkvm_mem_transition *tx)
@ -1735,11 +1781,12 @@ int __pkvm_host_share_hyp(u64 pfn)
.prot = default_hyp_prot(host_addr),
},
};
u64 nr_shared;
host_lock_component();
hyp_lock_component();
ret = do_share(&share);
ret = do_share(&share, &nr_shared);
hyp_unlock_component();
host_unlock_component();
@ -1766,11 +1813,12 @@ int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *vcpu, u64 ipa)
.prot = PKVM_HOST_MEM_PROT,
},
};
u64 nr_shared;
host_lock_component();
guest_lock_component(vm);
ret = do_share(&share);
ret = do_share(&share, &nr_shared);
guest_unlock_component(vm);
host_unlock_component();
@ -1797,11 +1845,12 @@ int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *vcpu, u64 ipa)
.prot = PKVM_HOST_MEM_PROT,
},
};
u64 nr_unshared;
host_lock_component();
guest_lock_component(vm);
ret = do_unshare(&share);
ret = do_unshare(&share, &nr_unshared);
guest_unlock_component(vm);
host_unlock_component();
@ -1828,11 +1877,12 @@ int __pkvm_host_unshare_hyp(u64 pfn)
.prot = default_hyp_prot(host_addr),
},
};
u64 nr_unshared;
host_lock_component();
hyp_lock_component();
ret = do_unshare(&share);
ret = do_unshare(&share, &nr_unshared);
hyp_unlock_component();
host_unlock_component();
@ -2068,9 +2118,10 @@ int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
.id = PKVM_ID_FFA,
},
};
u64 nr_shared;
host_lock_component();
ret = do_share(&share);
ret = do_share(&share, &nr_shared);
host_unlock_component();
return ret;
@ -2089,9 +2140,10 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
.id = PKVM_ID_FFA,
},
};
u64 nr_unshared;
host_lock_component();
ret = do_unshare(&share);
ret = do_unshare(&share, &nr_unshared);
host_unlock_component();
return ret;
@ -2122,12 +2174,13 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum k
},
},
};
u64 nr_shared;
host_lock_component();
guest_lock_component(vm);
ret = do_share(&share);
ret = do_share(&share, &nr_shared);
guest_unlock_component(vm);
host_unlock_component();
@ -2158,11 +2211,12 @@ int __pkvm_host_unshare_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vm *vm)
},
},
};
u64 nr_unshared;
host_lock_component();
guest_lock_component(vm);
ret = do_unshare(&share);
ret = do_unshare(&share, &nr_unshared);
guest_unlock_component(vm);
host_unlock_component();
@ -2194,6 +2248,10 @@ int __pkvm_relax_perms(u64 pfn, u64 gfn, enum kvm_pgtable_prot prot, struct pkvm
},
},
};
struct pkvm_checked_mem_transition checked_tx = {
.tx = &share,
.nr_pages = 0,
};
if ((prot & KVM_PGTABLE_PROT_RWX) != prot)
return -EPERM;
@ -2201,7 +2259,7 @@ int __pkvm_relax_perms(u64 pfn, u64 gfn, enum kvm_pgtable_prot prot, struct pkvm
host_lock_component();
guest_lock_component(vm);
ret = check_unshare(&share);
ret = check_unshare(&checked_tx);
if (ret)
goto unlock;
@ -2236,11 +2294,15 @@ int __pkvm_wrprotect(struct pkvm_hyp_vm *vm, u64 pfn, u64 gfn)
},
},
};
struct pkvm_checked_mem_transition checked_tx = {
.tx = &share,
.nr_pages = 0,
};
host_lock_component();
guest_lock_component(vm);
ret = check_unshare(&share);
ret = check_unshare(&checked_tx);
if (ret)
goto unlock;