Revert "irqchip/gic-v4: Don't allow a VMOVP on a dying VPE"

This reverts commit 01282ab518 which is
commit 1442ee0011 upstream.

It breaks the Android kernel ABI and can be brought back in the future
in an ABI-safe way if it is really needed.

Bug: 161946584
Change-Id: I4ed1631c4442d7ef3310401d21c1cd183a5c927a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Commit: fb47132c39
Parent: 227f0fab6f
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2024-11-19 21:46:25 +00:00

2 changed files with 7 additions and 15 deletions

diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -796,8 +796,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
 
 	if (!desc->its_vmapp_cmd.valid) {
-		alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
 		if (is_v4_1(its)) {
+			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
 			its_encode_alloc(cmd, alloc);
 			/*
 			 * Unmapping a VPE is self-synchronizing on GICv4.1,
@@ -816,13 +816,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 	its_encode_vpt_addr(cmd, vpt_addr);
 	its_encode_vpt_size(cmd, LPI_NRBITS - 1);
 
-	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
-
 	if (!is_v4_1(its))
 		goto out;
 
 	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
 
+	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
 	its_encode_alloc(cmd, alloc);
 
 	/*
@@ -3816,13 +3816,6 @@ static int its_vpe_set_affinity(struct irq_data *d,
 	unsigned long flags;
 	int from, cpu;
 
-	/*
-	 * Check if we're racing against a VPE being destroyed, for
-	 * which we don't want to allow a VMOVP.
-	 */
-	if (!atomic_read(&vpe->vmapp_count))
-		return -EINVAL;
-
 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
 	 * we can and only do it if we really have to. Also, if mapped
@@ -4463,8 +4456,9 @@ static int its_vpe_init(struct its_vpe *vpe)
 	raw_spin_lock_init(&vpe->vpe_lock);
 	vpe->vpe_id = vpe_id;
 	vpe->vpt_page = vpt_page;
-	atomic_set(&vpe->vmapp_count, 0);
-	if (!gic_rdists->has_rvpeid)
+	if (gic_rdists->has_rvpeid)
+		atomic_set(&vpe->vmapp_count, 0);
+	else
 		vpe->vpe_proxy_event = -1;
 
 	return 0;
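
For context on the idiom these hunks move around: the kernel's atomic_fetch_inc()
returns the value before the increment and atomic_dec_return() the value after the
decrement, so "!atomic_fetch_inc(&c)" is true exactly on the first mapping and
"!atomic_dec_return(&c)" exactly on the last unmapping; the reverted patch also used
the same counter as a guard, refusing a VMOVP once it had dropped to zero. A minimal
userspace sketch of that pattern using C11 atomics, with hypothetical names
(map_vpe, unmap_vpe, move_vpe) standing in for the driver code:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int vmapp_count;	/* live VMAPPs for this vPE */

	static bool map_vpe(void)
	{
		/* atomic_fetch_add() returns the old value: true only for 0 -> 1. */
		return atomic_fetch_add(&vmapp_count, 1) == 0;
	}

	static bool unmap_vpe(void)
	{
		/* True only for the transition 1 -> 0 (last VMAPP torn down). */
		return atomic_fetch_sub(&vmapp_count, 1) == 1;
	}

	/* The guard this revert removes: no move on a dying vPE. */
	static bool move_vpe(void)
	{
		return atomic_load(&vmapp_count) != 0;
	}

	int main(void)
	{
		printf("first map?  %d\n", map_vpe());	/* 1 */
		printf("move ok?    %d\n", move_vpe());	/* 1 */
		printf("last unmap? %d\n", unmap_vpe());	/* 1 */
		printf("move ok?    %d\n", move_vpe());	/* 0: VMOVP refused */
		return 0;
	}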

diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -58,12 +58,10 @@ struct its_vpe {
 			bool	enabled;
 			bool	group;
 		}			sgi_config[16];
+		atomic_t vmapp_count;
 		};
 	};
 
-	/* Track the VPE being mapped */
-	atomic_t vmapp_count;
-
 	/*
 	 * Ensures mutual exclusion between affinity setting of the
 	 * vPE and vLPI operations using vpe->col_idx.
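
This header hunk is the ABI-relevant part of the revert: hoisting vmapp_count out of
the union changes sizeof(struct its_vpe) and the offset of every member after the
union, and the Android kernel freezes that layout. A simplified userspace sketch
(field types reduced, struct names hypothetical) that makes such a shift visible:

	#include <stdatomic.h>
	#include <stddef.h>
	#include <stdio.h>

	struct vpe_old {	/* counter inside the union: the layout this revert restores */
		union {
			long vpe_proxy_event;		/* GICv4.0 arm */
			struct {			/* GICv4.1 arm */
				void *sgi_domain;
				char sgi_config[16];
				atomic_int vmapp_count;	/* arm padded to 8-byte alignment */
			};
		};
		int col_idx;	/* stand-in for the members that follow the union */
	};

	struct vpe_new {	/* counter hoisted out: the reverted upstream layout */
		union {
			long vpe_proxy_event;
			struct {
				void *sgi_domain;
				char sgi_config[16];
			};
		};
		atomic_int vmapp_count;
		int col_idx;
	};

	int main(void)
	{
		/* On LP64 the union shrinks, so col_idx (and everything after) moves. */
		printf("old: sizeof=%zu, col_idx at %zu\n",
		       sizeof(struct vpe_old), offsetof(struct vpe_old, col_idx));
		printf("new: sizeof=%zu, col_idx at %zu\n",
		       sizeof(struct vpe_new), offsetof(struct vpe_new, col_idx));
		return 0;
	}

That kind of offset shift in a struct reachable from the frozen KMI is what makes
the upstream fix unsafe to carry as-is, hence the revert.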