Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-10-22 23:13:01 +02:00)
iommufd 6.14 merge window pull
No major functionality this cycle:

 - iommufd part of the domain_alloc_paging_flags() conversion

 - Move IOMMU_HWPT_FAULT_ID_VALID processing out of drivers

 - Increase a timeout waiting for other threads to drop transient
   refcounts that syzkaller was hitting

 - Fix a UBSAN hit in iova_bitmap due to shift out of bounds

 - Add missing cleanup of fault events during FD shutdown, fixing a
   memory leak

 - Improve the fault delivery flow to have a smaller locking critical
   region that does not include copy_to_user()

 - Fix 32 bit ABI breakage due to missed implicit padding, and fix the
   stack memory leakage

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRRRCHOFoQz/8F5bUaFwuHvBreFYQUCZ5JgnAAKCRCFwuHvBreF
YQWrAP9ItOsbeOxYIEVQK1E90HbMWVHF8RhWDPnpChgzyorjjQEA/OOou6uTAcVC
fJybLk3T7JrutMmBFvVLsRg+yCJPlgE=
=ldhT
-----END PGP SIGNATURE-----

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull iommufd updates from Jason Gunthorpe:
 "No major functionality this cycle:

  - iommufd part of the domain_alloc_paging_flags() conversion

  - Move IOMMU_HWPT_FAULT_ID_VALID processing out of drivers

  - Increase a timeout waiting for other threads to drop transient
    refcounts that syzkaller was hitting

  - Fix a UBSAN hit in iova_bitmap due to shift out of bounds

  - Add missing cleanup of fault events during FD shutdown, fixing a
    memory leak

  - Improve the fault delivery flow to have a smaller locking critical
    region that does not include copy_to_user()

  - Fix 32 bit ABI breakage due to missed implicit padding, and fix the
    stack memory leakage"

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd:
  iommufd: Fix struct iommu_hwpt_pgfault init and padding
  iommufd/fault: Use a separate spinlock to protect fault->deliver list
  iommufd/fault: Destroy response and mutex in iommufd_fault_destroy()
  iommufd: Keep OBJ/IOCTL lists in an alphabetical order
  iommufd/iova_bitmap: Fix shift-out-of-bounds in iova_bitmap_offset_to_index()
  iommu: iommufd: fix WARNING in iommufd_device_unbind
  iommufd: Deal with IOMMU_HWPT_FAULT_ID_VALID in iommufd core
  iommufd/selftest: Remove domain_alloc_paging()
commit aa44198a6c
@@ -178,18 +178,12 @@ arm_vsmmu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
 			      const struct iommu_user_data *user_data)
 {
 	struct arm_vsmmu *vsmmu = container_of(viommu, struct arm_vsmmu, core);
-	const u32 SUPPORTED_FLAGS = IOMMU_HWPT_FAULT_ID_VALID;
 	struct arm_smmu_nested_domain *nested_domain;
 	struct iommu_hwpt_arm_smmuv3 arg;
 	bool enable_ats = false;
 	int ret;
 
-	/*
-	 * Faults delivered to the nested domain are faults that originated by
-	 * the S1 in the domain. The core code will match all PASIDs when
-	 * delivering the fault due to user_pasid_table
-	 */
-	if (flags & ~SUPPORTED_FLAGS)
+	if (flags)
 		return ERR_PTR(-EOPNOTSUPP);
 
 	ret = iommu_copy_struct_from_user(&arg, user_data,

@@ -3338,8 +3338,7 @@ intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
 	bool first_stage;
 
 	if (flags &
-	    (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING
-	       | IOMMU_HWPT_FAULT_ID_VALID)))
+	    (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
 		return ERR_PTR(-EOPNOTSUPP);
 	if (nested_parent && !nested_supported(iommu))
 		return ERR_PTR(-EOPNOTSUPP);

@@ -103,15 +103,23 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
 {
 	struct iommufd_fault *fault = hwpt->fault;
 	struct iopf_group *group, *next;
+	struct list_head free_list;
 	unsigned long index;
 
 	if (!fault)
 		return;
+	INIT_LIST_HEAD(&free_list);
 
 	mutex_lock(&fault->mutex);
+	spin_lock(&fault->lock);
 	list_for_each_entry_safe(group, next, &fault->deliver, node) {
 		if (group->attach_handle != &handle->handle)
 			continue;
+		list_move(&group->node, &free_list);
+	}
+	spin_unlock(&fault->lock);
+
+	list_for_each_entry_safe(group, next, &free_list, node) {
+		list_del(&group->node);
 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
 		iopf_free_group(group);

@@ -213,6 +221,7 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
 {
 	struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
 	struct iopf_group *group, *next;
+	unsigned long index;
 
 	/*
 	 * The iommufd object's reference count is zero at this point.

@@ -225,6 +234,13 @@ void iommufd_fault_destroy(struct iommufd_object *obj)
 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
 		iopf_free_group(group);
 	}
+	xa_for_each(&fault->response, index, group) {
+		xa_erase(&fault->response, index);
+		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
+		iopf_free_group(group);
+	}
+	xa_destroy(&fault->response);
+	mutex_destroy(&fault->mutex);
 }
 
 static void iommufd_compose_fault_message(struct iommu_fault *fault,

@@ -247,7 +263,7 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
 {
 	size_t fault_size = sizeof(struct iommu_hwpt_pgfault);
 	struct iommufd_fault *fault = filep->private_data;
-	struct iommu_hwpt_pgfault data;
+	struct iommu_hwpt_pgfault data = {};
 	struct iommufd_device *idev;
 	struct iopf_group *group;
 	struct iopf_fault *iopf;

@@ -258,17 +274,19 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
 		return -ESPIPE;
 
 	mutex_lock(&fault->mutex);
-	while (!list_empty(&fault->deliver) && count > done) {
-		group = list_first_entry(&fault->deliver,
-					 struct iopf_group, node);
-
-		if (group->fault_count * fault_size > count - done)
+	while ((group = iommufd_fault_deliver_fetch(fault))) {
+		if (done >= count ||
+		    group->fault_count * fault_size > count - done) {
+			iommufd_fault_deliver_restore(fault, group);
 			break;
+		}
 
 		rc = xa_alloc(&fault->response, &group->cookie, group,
 			      xa_limit_32b, GFP_KERNEL);
-		if (rc)
+		if (rc) {
+			iommufd_fault_deliver_restore(fault, group);
 			break;
+		}
 
 		idev = to_iommufd_handle(group->attach_handle)->idev;
 		list_for_each_entry(iopf, &group->faults, list) {

@@ -277,13 +295,12 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
 						      group->cookie);
 			if (copy_to_user(buf + done, &data, fault_size)) {
 				xa_erase(&fault->response, group->cookie);
+				iommufd_fault_deliver_restore(fault, group);
 				rc = -EFAULT;
 				break;
 			}
 			done += fault_size;
 		}
-
-		list_del(&group->node);
 	}
 	mutex_unlock(&fault->mutex);

@@ -341,10 +358,10 @@ static __poll_t iommufd_fault_fops_poll(struct file *filep,
 	__poll_t pollflags = EPOLLOUT;
 
 	poll_wait(filep, &fault->wait_queue, wait);
-	mutex_lock(&fault->mutex);
+	spin_lock(&fault->lock);
 	if (!list_empty(&fault->deliver))
 		pollflags |= EPOLLIN | EPOLLRDNORM;
-	mutex_unlock(&fault->mutex);
+	spin_unlock(&fault->lock);
 
 	return pollflags;
 }

@@ -386,6 +403,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
 	INIT_LIST_HEAD(&fault->deliver);
 	xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
 	mutex_init(&fault->mutex);
+	spin_lock_init(&fault->lock);
 	init_waitqueue_head(&fault->wait_queue);
 
 	filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,

@@ -434,9 +452,9 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
 	hwpt = group->attach_handle->domain->fault_data;
 	fault = hwpt->fault;
 
-	mutex_lock(&fault->mutex);
+	spin_lock(&fault->lock);
 	list_add_tail(&group->node, &fault->deliver);
-	mutex_unlock(&fault->mutex);
+	spin_unlock(&fault->lock);
 
 	wake_up_interruptible(&fault->wait_queue);
 
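These fault.c hunks implement the smaller critical region called out in the pull message: fault->deliver is now touched only under the new spinlock, while the read path pulls one group off the list, drops the lock, and only then allocates the response cookie and calls copy_to_user(); if either step fails, the group is pushed back onto the head of the list. The sketch below is a plain userspace analogue of that fetch/restore pattern using pthreads; the struct, helper names, and sizes are illustrative stand-ins, not the kernel code.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Userspace analogue: detach one item from a shared list under a short
 * lock, do the slow "copy to the consumer" work with the lock dropped,
 * and put the item back on failure. Names/types here are illustrative.
 */
struct item {
	struct item *next;
	char payload[64];
};

static pthread_spinlock_t list_lock;
static struct item *deliver_head;

/* analogue of iommufd_fault_deliver_fetch(): short critical section */
static struct item *fetch(void)
{
	struct item *it;

	pthread_spin_lock(&list_lock);
	it = deliver_head;
	if (it)
		deliver_head = it->next;
	pthread_spin_unlock(&list_lock);
	return it;
}

/* analogue of iommufd_fault_deliver_restore(): put it back at the head */
static void restore(struct item *it)
{
	pthread_spin_lock(&list_lock);
	it->next = deliver_head;
	deliver_head = it;
	pthread_spin_unlock(&list_lock);
}

static int consume(char *buf, size_t len)
{
	struct item *it = fetch();

	if (!it)
		return 0;
	if (strlen(it->payload) + 1 > len) {
		restore(it);		/* "copy" would not fit: undo the fetch */
		return -1;
	}
	/* slow work happens here, with the list lock not held */
	memcpy(buf, it->payload, strlen(it->payload) + 1);
	return 1;
}

int main(void)
{
	static struct item it = { .payload = "fault report" };
	char buf[64];

	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	deliver_head = &it;
	if (consume(buf, sizeof(buf)) == 1)
		printf("delivered: %s\n", buf);
	return 0;
}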
@@ -140,8 +140,8 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 	hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
 
 	if (ops->domain_alloc_paging_flags) {
-		hwpt->domain = ops->domain_alloc_paging_flags(idev->dev, flags,
-							      user_data);
+		hwpt->domain = ops->domain_alloc_paging_flags(idev->dev,
+				flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data);
 		if (IS_ERR(hwpt->domain)) {
 			rc = PTR_ERR(hwpt->domain);
 			hwpt->domain = NULL;

@@ -280,6 +280,8 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
 	struct iommufd_hw_pagetable *hwpt;
 	int rc;
 
+	if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+		return ERR_PTR(-EOPNOTSUPP);
 	if (!user_data->len)
 		return ERR_PTR(-EOPNOTSUPP);
 	if (!viommu->ops || !viommu->ops->alloc_domain_nested)

@@ -296,7 +298,9 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
 	hwpt_nested->parent = viommu->hwpt;
 
 	hwpt->domain =
-		viommu->ops->alloc_domain_nested(viommu, flags, user_data);
+		viommu->ops->alloc_domain_nested(viommu,
+						 flags & ~IOMMU_HWPT_FAULT_ID_VALID,
+						 user_data);
 	if (IS_ERR(hwpt->domain)) {
 		rc = PTR_ERR(hwpt->domain);
 		hwpt->domain = NULL;

@@ -443,14 +443,39 @@ struct iommufd_fault {
 	struct iommufd_ctx *ictx;
 	struct file *filep;
 
-	/* The lists of outstanding faults protected by below mutex. */
-	struct mutex mutex;
+	spinlock_t lock; /* protects the deliver list */
 	struct list_head deliver;
+	struct mutex mutex; /* serializes response flows */
 	struct xarray response;
 
 	struct wait_queue_head wait_queue;
 };
 
+/* Fetch the first node out of the fault->deliver list */
+static inline struct iopf_group *
+iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
+{
+	struct list_head *list = &fault->deliver;
+	struct iopf_group *group = NULL;
+
+	spin_lock(&fault->lock);
+	if (!list_empty(list)) {
+		group = list_first_entry(list, struct iopf_group, node);
+		list_del(&group->node);
+	}
+	spin_unlock(&fault->lock);
+	return group;
+}
+
+/* Restore a node back to the head of the fault->deliver list */
+static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
+						 struct iopf_group *group)
+{
+	spin_lock(&fault->lock);
+	list_add(&group->node, &fault->deliver);
+	spin_unlock(&fault->lock);
+}
+
 struct iommufd_attach_handle {
 	struct iommu_attach_handle handle;
 	struct iommufd_device *idev;

@@ -130,7 +130,7 @@ struct iova_bitmap {
 static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
 						 unsigned long iova)
 {
-	unsigned long pgsize = 1 << bitmap->mapped.pgshift;
+	unsigned long pgsize = 1UL << bitmap->mapped.pgshift;
 
 	return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
 }

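This one-character change is the UBSAN shift-out-of-bounds item from the pull message: the literal 1 has type int, so shifting it by a count at or beyond the width of int is undefined behavior even though the result is assigned to an unsigned long; writing 1UL performs the shift in the wider type. A minimal userspace illustration follows; the shift count and variable name are mine, and it assumes an LP64 target where unsigned long is 64 bits:

#include <stdio.h>

int main(void)
{
	unsigned long pgshift = 63;	/* stand-in for a large page shift */

	/* Undefined: the literal 1 is a 32-bit int, so a shift by 63 is out
	 * of range (the class of error UBSAN reports), regardless of the
	 * type the result is stored in:
	 *
	 *	unsigned long bad = 1 << pgshift;
	 */
	unsigned long good = 1UL << pgshift;	/* shift done in the 64-bit type */

	printf("pgsize = %#lx\n", good);
	return 0;
}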
@@ -104,7 +104,7 @@ static int iommufd_object_dec_wait_shortterm(struct iommufd_ctx *ictx,
 	if (wait_event_timeout(ictx->destroy_wait,
 			       refcount_read(&to_destroy->shortterm_users) ==
 				       0,
-			       msecs_to_jiffies(10000)))
+			       msecs_to_jiffies(60000)))
 		return 0;
 
 	pr_crit("Time out waiting for iommufd object to become free\n");

@@ -307,9 +307,9 @@ union ucmd_buffer {
 	struct iommu_ioas_map map;
 	struct iommu_ioas_unmap unmap;
 	struct iommu_option option;
+	struct iommu_vdevice_alloc vdev;
 	struct iommu_vfio_ioas vfio_ioas;
 	struct iommu_viommu_alloc viommu;
-	struct iommu_vdevice_alloc vdev;
 #ifdef CONFIG_IOMMUFD_TEST
 	struct iommu_test_cmd test;
 #endif

@@ -333,8 +333,8 @@ struct iommufd_ioctl_op {
 }
 static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
 	IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
-	IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc, struct iommu_fault_alloc,
-		 out_fault_fd),
+	IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc,
+		 struct iommu_fault_alloc, out_fault_fd),
 	IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
 		 __reserved),
 	IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,

@@ -355,20 +355,18 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
 		 src_iova),
 	IOCTL_OP(IOMMU_IOAS_IOVA_RANGES, iommufd_ioas_iova_ranges,
 		 struct iommu_ioas_iova_ranges, out_iova_alignment),
-	IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map,
-		 iova),
+	IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map, iova),
 	IOCTL_OP(IOMMU_IOAS_MAP_FILE, iommufd_ioas_map_file,
 		 struct iommu_ioas_map_file, iova),
 	IOCTL_OP(IOMMU_IOAS_UNMAP, iommufd_ioas_unmap, struct iommu_ioas_unmap,
 		 length),
-	IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option,
-		 val64),
+	IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option, val64),
+	IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
+		 struct iommu_vdevice_alloc, virt_id),
 	IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas,
 		 __reserved),
 	IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl,
 		 struct iommu_viommu_alloc, out_viommu_id),
-	IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
-		 struct iommu_vdevice_alloc, virt_id),
 #ifdef CONFIG_IOMMUFD_TEST
 	IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last),
 #endif

@@ -490,8 +488,8 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
 	[IOMMUFD_OBJ_DEVICE] = {
 		.destroy = iommufd_device_destroy,
 	},
-	[IOMMUFD_OBJ_IOAS] = {
-		.destroy = iommufd_ioas_destroy,
+	[IOMMUFD_OBJ_FAULT] = {
+		.destroy = iommufd_fault_destroy,
 	},
 	[IOMMUFD_OBJ_HWPT_PAGING] = {
 		.destroy = iommufd_hwpt_paging_destroy,

@@ -501,15 +499,15 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
 		.destroy = iommufd_hwpt_nested_destroy,
 		.abort = iommufd_hwpt_nested_abort,
 	},
-	[IOMMUFD_OBJ_FAULT] = {
-		.destroy = iommufd_fault_destroy,
-	},
-	[IOMMUFD_OBJ_VIOMMU] = {
-		.destroy = iommufd_viommu_destroy,
+	[IOMMUFD_OBJ_IOAS] = {
+		.destroy = iommufd_ioas_destroy,
 	},
 	[IOMMUFD_OBJ_VDEVICE] = {
 		.destroy = iommufd_vdevice_destroy,
 	},
+	[IOMMUFD_OBJ_VIOMMU] = {
+		.destroy = iommufd_viommu_destroy,
+	},
 #ifdef CONFIG_IOMMUFD_TEST
 	[IOMMUFD_OBJ_SELFTEST] = {
 		.destroy = iommufd_selftest_destroy,

@@ -311,25 +311,6 @@ static const struct iommu_dirty_ops dirty_ops = {
 	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
 };
 
-static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
-{
-	struct mock_dev *mdev = to_mock_dev(dev);
-	struct mock_iommu_domain *mock;
-
-	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
-	if (!mock)
-		return NULL;
-	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
-	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
-	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
-	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
-		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
-	mock->domain.ops = mock_ops.default_domain_ops;
-	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
-	xa_init(&mock->pfns);
-	return &mock->domain;
-}
-
 static struct mock_iommu_domain_nested *
 __mock_domain_alloc_nested(const struct iommu_user_data *user_data)
 {

@@ -385,21 +366,30 @@ mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
 	bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 	const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
 				 IOMMU_HWPT_ALLOC_NEST_PARENT;
-	bool no_dirty_ops = to_mock_dev(dev)->flags &
-			    MOCK_FLAGS_DEVICE_NO_DIRTY;
-	struct iommu_domain *domain;
+	struct mock_dev *mdev = to_mock_dev(dev);
+	bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
+	struct mock_iommu_domain *mock;
 
 	if (user_data)
 		return ERR_PTR(-EOPNOTSUPP);
 	if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	domain = mock_domain_alloc_paging(dev);
-	if (!domain)
+	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
+	if (!mock)
 		return ERR_PTR(-ENOMEM);
+	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
+	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
+	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
+	mock->domain.ops = mock_ops.default_domain_ops;
+	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
+	xa_init(&mock->pfns);
 
 	if (has_dirty_flag)
-		domain->dirty_ops = &dirty_ops;
-	return domain;
+		mock->domain.dirty_ops = &dirty_ops;
+	return &mock->domain;
 }
 
 static void mock_domain_free(struct iommu_domain *domain)

@@ -595,7 +585,7 @@ mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
 	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
 	struct mock_iommu_domain_nested *mock_nested;
 
-	if (flags & ~IOMMU_HWPT_FAULT_ID_VALID)
+	if (flags)
 		return ERR_PTR(-EOPNOTSUPP);
 
 	mock_nested = __mock_domain_alloc_nested(user_data);

@@ -713,7 +703,6 @@ static const struct iommu_ops mock_ops = {
 	.owner = THIS_MODULE,
 	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
 	.hw_info = mock_domain_hw_info,
-	.domain_alloc_paging = mock_domain_alloc_paging,
 	.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
 	.domain_alloc_nested = mock_domain_alloc_nested,
 	.capable = mock_domain_capable,

@@ -868,6 +868,7 @@ enum iommu_hwpt_pgfault_perm {
  * @pasid: Process Address Space ID
  * @grpid: Page Request Group Index
  * @perm: Combination of enum iommu_hwpt_pgfault_perm
+ * @__reserved: Must be 0.
  * @addr: Fault address
  * @length: a hint of how much data the requestor is expecting to fetch. For
  *          example, if the PRI initiator knows it is going to do a 10MB

@@ -883,7 +884,8 @@ struct iommu_hwpt_pgfault {
 	__u32 pasid;
 	__u32 grpid;
 	__u32 perm;
-	__u64 addr;
+	__u32 __reserved;
+	__aligned_u64 addr;
 	__u32 length;
 	__u32 cookie;
 };

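These last two hunks are the 32-bit ABI and stack-leak fix from the pull message: with a run of __u32 members followed by a plain __u64, a 64-bit kernel inserts 4 bytes of implicit padding before addr while a 32-bit x86 userspace does not, so the two sides disagreed on the struct layout, and the unnamed padding bytes were also copied to userspace uninitialized (hence the "data = {}" zero-init in the read path above). Making the padding an explicit __u32 __reserved and forcing 8-byte alignment on addr pins a single layout for both ABIs. Below is a standalone sketch of the layout difference; the two mirror structs are hypothetical stand-ins modelled on the visible tail of the struct, not the uapi definitions:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Roughly the old shape: a 64-bit member after an odd number of 32-bit
 * members gets 4 bytes of implicit padding on x86-64 (addr lands at a
 * multiple of 8), but not on i386, where uint64_t is only 4-byte aligned,
 * so sizeof()/offsetof() differ between the two ABIs.
 */
struct pgfault_old {
	uint32_t pasid, grpid, perm;
	uint64_t addr;
	uint32_t length, cookie;
};

/* Roughly the new shape: the padding is explicit and the 64-bit member is
 * forced to 8-byte alignment (the effect of __aligned_u64), so 32-bit and
 * 64-bit builds agree on one layout.
 */
struct pgfault_new {
	uint32_t pasid, grpid, perm;
	uint32_t __reserved;
	uint64_t addr __attribute__((aligned(8)));
	uint32_t length, cookie;
};

int main(void)
{
	printf("old: size=%zu addr@%zu\n",
	       sizeof(struct pgfault_old), offsetof(struct pgfault_old, addr));
	printf("new: size=%zu addr@%zu\n",
	       sizeof(struct pgfault_new), offsetof(struct pgfault_new, addr));
	return 0;
}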