xen/gntdev: remove struct gntdev_copy_batch from stack

[ Upstream commit 70045cf659 ]

When compiling the kernel with LLVM, the following warning was issued:

  drivers/xen/gntdev.c:991: warning: stack frame size (1160) exceeds
  limit (1024) in function 'gntdev_ioctl'

The main reason is struct gntdev_copy_batch which is located on the
stack and has a size of nearly 1kb.

For performance reasons it shouldn't just be dynamically allocated on
each use, so allocate a new instance only when needed and, instead of
freeing it, put it into a list of free structs anchored in struct
gntdev_priv.

Fixes: a4cdb556ca ("xen/gntdev: add ioctl for grant copy")
Reported-by: Abinash Singh <abinashsinghlalotra@gmail.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Message-ID: <20250703073259.17356-1-jgross@suse.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Juergen Gross 2025-07-03 09:32:59 +02:00 committed by Greg Kroah-Hartman
parent 8523f6fe4b
commit 1b01308948
2 changed files with 54 additions and 21 deletions

View File

@ -26,6 +26,10 @@ struct gntdev_priv {
/* lock protects maps and freeable_maps. */
struct mutex lock;
/* Free instances of struct gntdev_copy_batch. */
struct gntdev_copy_batch *batch;
struct mutex batch_lock;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/* Device for which DMA memory is allocated. */
struct device *dma_dev;

View File

@ -56,6 +56,18 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
/* Maximum number of grant-copy operations gathered before flushing. */
#define GNTDEV_COPY_BATCH 16

/*
 * One batch of pending grant-copy operations.  Instances are kept on a
 * free list (linked via @next) anchored in struct gntdev_priv so they
 * can be reused instead of being reallocated for every ioctl.
 */
struct gntdev_copy_batch {
	struct gnttab_copy ops[GNTDEV_COPY_BATCH];	/* queued copy ops */
	struct page *pages[GNTDEV_COPY_BATCH];		/* pinned user pages */
	s16 __user *status[GNTDEV_COPY_BATCH];		/* per-op user status slots */
	unsigned int nr_ops;				/* valid entries in ops[]/status[] */
	unsigned int nr_pages;				/* valid entries in pages[] */
	bool writeable;					/* pages pinned for write access */
	struct gntdev_copy_batch *next;			/* free-list link in gntdev_priv */
};
static unsigned int limit = 64*1024;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
@ -584,6 +596,8 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
mutex_init(&priv->lock);
mutex_init(&priv->batch_lock);
#ifdef CONFIG_XEN_GNTDEV_DMABUF
priv->dmabuf_priv = gntdev_dmabuf_init(flip);
if (IS_ERR(priv->dmabuf_priv)) {
@ -608,6 +622,7 @@ static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
struct gntdev_grant_map *map;
struct gntdev_copy_batch *batch;
pr_debug("priv %p\n", priv);
@ -620,6 +635,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
}
mutex_unlock(&priv->lock);
mutex_lock(&priv->batch_lock);
while (priv->batch) {
batch = priv->batch;
priv->batch = batch->next;
kfree(batch);
}
mutex_unlock(&priv->batch_lock);
#ifdef CONFIG_XEN_GNTDEV_DMABUF
gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif
@ -785,17 +808,6 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
return rc;
}
/*
 * NOTE(review): this is the pre-patch definition being removed by the
 * diff (the struct was moved earlier in the file and gained a @next
 * free-list member) — shown here only as deleted hunk context.
 */
#define GNTDEV_COPY_BATCH 16

struct gntdev_copy_batch {
	struct gnttab_copy ops[GNTDEV_COPY_BATCH];	/* queued copy ops */
	struct page *pages[GNTDEV_COPY_BATCH];		/* pinned user pages */
	s16 __user *status[GNTDEV_COPY_BATCH];		/* per-op user status slots */
	unsigned int nr_ops;				/* valid entries in ops[]/status[] */
	unsigned int nr_pages;				/* valid entries in pages[] */
	bool writeable;					/* pages pinned for write access */
};
static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
unsigned long *gfn)
{
@ -953,36 +965,53 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
/*
 * GNTDEV ioctl: perform a batch of grant copies for user space.
 *
 * The stripped diff markers in this view interleaved the old (stack
 * "struct gntdev_copy_batch batch;") and new (heap/free-list) versions,
 * leaving a duplicate declaration, a stray mid-function "return ret;"
 * and two "out:" labels.  This is the coherent post-patch version:
 * the batch is taken from (or returned to) the free list anchored in
 * @priv instead of living on the stack.
 *
 * Returns 0 on success or a negative errno (-EFAULT on bad user
 * pointers, -ENOMEM if no batch could be allocated, or the error from
 * gntdev_grant_copy_seg()/gntdev_copy()).
 */
static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_grant_copy copy;
	struct gntdev_copy_batch *batch;
	unsigned int i;
	int ret = 0;

	if (copy_from_user(&copy, u, sizeof(copy)))
		return -EFAULT;

	/* Grab a free batch from the list, or allocate a fresh one. */
	mutex_lock(&priv->batch_lock);
	if (!priv->batch) {
		batch = kmalloc(sizeof(*batch), GFP_KERNEL);
	} else {
		batch = priv->batch;
		priv->batch = batch->next;
	}
	mutex_unlock(&priv->batch_lock);
	if (!batch)
		return -ENOMEM;

	batch->nr_ops = 0;
	batch->nr_pages = 0;

	for (i = 0; i < copy.count; i++) {
		struct gntdev_grant_copy_segment seg;

		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
			ret = -EFAULT;
			gntdev_put_pages(batch);
			goto out;
		}

		ret = gntdev_grant_copy_seg(batch, &seg, &copy.segments[i].status);
		if (ret < 0) {
			gntdev_put_pages(batch);
			goto out;
		}

		cond_resched();
	}
	if (batch->nr_ops)
		ret = gntdev_copy(batch);

 out:
	/* Return the batch to the free list for reuse instead of freeing. */
	mutex_lock(&priv->batch_lock);
	batch->next = priv->batch;
	priv->batch = batch;
	mutex_unlock(&priv->batch_lock);

	return ret;
}