mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-23 07:23:12 +02:00
io_uring: unify io_pin_pages()
Move it into io_uring.c where it belongs, and use it in there as well rather than have two implementations of this. Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
09fc75e0c0
commit
1943f96b38
|
@ -2650,33 +2650,57 @@ static void io_pages_free(struct page ***pages, int npages)
|
||||||
*pages = NULL;
|
*pages = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
|
||||||
|
{
|
||||||
|
unsigned long start, end, nr_pages;
|
||||||
|
struct page **pages;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||||
|
start = uaddr >> PAGE_SHIFT;
|
||||||
|
nr_pages = end - start;
|
||||||
|
if (WARN_ON_ONCE(!nr_pages))
|
||||||
|
return ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
|
pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
|
||||||
|
if (!pages)
|
||||||
|
return ERR_PTR(-ENOMEM);
|
||||||
|
|
||||||
|
ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
|
||||||
|
pages);
|
||||||
|
/* success, mapped all pages */
|
||||||
|
if (ret == nr_pages) {
|
||||||
|
*npages = nr_pages;
|
||||||
|
return pages;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* partial map, or didn't map anything */
|
||||||
|
if (ret >= 0) {
|
||||||
|
/* if we did partial map, release any pages we did get */
|
||||||
|
if (ret)
|
||||||
|
unpin_user_pages(pages, ret);
|
||||||
|
ret = -EFAULT;
|
||||||
|
}
|
||||||
|
kvfree(pages);
|
||||||
|
return ERR_PTR(ret);
|
||||||
|
}
|
||||||
|
|
||||||
static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
|
static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
|
||||||
unsigned long uaddr, size_t size)
|
unsigned long uaddr, size_t size)
|
||||||
{
|
{
|
||||||
struct page **page_array;
|
struct page **page_array;
|
||||||
unsigned int nr_pages;
|
unsigned int nr_pages;
|
||||||
void *page_addr;
|
void *page_addr;
|
||||||
int ret, pinned;
|
|
||||||
|
|
||||||
*npages = 0;
|
*npages = 0;
|
||||||
|
|
||||||
if (uaddr & (PAGE_SIZE - 1) || !size)
|
if (uaddr & (PAGE_SIZE - 1) || !size)
|
||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
nr_pages = 0;
|
||||||
if (nr_pages > USHRT_MAX)
|
page_array = io_pin_pages(uaddr, size, &nr_pages);
|
||||||
return ERR_PTR(-EINVAL);
|
if (IS_ERR(page_array))
|
||||||
page_array = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
|
return page_array;
|
||||||
if (!page_array)
|
|
||||||
return ERR_PTR(-ENOMEM);
|
|
||||||
|
|
||||||
|
|
||||||
pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
|
|
||||||
page_array);
|
|
||||||
if (pinned != nr_pages) {
|
|
||||||
ret = (pinned < 0) ? pinned : -EFAULT;
|
|
||||||
goto free_pages;
|
|
||||||
}
|
|
||||||
|
|
||||||
page_addr = vmap(page_array, nr_pages, VM_MAP, PAGE_KERNEL);
|
page_addr = vmap(page_array, nr_pages, VM_MAP, PAGE_KERNEL);
|
||||||
if (page_addr) {
|
if (page_addr) {
|
||||||
|
@ -2684,10 +2708,9 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
|
||||||
*npages = nr_pages;
|
*npages = nr_pages;
|
||||||
return page_addr;
|
return page_addr;
|
||||||
}
|
}
|
||||||
ret = -ENOMEM;
|
|
||||||
free_pages:
|
io_pages_free(&page_array, nr_pages);
|
||||||
io_pages_free(&page_array, pinned > 0 ? pinned : 0);
|
return ERR_PTR(-ENOMEM);
|
||||||
return ERR_PTR(ret);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
|
static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
|
||||||
|
|
|
@ -870,42 +870,6 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
 * Pin the user pages backing [ubuf, ubuf + len) for long-term use.
 *
 * On success, returns a kvmalloc'ed array of the pinned struct page
 * pointers and stores the count in *npages; the caller owns both the
 * pins (unpin_user_pages()) and the array (kvfree()). On failure,
 * returns an ERR_PTR with nothing pinned.
 *
 * NOTE(review): WARN_ON(!nr_pages) only warns and falls through, so a
 * zero-length range would still reach kvmalloc_array() with n == 0 —
 * the callers presumably never pass len == 0; verify at call sites.
 */
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages = NULL;
	int ret;

	/* round the end up and the start down to whole-page boundaries */
	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;
	WARN_ON(!nr_pages);

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* pin_user_pages() requires the caller to hold mmap_lock for read */
	mmap_read_lock(current->mm);
	ret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, pages);
	mmap_read_unlock(current->mm);

	/* success, mapped all pages */
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;
	}

	/* partial map, or didn't map anything */
	if (ret >= 0) {
		/* if we did partial map, release any pages we did get */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}
|
|
||||||
|
|
||||||
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
|
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
|
||||||
struct io_mapped_ubuf **pimu,
|
struct io_mapped_ubuf **pimu,
|
||||||
struct page **last_hpage)
|
struct page **last_hpage)
|
||||||
|
|
Loading…
Reference in New Issue
Block a user