linux-yocto/io_uring/alloc_cache.h
Caleb Sander Mateos 0d83b8a9f1 io_uring: introduce io_cache_free() helper
Add a helper function io_cache_free() that returns an allocation to an
io_alloc_cache, falling back on kfree() if the io_alloc_cache is full.
This is the inverse of io_cache_alloc(), which takes an allocation from
an io_alloc_cache and falls back on kmalloc() if the cache is empty.

Convert 4 callers to use the helper.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Suggested-by: Li Zetao <lizetao1@huawei.com>
Link: https://lore.kernel.org/r/20250304194814.2346705-1-csander@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-03-05 07:38:55 -07:00

78 lines
1.7 KiB
C

#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H
#include <linux/io_uring_types.h>
/*
* Don't allow the cache to grow beyond this size.
*/
#define IO_ALLOC_CACHE_MAX 128
void io_alloc_cache_free(struct io_alloc_cache *cache,
void (*free)(const void *));
bool io_alloc_cache_init(struct io_alloc_cache *cache,
unsigned max_nr, unsigned int size,
unsigned int init_bytes);
void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
/*
 * When KASAN is enabled, free the cached iovec rather than recycling it,
 * so KASAN can track the allocation and catch any later misuse.  The
 * caller's pointer and count are reset to indicate nothing is cached.
 */
static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
{
	if (!IS_ENABLED(CONFIG_KASAN))
		return;
	kfree(*iov);
	*iov = NULL;
	*nr = 0;
}
/*
 * Try to stash @entry in @cache for later reuse.
 *
 * Returns true if the entry was cached; false if the cache is already at
 * capacity or KASAN declined to poison the object, in which case the
 * caller retains ownership and must free the entry itself.
 */
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached >= cache->max_cached)
		return false;
	if (!kasan_mempool_poison_object(entry))
		return false;
	cache->entries[cache->nr_cached++] = entry;
	return true;
}
/*
 * Pop the most recently cached entry from @cache, or return NULL when
 * the cache is empty.
 */
static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	void *entry;

	if (!cache->nr_cached)
		return NULL;

	entry = cache->entries[--cache->nr_cached];
#if defined(CONFIG_KASAN)
	/*
	 * If KASAN is enabled, always clear the initial bytes that
	 * must be zeroed post alloc, in case any of them overlap
	 * with KASAN storage.
	 */
	kasan_mempool_unpoison_object(entry, cache->elem_size);
	if (cache->init_clear)
		memset(entry, 0, cache->init_clear);
#endif
	return entry;
}
/*
 * Obtain an object: prefer a cached entry, otherwise allocate a fresh
 * one with @gfp via io_cache_alloc_new().
 */
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
	void *entry = io_alloc_cache_get(cache);

	return entry ? entry : io_cache_alloc_new(cache, gfp);
}
/*
 * Return @obj to @cache; if the cache cannot take it (full or KASAN
 * refused), release it with kfree().  Inverse of io_cache_alloc().
 */
static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
{
	if (io_alloc_cache_put(cache, obj))
		return;
	kfree(obj);
}
#endif