mirror of https://github.com/nxp-imx/linux-imx.git (synced 2025-07-12 20:35:23 +02:00)
bpf: replace bpf_timer_init with a generic helper
[ Upstream commit 56b4a177ae ]

No code change except for the new flags argument being stored in the
local data struct.

Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-2-6c986a5a741f@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Stable-dep-of: d4523831f0 ("bpf: Fail bpf_timer_cancel when callback is being cancelled")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 5910035674
commit e97c862e0b
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1110,7 +1110,10 @@ struct bpf_hrtimer {
 
 /* the actual struct hidden inside uapi struct bpf_timer */
 struct bpf_async_kern {
-	struct bpf_hrtimer *timer;
+	union {
+		struct bpf_async_cb *cb;
+		struct bpf_hrtimer *timer;
+	};
 	/* bpf_spin_lock is used here instead of spinlock_t to make
 	 * sure that it always fits into space reserved by struct bpf_timer
 	 * regardless of LOCKDEP and spinlock debug flags.
@@ -1118,6 +1121,10 @@ struct bpf_async_kern {
 	struct bpf_spin_lock lock;
 } __attribute__((aligned(8)));
 
+enum bpf_async_type {
+	BPF_ASYNC_TYPE_TIMER = 0,
+};
+
 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
 
 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
@@ -1159,46 +1166,55 @@ out:
 	return HRTIMER_NORESTART;
 }
 
-BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
-	   u64, flags)
+static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
+			    enum bpf_async_type type)
 {
-	clockid_t clockid = flags & (MAX_CLOCKS - 1);
+	struct bpf_async_cb *cb;
 	struct bpf_hrtimer *t;
+	clockid_t clockid;
+	size_t size;
 	int ret = 0;
 
-	BUILD_BUG_ON(MAX_CLOCKS != 16);
-	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
-	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
-
 	if (in_nmi())
 		return -EOPNOTSUPP;
 
-	if (flags >= MAX_CLOCKS ||
-	    /* similar to timerfd except _ALARM variants are not supported */
-	    (clockid != CLOCK_MONOTONIC &&
-	     clockid != CLOCK_REALTIME &&
-	     clockid != CLOCK_BOOTTIME))
+	switch (type) {
+	case BPF_ASYNC_TYPE_TIMER:
+		size = sizeof(struct bpf_hrtimer);
+		break;
+	default:
 		return -EINVAL;
-	__bpf_spin_lock_irqsave(&timer->lock);
-	t = timer->timer;
+	}
+
+	__bpf_spin_lock_irqsave(&async->lock);
+	t = async->timer;
 	if (t) {
 		ret = -EBUSY;
 		goto out;
 	}
+
 	/* allocate hrtimer via map_kmalloc to use memcg accounting */
-	t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
-	if (!t) {
+	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
+	if (!cb) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	t->cb.value = (void *)timer - map->record->timer_off;
-	t->cb.map = map;
-	t->cb.prog = NULL;
-	rcu_assign_pointer(t->cb.callback_fn, NULL);
-	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
-	t->timer.function = bpf_timer_cb;
-	WRITE_ONCE(timer->timer, t);
-	/* Guarantee the order between timer->timer and map->usercnt. So
+
+	if (type == BPF_ASYNC_TYPE_TIMER) {
+		clockid = flags & (MAX_CLOCKS - 1);
+		t = (struct bpf_hrtimer *)cb;
+
+		hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+		t->timer.function = bpf_timer_cb;
+		cb->value = (void *)async - map->record->timer_off;
+	}
+	cb->map = map;
+	cb->prog = NULL;
+	cb->flags = flags;
+	rcu_assign_pointer(cb->callback_fn, NULL);
+
+	WRITE_ONCE(async->cb, cb);
+	/* Guarantee the order between async->cb and map->usercnt. So
 	 * when there are concurrent uref release and bpf timer init, either
 	 * bpf_timer_cancel_and_free() called by uref release reads a no-NULL
 	 * timer or atomic64_read() below returns a zero usercnt.
@@ -1208,15 +1224,34 @@ BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map
 		/* maps with timers must be either held by user space
 		 * or pinned in bpffs.
 		 */
-		WRITE_ONCE(timer->timer, NULL);
-		kfree(t);
+		WRITE_ONCE(async->cb, NULL);
+		kfree(cb);
 		ret = -EPERM;
 	}
 out:
-	__bpf_spin_unlock_irqrestore(&timer->lock);
+	__bpf_spin_unlock_irqrestore(&async->lock);
 	return ret;
 }
 
+BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
+	   u64, flags)
+{
+	clock_t clockid = flags & (MAX_CLOCKS - 1);
+
+	BUILD_BUG_ON(MAX_CLOCKS != 16);
+	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
+	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
+
+	if (flags >= MAX_CLOCKS ||
+	    /* similar to timerfd except _ALARM variants are not supported */
+	    (clockid != CLOCK_MONOTONIC &&
+	     clockid != CLOCK_REALTIME &&
+	     clockid != CLOCK_BOOTTIME))
+		return -EINVAL;
+
+	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
+}
+
 static const struct bpf_func_proto bpf_timer_init_proto = {
 	.func = bpf_timer_init,
 	.gpl_only = true,
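The cast t = (struct bpf_hrtimer *)cb is valid because struct bpf_hrtimer embeds struct bpf_async_cb as its first member, and the new union in bpf_async_kern lets the same pointer slot be read through either view; the allocation size is chosen per type in the switch, so a future async type (the bpf_wq series this patch belongs to, per the Link tag, later adds one) only needs a new enum entry and size case. A standalone sketch of that layout trick, using simplified stand-in types rather than the kernel's:

/* Standalone sketch (stand-in types, not kernel code) of the layout this
 * patch relies on: the concrete async object embeds the generic callback
 * struct as its first member, so one pointer to the allocation is valid
 * through either view, and the union shares a single pointer slot.
 */
#include <stdio.h>
#include <stdlib.h>

struct async_cb {			/* stands in for struct bpf_async_cb */
	void *value;
	unsigned long long flags;
};

struct hrtimer_wrap {			/* stands in for struct bpf_hrtimer */
	struct async_cb cb;		/* must be the first member */
	int hrtimer_placeholder;	/* stands in for struct hrtimer */
};

union async_kern {			/* stands in for the union in bpf_async_kern */
	struct async_cb *cb;
	struct hrtimer_wrap *timer;
};

int main(void)
{
	/* allocate by the type-specific size, as __bpf_async_init() does */
	struct async_cb *cb = calloc(1, sizeof(struct hrtimer_wrap));
	union async_kern async;

	if (!cb)
		return 1;
	cb->flags = 42;

	async.cb = cb;	/* generic view, like WRITE_ONCE(async->cb, cb) */
	/* concrete view: safe because cb is hrtimer_wrap's first member */
	printf("flags via timer view: %llu\n", async.timer->cb.flags);

	free(cb);
	return 0;
}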