An llvm change [1] made __sync_fetch_and_{and,or,xor}() generate
atomic_fetch_*() insns even if the return value is not used. This is a
deliberate choice to make sure barrier semantics are preserved from
source code to asm insn. But the change in [1] caused an arena_atomics
selftest failure:
test_arena_atomics:PASS:arena atomics skeleton open 0 nsec
libbpf: prog 'and': BPF program load failed: Permission denied
libbpf: prog 'and': -- BEGIN PROG LOAD LOG --
arg#0 reference type('UNKNOWN ') size cannot be determined: -22
0: R1=ctx() R10=fp0
; if (pid != (bpf_get_current_pid_tgid() >> 32)) @ arena_atomics.c:87
0: (18) r1 = 0xffffc90000064000 ; R1_w=map_value(map=arena_at.bss,ks=4,vs=4)
2: (61) r6 = *(u32 *)(r1 +0)         ; R1_w=map_value(map=arena_at.bss,ks=4,vs=4) R6_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))
3: (85) call bpf_get_current_pid_tgid#14 ; R0_w=scalar()
4: (77) r0 >>= 32 ; R0_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))
5: (5d) if r0 != r6 goto pc+11 ; R0_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff)) R6_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0x)
; __sync_fetch_and_and(&and64_value, 0x011ull << 32); @ arena_atomics.c:91
6: (18) r1 = 0x100000000060 ; R1_w=scalar()
8: (bf) r1 = addr_space_cast(r1, 0, 1) ; R1_w=arena
9: (18) r2 = 0x1100000000 ; R2_w=0x1100000000
11: (db) r2 = atomic64_fetch_and((u64 *)(r1 +0), r2)
BPF_ATOMIC stores into R1 arena is not allowed
processed 9 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
-- END PROG LOAD LOG --
libbpf: prog 'and': failed to load: -13
libbpf: failed to load object 'arena_atomics'
libbpf: failed to load BPF skeleton 'arena_atomics': -13
test_arena_atomics:FAIL:arena atomics skeleton load unexpected error: -13 (errno 13)
#3 arena_atomics:FAIL
The failure is due to [2], where atomic{64,}_fetch_{and,or,xor}() are not
allowed on arena addresses.
Version 2 of this patch fixed the issue with inline asm ([3]). But further
discussion suggested finding a way to generate the locked insn directly
from source, which is more user friendly. So in the not-yet-merged llvm
patch ([4]), if relaxed memory ordering is specified and the return value
is not used, a locked insn can be generated.
With llvm patch [4] applied when compiling the bpf selftest, the following
code
  __c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
generates a locked insn, fixing the selftest failure.
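To illustrate the difference (a minimal sketch with hypothetical
variables v1 and v2, mirroring the selftest below):

  __u64 v1 = (0x110ull << 32);
  _Atomic __u64 v2 = (0x110ull << 32);

  /* after [1]: lowers to atomic64_fetch_and() even though the result
   * is discarded; the verifier rejects this on arena addresses
   */
  __sync_fetch_and_and(&v1, 0x011ull << 32);

  /* with [4]: relaxed ordering plus a discarded result lowers to a
   * plain locked and insn, which the arena supports
   */
  __c11_atomic_fetch_and(&v2, 0x011ull << 32, memory_order_relaxed);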
[1] https://github.com/llvm/llvm-project/pull/106494
[2] d503a04f8b ("bpf: Add support for certain atomics in bpf_arena to x86 JIT")
[3] https://lore.kernel.org/bpf/20240803025928.4184433-1-yonghong.song@linux.dev/
[4] https://github.com/llvm/llvm-project/pull/107343
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20240909223431.1666305-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
#include <stdatomic.h>
#include "bpf_arena_common.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 10); /* number of pages */
#ifdef __TARGET_ARCH_arm64
	__ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
#else
	__ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
#endif
} arena SEC(".maps");
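
/*
 * Report the tests as skipped unless atomics tests are enabled and the
 * compiler can cast pointers into the arena address space
 * (__BPF_FEATURE_ADDR_SPACE_CAST).
 */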
#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
bool skip_tests __attribute((__section__(".data"))) = false;
#else
bool skip_tests = true;
#endif

__u32 pid = 0;

__u64 __arena_global add64_value = 1;
__u64 __arena_global add64_result = 0;
__u32 __arena_global add32_value = 1;
__u32 __arena_global add32_result = 0;
__u64 __arena_global add_stack_value_copy = 0;
__u64 __arena_global add_stack_result = 0;
__u64 __arena_global add_noreturn_value = 1;
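
/* Fetch-add is supported on arena memory even when the fetched value is
 * used, so the plain __sync forms suffice here.
 */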
SEC("raw_tp/sys_enter")
int add(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 add_stack_value = 1;

	add64_result = __sync_fetch_and_add(&add64_value, 2);
	add32_result = __sync_fetch_and_add(&add32_value, 2);
	add_stack_result = __sync_fetch_and_add(&add_stack_value, 2);
	add_stack_value_copy = add_stack_value;
	__sync_fetch_and_add(&add_noreturn_value, 2);
#endif

	return 0;
}

__s64 __arena_global sub64_value = 1;
__s64 __arena_global sub64_result = 0;
__s32 __arena_global sub32_value = 1;
__s32 __arena_global sub32_result = 0;
__s64 __arena_global sub_stack_value_copy = 0;
__s64 __arena_global sub_stack_result = 0;
__s64 __arena_global sub_noreturn_value = 1;

SEC("raw_tp/sys_enter")
int sub(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 sub_stack_value = 1;

	sub64_result = __sync_fetch_and_sub(&sub64_value, 2);
	sub32_result = __sync_fetch_and_sub(&sub32_value, 2);
	sub_stack_result = __sync_fetch_and_sub(&sub_stack_value, 2);
	sub_stack_value_copy = sub_stack_value;
	__sync_fetch_and_sub(&sub_noreturn_value, 2);
#endif

	return 0;
}
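
/*
 * When the compiler supports BPF atomic memory ordering, use C11 relaxed
 * atomics: with the fetched value unused, the compiler can emit plain
 * locked and/or/xor insns, the only forms allowed on arena addresses.
 */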
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u64 __arena_global and64_value = (0x110ull << 32);
_Atomic __u32 __arena_global and32_value = 0x110;
#else
__u64 __arena_global and64_value = (0x110ull << 32);
__u32 __arena_global and32_value = 0x110;
#endif

SEC("raw_tp/sys_enter")
int and(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_and(&and64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_and(&and32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_and(&and64_value, 0x011ull << 32);
	__sync_fetch_and_and(&and32_value, 0x011);
#endif
#endif

	return 0;
}

#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u32 __arena_global or32_value = 0x110;
_Atomic __u64 __arena_global or64_value = (0x110ull << 32);
#else
__u32 __arena_global or32_value = 0x110;
__u64 __arena_global or64_value = (0x110ull << 32);
#endif

SEC("raw_tp/sys_enter")
int or(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_or(&or64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_or(&or32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_or(&or64_value, 0x011ull << 32);
	__sync_fetch_and_or(&or32_value, 0x011);
#endif
#endif

	return 0;
}

#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
_Atomic __u64 __arena_global xor64_value = (0x110ull << 32);
_Atomic __u32 __arena_global xor32_value = 0x110;
#else
__u64 __arena_global xor64_value = (0x110ull << 32);
__u32 __arena_global xor32_value = 0x110;
#endif

SEC("raw_tp/sys_enter")
int xor(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
#ifdef __BPF_FEATURE_ATOMIC_MEM_ORDERING
	__c11_atomic_fetch_xor(&xor64_value, 0x011ull << 32, memory_order_relaxed);
	__c11_atomic_fetch_xor(&xor32_value, 0x011, memory_order_relaxed);
#else
	__sync_fetch_and_xor(&xor64_value, 0x011ull << 32);
	__sync_fetch_and_xor(&xor32_value, 0x011);
#endif
#endif

	return 0;
}
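
/* cmpxchg (and xchg below) map to BPF insns that are allowed on arena
 * addresses, so the __sync forms are used as-is.
 */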
__u32 __arena_global cmpxchg32_value = 1;
__u32 __arena_global cmpxchg32_result_fail = 0;
__u32 __arena_global cmpxchg32_result_succeed = 0;
__u64 __arena_global cmpxchg64_value = 1;
__u64 __arena_global cmpxchg64_result_fail = 0;
__u64 __arena_global cmpxchg64_result_succeed = 0;

SEC("raw_tp/sys_enter")
int cmpxchg(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
	cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);

	cmpxchg32_result_fail = __sync_val_compare_and_swap(&cmpxchg32_value, 0, 3);
	cmpxchg32_result_succeed = __sync_val_compare_and_swap(&cmpxchg32_value, 1, 2);
#endif

	return 0;
}

__u64 __arena_global xchg64_value = 1;
__u64 __arena_global xchg64_result = 0;
__u32 __arena_global xchg32_value = 1;
__u32 __arena_global xchg32_result = 0;

SEC("raw_tp/sys_enter")
int xchg(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#ifdef ENABLE_ATOMICS_TESTS
	__u64 val64 = 2;
	__u32 val32 = 2;

	xchg64_result = __sync_lock_test_and_set(&xchg64_value, val64);
	xchg32_result = __sync_lock_test_and_set(&xchg32_value, val32);
#endif

	return 0;
}
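
/*
 * Use-after-free recovery: run each atomic op on an arena page that has
 * already been freed. Every access is expected to fault and recover, so
 * the program keeps running and counts uaf_recovery_fails down to zero.
 */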
__u64 __arena_global uaf_sink;
volatile __u64 __arena_global uaf_recovery_fails;

SEC("syscall")
int uaf(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
	!defined(__TARGET_ARCH_x86)
	__u32 __arena *page32;
	__u64 __arena *page64;
	void __arena *page;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	bpf_arena_free_pages(&arena, page, 1);
	uaf_recovery_fails = 24;

	page32 = (__u32 __arena *)page;
	uaf_sink += __sync_fetch_and_add(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_add_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_sub(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_sub_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_and(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_and_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_or(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_or_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_xor(page32, 1);
	uaf_recovery_fails -= 1;
	__sync_xor_and_fetch(page32, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_lock_test_and_set(page32, 1);
	uaf_recovery_fails -= 1;

	page64 = (__u64 __arena *)page;
	uaf_sink += __sync_fetch_and_add(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_add_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_sub(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_sub_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_and(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_and_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_or(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_or_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_fetch_and_xor(page64, 1);
	uaf_recovery_fails -= 1;
	__sync_xor_and_fetch(page64, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
	uaf_recovery_fails -= 1;
	uaf_sink += __sync_lock_test_and_set(page64, 1);
	uaf_recovery_fails -= 1;
#endif

	return 0;
}

char _license[] SEC("license") = "GPL";