Mirror of git://git.yoctoproject.org/linux-yocto.git
Synced 2025-10-22 23:13:01 +02:00
bpf-fixes

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmh5r+QACgkQ6rmadz2v
bTqrFA/+OW+z+vh8k43C9TVZttqC5poGcFqF5zRlYTArQT3AB+QuhG/CRjVQFbGL
2YfVbq+5pxNo0I/FtCoWVui2OMf1UsRKKvM0pSn50yn3ytRfotZjQ/AWACm/9t5y
fyRLBIS3ArjashQ9/S71tAIfG6l/B+FGX81wOVa1uL50ab15+4NrplhZHY421o9a
lH2E2wnpy/BnrB9F/FO4iQbelixvBfMwj8epruhCVbipfx6BOKPMzKVtcm61FVT1
hDsQZ0bIpVKgpRNBlTUHjVyzYo8oeXzqVhhY7hsmpHxJSiol7KLWyHEJD5ExS9Qg
XVPK34b9IPgAfS8f/DgGAkWsAht7BMLsR0GUWyVIiacHHqTinRPVfWbzqWa5yjdD
+8Vp4RVrcUONx69upx+IDrb4uMfQYktdpcvQtSl0SSinsG/INXurT1Vyz8aBPfkv
WbiBeXhW/dCD9NuL5D9gnyZWaPXIAmbK7+pXJOSIpfKC24WRXTONDXhGP1b6ef31
zHQu3r98ekYnHr3hbsvdHOWB7LKkJ1bcg2+OsmtYUUmnCiQTM1H8ILTwbSQ4EfXJ
6iRxYeFp+VJOPScRzmNU/A3ibQWfV+foiO4S6hmazJOy3mmHX6hgPZoj2fjV8Ejf
xZeOpQbCaZQCzbxxOdtjykwfe+zPWGnyRPnQpVIVdi7Abk1EZaE=
=eUL7
-----END PGP SIGNATURE-----

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix handling of BPF arena relocations (Andrii Nakryiko)

 - Fix race in bpf_arch_text_poke() on s390 (Ilya Leoshkevich)

 - Fix use of virt_to_phys() on arm64 when mmapping BTF (Lorenz Bauer)

 - Reject %p% format string in bprintf-like BPF helpers (Paul Chaignon)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  libbpf: Fix handling of BPF arena relocations
  btf: Fix virt_to_phys() on arm64 when mmapping BTF
  selftests/bpf: Stress test attaching a BPF prog to another BPF prog
  s390/bpf: Fix bpf_arch_text_poke() with new_addr == NULL again
  selftests/bpf: Add negative test cases for snprintf
  bpf: Reject %p% format string in bprintf-like helpers
This commit is contained in: commit d786aba320
arch/s390/net/bpf_jit_comp.c

@@ -566,7 +566,15 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
 {
 	memcpy(plt, &bpf_plt, sizeof(*plt));
 	plt->ret = ret;
-	plt->target = target;
+	/*
+	 * (target == NULL) implies that the branch to this PLT entry was
+	 * patched and became a no-op. However, some CPU could have jumped
+	 * to this PLT entry before patching and may be still executing it.
+	 *
+	 * Since the intention in this case is to make the PLT entry a no-op,
+	 * make the target point to the return label instead of NULL.
+	 */
+	plt->target = target ?: ret;
 }
 
 /*
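A note on the hunk above: "target ?: ret" uses the GNU C conditional with an omitted middle operand, equivalent to "target ? target : ret" but evaluating "target" only once. A minimal user-space sketch of the same fallback pattern (the names and handlers are illustrative, not kernel code):

#include <stdio.h>

static void say_hello(void)   { puts("hello"); }
static void say_nothing(void) { }

int main(void)
{
	/* primary == NULL models a PLT target that was patched out */
	void (*primary)(void) = NULL;
	void (*fallback)(void) = say_nothing;

	/* GNU extension: picks primary if non-NULL, else fallback */
	void (*target)(void) = primary ?: fallback;

	target();                 /* never a NULL call */

	primary = say_hello;
	(primary ?: fallback)();  /* prints "hello" */
	return 0;
}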
kernel/bpf/helpers.c

@@ -884,6 +884,13 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 		if (fmt[i] == 'p') {
 			sizeof_cur_arg = sizeof(long);
 
+			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
+			    ispunct(fmt[i + 1])) {
+				if (tmp_buf)
+					cur_arg = raw_args[num_spec];
+				goto nocopy_fmt;
+			}
+
 			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
 			    fmt[i + 2] == 's') {
 				fmt_ptype = fmt[i + 1];
@@ -891,11 +898,9 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
 				goto fmt_str;
 			}
 
-			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
-			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
+			if (fmt[i + 1] == 'K' ||
 			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
 			    fmt[i + 1] == 'S') {
-				/* just kernel pointers */
 				if (tmp_buf)
 					cur_arg = raw_args[num_spec];
 				i++;
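Why "%p%" slipped through before this change: the trailing '%' satisfies ispunct(), so the old combined test treated it as part of a bare kernel-pointer specifier and skipped it with i++, leaving the dangling '%' unvalidated. After the fix, a bare %p no longer consumes the following character, so the '%' is re-examined (and rejected) on the next pass. A stand-alone sketch of the before/after scan, modeling only the %p branch (simplified; not the kernel function):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns false if the format string must be rejected. */
static bool scan(const char *fmt, bool fixed)
{
	for (int i = 0; fmt[i]; i++) {
		if (fmt[i] != '%')
			continue;
		if (fmt[i + 1] != 'p')
			return false;	/* only %p modeled here */
		i++;
		if (fixed) {
			/* after the fix: bare %p does NOT consume the
			 * next char, so a following '%' is re-checked */
			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]))
				continue;
		} else {
			/* before the fix: '%' is punctuation, so "%p%"
			 * swallowed the trailing '%' and was accepted */
			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1])) {
				i++;
				continue;
			}
		}
		return false;	/* %pK, %px, ... omitted in this model */
	}
	return true;
}

int main(void)
{
	printf("old accepts \"%%p%%\": %d\n", scan("%p%", false)); /* 1: bug */
	printf("new accepts \"%%p%%\": %d\n", scan("%p%", true));  /* 0 */
	return 0;
}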
kernel/bpf/sysfs_btf.c

@@ -21,7 +21,7 @@ static int btf_sysfs_vmlinux_mmap(struct file *filp, struct kobject *kobj,
 {
 	unsigned long pages = PAGE_ALIGN(attr->size) >> PAGE_SHIFT;
 	size_t vm_size = vma->vm_end - vma->vm_start;
-	phys_addr_t addr = virt_to_phys(__start_BTF);
+	phys_addr_t addr = __pa_symbol(__start_BTF);
 	unsigned long pfn = addr >> PAGE_SHIFT;
 
 	if (attr->private != __start_BTF || !PAGE_ALIGNED(addr))
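Background on the one-line fix above: on arm64 the kernel image is mapped outside the linear map, so virt_to_phys() on a kernel-image symbol such as __start_BTF yields a wrong physical address (and warns under CONFIG_DEBUG_VIRTUAL); __pa_symbol() is the correct conversion for symbol addresses. The mapping this callback serves can be exercised from user space roughly like this (a minimal sketch with error handling trimmed; assumes a kernel new enough to support mmap of the BTF blob):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/btf/vmlinux", O_RDONLY);
	struct stat st;

	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	/* Read-only mapping of the raw vmlinux BTF blob */
	void *btf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (btf == MAP_FAILED)
		return 1;

	/* BTF blobs start with the magic 0xeB9F (little-endian) */
	printf("BTF magic: 0x%x\n", *(unsigned short *)btf);

	munmap(btf, st.st_size);
	close(fd);
	return 0;
}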
tools/lib/bpf/libbpf.c

@@ -735,7 +735,7 @@ struct bpf_object {
 
 	struct usdt_manager *usdt_man;
 
-	struct bpf_map *arena_map;
+	int arena_map_idx;
 	void *arena_data;
 	size_t arena_data_sz;
 
@@ -1517,6 +1517,7 @@ static struct bpf_object *bpf_object__new(const char *path,
 	obj->efile.obj_buf_sz = obj_buf_sz;
 	obj->efile.btf_maps_shndx = -1;
 	obj->kconfig_map_idx = -1;
+	obj->arena_map_idx = -1;
 
 	obj->kern_version = get_kernel_version();
 	obj->state = OBJ_OPEN;
@@ -2964,7 +2965,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
 	const long page_sz = sysconf(_SC_PAGE_SIZE);
 	size_t mmap_sz;
 
-	mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+	mmap_sz = bpf_map_mmap_sz(map);
 	if (roundup(data_sz, page_sz) > mmap_sz) {
 		pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
 			sec_name, mmap_sz, data_sz);
@@ -3038,12 +3039,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 		if (map->def.type != BPF_MAP_TYPE_ARENA)
 			continue;
 
-		if (obj->arena_map) {
+		if (obj->arena_map_idx >= 0) {
 			pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
-				map->name, obj->arena_map->name);
+				map->name, obj->maps[obj->arena_map_idx].name);
 			return -EINVAL;
 		}
-		obj->arena_map = map;
+		obj->arena_map_idx = i;
 
 		if (obj->efile.arena_data) {
 			err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
@@ -3053,7 +3054,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
 				return err;
 		}
 	}
-	if (obj->efile.arena_data && !obj->arena_map) {
+	if (obj->efile.arena_data && obj->arena_map_idx < 0) {
 		pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
 			ARENA_SEC);
 		return -ENOENT;
@@ -4583,8 +4584,13 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
 	if (shdr_idx == obj->efile.arena_data_shndx) {
 		reloc_desc->type = RELO_DATA;
 		reloc_desc->insn_idx = insn_idx;
-		reloc_desc->map_idx = obj->arena_map - obj->maps;
+		reloc_desc->map_idx = obj->arena_map_idx;
 		reloc_desc->sym_off = sym->st_value;
+
+		map = &obj->maps[obj->arena_map_idx];
+		pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
+			 prog->name, obj->arena_map_idx, map->name, map->sec_idx,
+			 map->sec_offset, insn_idx);
 		return 0;
 	}
 
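The underlying bug in all of the libbpf hunks above is the same: obj->maps is a dynamically grown array, so a cached "struct bpf_map *" into it can dangle once the array is reallocated, while an index survives reallocation. A stand-alone illustration of the failure mode (generic C, not libbpf code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct map { char name[16]; };

int main(void)
{
	struct map *maps = calloc(1, sizeof(*maps));

	if (!maps)
		return 1;
	strcpy(maps[0].name, "arena");

	struct map *cached_ptr = &maps[0]; /* like the old obj->arena_map     */
	size_t cached_idx = 0;             /* like the new obj->arena_map_idx */

	/* Growing the array may move it; cached_ptr then dangles. */
	maps = realloc(maps, 1024 * sizeof(*maps));
	if (!maps)
		return 1;
	(void)cached_ptr; /* dereferencing it now would be undefined behavior */

	/* The index, applied to the current base, remains valid. */
	printf("via index: %s\n", maps[cached_idx].name);
	free(maps);
	return 0;
}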
tools/testing/selftests/bpf/prog_tests/recursive_attach.c

@@ -149,3 +149,70 @@ close_prog:
 	fentry_recursive_target__destroy(target_skel);
 	fentry_recursive__destroy(tracing_skel);
 }
+
+static void *fentry_target_test_run(void *arg)
+{
+	for (;;) {
+		int prog_fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);
+		LIBBPF_OPTS(bpf_test_run_opts, topts);
+		int err;
+
+		if (prog_fd == -1)
+			break;
+		err = bpf_prog_test_run_opts(prog_fd, &topts);
+		if (!ASSERT_OK(err, "fentry_target test_run"))
+			break;
+	}
+
+	return NULL;
+}
+
+void test_fentry_attach_stress(void)
+{
+	struct fentry_recursive_target *target_skel = NULL;
+	struct fentry_recursive *tracing_skel = NULL;
+	struct bpf_program *prog;
+	int err, i, tgt_prog_fd;
+	pthread_t thread;
+
+	target_skel = fentry_recursive_target__open_and_load();
+	if (!ASSERT_OK_PTR(target_skel,
+			   "fentry_recursive_target__open_and_load"))
+		goto close_prog;
+	tgt_prog_fd = bpf_program__fd(target_skel->progs.fentry_target);
+	err = pthread_create(&thread, NULL,
+			     fentry_target_test_run, &tgt_prog_fd);
+	if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+		goto close_prog;
+
+	for (i = 0; i < 1000; i++) {
+		tracing_skel = fentry_recursive__open();
+		if (!ASSERT_OK_PTR(tracing_skel, "fentry_recursive__open"))
+			goto stop_thread;
+
+		prog = tracing_skel->progs.recursive_attach;
+		err = bpf_program__set_attach_target(prog, tgt_prog_fd,
+						     "fentry_target");
+		if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+			goto stop_thread;
+
+		err = fentry_recursive__load(tracing_skel);
+		if (!ASSERT_OK(err, "fentry_recursive__load"))
+			goto stop_thread;
+
+		err = fentry_recursive__attach(tracing_skel);
+		if (!ASSERT_OK(err, "fentry_recursive__attach"))
+			goto stop_thread;
+
+		fentry_recursive__destroy(tracing_skel);
+		tracing_skel = NULL;
+	}
+
+stop_thread:
+	__atomic_store_n(&tgt_prog_fd, -1, __ATOMIC_SEQ_CST);
+	err = pthread_join(thread, NULL);
+	ASSERT_OK(err, "pthread_join");
+close_prog:
+	fentry_recursive__destroy(tracing_skel);
+	fentry_recursive_target__destroy(target_skel);
+}
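The stress test above coordinates shutdown through a plain int used as a mailbox: the worker re-reads it with __atomic_load_n() on every iteration, and the main thread requests a stop by publishing the sentinel -1 with __atomic_store_n(). The same pattern in isolation (a generic sketch, not test code):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
	long iters = 0;

	for (;;) {
		/* Re-read the shared slot on every pass; SEQ_CST keeps
		 * the load from being hoisted out of the loop. */
		int fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);

		if (fd == -1)	/* sentinel: stop */
			break;
		iters++;	/* real test calls bpf_prog_test_run_opts() */
	}
	printf("worker did %ld iterations\n", iters);
	return NULL;
}

int main(void)
{
	int slot = 42;	/* stands in for the prog fd under test */
	pthread_t t;

	pthread_create(&t, NULL, worker, &slot);
	usleep(10000);	/* let the worker spin a bit */

	__atomic_store_n(&slot, -1, __ATOMIC_SEQ_CST);	/* request stop */
	pthread_join(t, NULL);
	return 0;
}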
tools/testing/selftests/bpf/prog_tests/snprintf.c

@@ -116,6 +116,8 @@ static void test_snprintf_negative(void)
 	ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7");
 	ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");
 	ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");
+	ASSERT_ERR(load_single_snprintf("%p%"), "invalid specifier 8");
+	ASSERT_ERR(load_single_snprintf("%s%"), "invalid specifier 9");
 }
 
 void test_snprintf(void)
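These negative tests load a small BPF program whose only variable is the format string; the load must fail for the new cases because the verifier validates the string up front via bpf_bprintf_prepare(). A hypothetical BPF-side counterpart (program name, section, and buffer are illustrative; assumes a clang -target bpf build against libbpf's bpf_helpers.h):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char buf[64];

SEC("tp/syscalls/sys_enter_execve")
int log_ptr(void *ctx)
{
	/* Valid: bare %p is an accepted specifier */
	BPF_SNPRINTF(buf, sizeof(buf), "ctx=%p", ctx);
	/* After the fix, uncommenting this makes the load fail:
	 * BPF_SNPRINTF(buf, sizeof(buf), "ctx=%p%", ctx);
	 */
	return 0;
}

char _license[] SEC("license") = "GPL";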