mirror of
https://github.com/nxp-imx/linux-imx.git
synced 2025-07-16 14:19:36 +02:00

This patch corrects a few warnings to allow selftests to compile for GCC. -- progs/cpumask_failure.c -- progs/bpf_misc.h:136:22: error: ‘cpumask’ is used uninitialized [-Werror=uninitialized] 136 | #define __sink(expr) asm volatile("" : "+g"(expr)) | ^~~ progs/cpumask_failure.c:68:9: note: in expansion of macro ‘__sink’ 68 | __sink(cpumask); The macro __sink(cpumask) with the '+' constraint modifier forces the compiler to expect a read and write from cpumask. GCC detects that cpumask is never initialized and reports an error. This patch removes the spurious, non-required definitions of cpumask. -- progs/dynptr_fail.c -- progs/dynptr_fail.c:1444:9: error: ‘ptr1’ may be used uninitialized [-Werror=maybe-uninitialized] 1444 | bpf_dynptr_clone(&ptr1, &ptr2); Many of the tests in the file are related to the detection of uninitialized pointers by the verifier. GCC is able to detect possible uninitialized values, and reports this as an error. The patch initializes all of the previously uninitialized structs. -- progs/test_tunnel_kern.c -- progs/test_tunnel_kern.c:590:9: error: array subscript 1 is outside array bounds of ‘struct geneve_opt[1]’ [-Werror=array-bounds=] 590 | *(int *) &gopt.opt_data = bpf_htonl(0xdeadbeef); | ^~~~~~~~~~~~~~~~~~~~~~~ progs/test_tunnel_kern.c:575:27: note: at offset 4 into object ‘gopt’ of size 4 575 | struct geneve_opt gopt; This test accesses beyond the defined data for the struct geneve_opt, which contains as its last field "u8 opt_data[0]", which clearly does not get reserved space (in stack) in the function header. This pattern is repeated in the ip6geneve_set_tunnel and geneve_set_tunnel functions. GCC is able to see this and emits a warning. The patch introduces a local struct that allocates enough space to safely allow the write to the opt_data field. 
-- progs/jeq_infer_not_null_fail.c -- progs/jeq_infer_not_null_fail.c:21:40: error: array subscript ‘struct bpf_map[0]’ is partly outside array bounds of ‘struct <anonymous>[1]’ [-Werror=array-bounds=] 21 | struct bpf_map *inner_map = map->inner_map_meta; | ^~ progs/jeq_infer_not_null_fail.c:14:3: note: object ‘m_hash’ of size 32 14 | } m_hash SEC(".maps"); This example defines m_hash in the context of the compilation unit and casts it to struct bpf_map, even though m_hash is much smaller than struct bpf_map. GCC errors out when the code attempts to access an element of struct bpf_map that lies outside of the defined limits of m_hash. This patch disables the warning through a GCC pragma. These changes were tested in bpf-next master selftests without any regressions. Signed-off-by: Cupertino Miranda <cupertino.miranda@oracle.com> Cc: jose.marchesi@oracle.com Cc: david.faust@oracle.com Cc: Yonghong Song <yonghong.song@linux.dev> Cc: Eduard Zingerman <eddyz87@gmail.com> Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com> Link: https://lore.kernel.org/r/20240510183850.286661-2-cupertino.miranda@oracle.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
190 lines
4.1 KiB
C
190 lines
4.1 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
|
|
|
|
#include <vmlinux.h>
|
|
#include <bpf/bpf_tracing.h>
|
|
#include <bpf/bpf_helpers.h>
|
|
#include "bpf_misc.h"
|
|
|
|
#include "cpumask_common.h"
|
|
|
|
/* Module license; GPL is required for the kfuncs used below. */
char _license[] SEC("license") = "GPL";
|
|
|
|
/* Prototype for all of the program trace events below:
|
|
*
|
|
* TRACE_EVENT(task_newtask,
|
|
* TP_PROTO(struct task_struct *p, u64 clone_flags)
|
|
*/
|
|
|
|
/*
 * Negative verifier test: acquire a referenced struct bpf_cpumask via
 * create_cpumask() (helper from cpumask_common.h) and return without ever
 * calling bpf_cpumask_release().  The leaked reference must make the
 * verifier reject the program with "Unreleased reference".
 * __sink() forces the compiler to keep cpumask live so the acquired
 * reference is not optimized away.
 */
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();
	__sink(cpumask);

	/* cpumask is never released. */
	return 0;
}
|
|
|
|
/*
 * Negative verifier test: release the same bpf_cpumask reference twice.
 * After the first bpf_cpumask_release() the pointer is no longer a valid
 * referenced kptr, so the second release must be rejected by the verifier
 * ("NULL pointer passed to trusted arg0").
 */
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg0")
int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();

	/* cpumask is released twice. */
	bpf_cpumask_release(cpumask);
	bpf_cpumask_release(cpumask);

	return 0;
}
|
|
|
|
/*
 * Negative verifier test: try to bpf_cpumask_acquire() a plain
 * struct cpumask (task->cpus_ptr) cast to struct bpf_cpumask.  Only a
 * referenced struct bpf_cpumask may be acquired, so the verifier must
 * reject the program ("must be referenced").
 */
SEC("tp_btf/task_newtask")
__failure __msg("must be referenced")
int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	/* Can't acquire a non-struct bpf_cpumask. */
	cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);
	__sink(cpumask);

	return 0;
}
|
|
|
|
/*
 * Negative verifier test: attempt to mutate a read-only struct cpumask
 * (task->cpus_ptr) cast to struct bpf_cpumask.  Mutating kfuncs only
 * accept a real struct bpf_cpumask, so the verifier must reject this with
 * the expected argument-type message.
 */
SEC("tp_btf/task_newtask")
__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
{
	/* Can't set the CPU of a non-struct bpf_cpumask. */
	bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);

	return 0;
}
|
|
|
|
/*
 * Negative verifier test: stash a bpf_cpumask kptr in a map value, then
 * take it back out with bpf_kptr_xchg() and return without releasing it.
 * The xchg re-acquires ownership of the reference, so leaking it must make
 * the verifier fail with "Unreleased reference".
 * cpumask_map_insert()/cpumask_map_value_lookup() are helpers from
 * cpumask_common.h.
 */
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	/* Ownership moves into the map on successful insert. */
	if (cpumask_map_insert(cpumask))
		return 0;

	v = cpumask_map_value_lookup();
	if (!v)
		return 0;

	/* Ownership moves back to the program here. */
	cpumask = bpf_kptr_xchg(&v->cpumask, NULL);

	/* cpumask is never released. */
	return 0;
}
|
|
|
|
/*
 * Negative verifier test: pass a literal NULL to a kfunc that takes
 * KF_TRUSTED_ARGS.  Trusted arguments may not be NULL, so the verifier
 * must reject the program ("NULL pointer passed to trusted arg0").
 */
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg0")
int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
{
	/* NULL passed to KF_TRUSTED_ARGS kfunc. */
	bpf_cpumask_empty(NULL);

	return 0;
}
|
|
|
|
/*
 * Negative verifier test: load the global_mask kptr inside an RCU read
 * section, but only use it (bpf_cpumask_test_cpu(), a KF_RCU kfunc) after
 * bpf_rcu_read_unlock().  An RCU-protected pointer is invalid once the RCU
 * section ends, so the verifier must reject the program
 * ("R2 must be a rcu pointer").
 * err is a global status variable shared with the userspace side of the
 * test (declared in cpumask_common.h — see header for exact codes).
 */
SEC("tp_btf/task_newtask")
__failure __msg("R2 must be a rcu pointer")
int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	local = create_cpumask();
	if (!local)
		return 0;

	/* Publish the new mask; global_mask is expected to be empty here. */
	prev = bpf_kptr_xchg(&global_mask, local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask;
	if (!local) {
		err = 4;
		bpf_rcu_read_unlock();
		return 0;
	}

	bpf_rcu_read_unlock();

	/* RCU region is exited before calling KF_RCU kfunc. */

	bpf_cpumask_test_cpu(0, (const struct cpumask *)local);

	return 0;
}
|
|
|
|
/*
 * Negative verifier test: read the global_mask kptr under RCU but pass it
 * to bpf_cpumask_test_cpu() without first checking it for NULL.  The kptr
 * load yields a PTR_MAYBE_NULL pointer, so the missing check must make
 * the verifier fail ("NULL pointer passed to trusted arg1").
 * err is a global status variable shared with the userspace test harness.
 */
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg1")
int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *local, *prev;

	local = create_cpumask();
	if (!local)
		return 0;

	/* Publish the new mask; global_mask is expected to be empty here. */
	prev = bpf_kptr_xchg(&global_mask, local);
	if (prev) {
		bpf_cpumask_release(prev);
		err = 3;
		return 0;
	}

	bpf_rcu_read_lock();
	local = global_mask;

	/* No NULL check is performed on global cpumask kptr. */
	bpf_cpumask_test_cpu(0, (const struct cpumask *)local);

	bpf_rcu_read_unlock();

	return 0;
}
|
|
|
|
/*
 * Negative verifier test: read global_mask under RCU (yielding a
 * possibly-NULL, RCU-protected pointer) and feed it straight into
 * bpf_kptr_xchg() without a NULL check.  bpf_kptr_xchg() is a helper, not
 * a KF_RCU kfunc, so the verifier must reject the maybe-NULL argument
 * ("Possibly NULL pointer passed to helper arg2").
 */
SEC("tp_btf/task_newtask")
__failure __msg("Possibly NULL pointer passed to helper arg2")
int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *prev, *curr;

	curr = bpf_cpumask_create();
	if (!curr)
		return 0;

	prev = bpf_kptr_xchg(&global_mask, curr);
	if (prev)
		bpf_cpumask_release(prev);

	bpf_rcu_read_lock();
	curr = global_mask;
	/* PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU passed to bpf_kptr_xchg() */
	prev = bpf_kptr_xchg(&global_mask, curr);
	bpf_rcu_read_unlock();
	if (prev)
		bpf_cpumask_release(prev);

	return 0;
}
|