bpf: Fix typos in comments
Found the following typos in comments, and fixed them:

s/unpriviledged/unprivileged/
s/reponsible/responsible/
s/possiblities/possibilities/
s/Divison/Division/
s/precsion/precision/
s/havea/have a/
s/reponsible/responsible/
s/responsibile/responsible/
s/tigher/tighter/
s/respecitve/respective/

Signed-off-by: Rafael Passos <rafael@rcpassos.me>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/6af7deb4-bb24-49e8-b3f1-8dd410597337@smtp-relay.sendinblue.com
parent e1a7545981
commit a7de265cb2
@@ -318,7 +318,7 @@ static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
	 *
	 * If the local_storage->list is already empty, the caller will not
	 * care about the bpf_ma value also because the caller is not
-	 * responsibile to free the local_storage.
+	 * responsible to free the local_storage.
	 */

	if (storage_smap)
@@ -2814,7 +2814,7 @@ void bpf_prog_free(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);

-/* RNG for unpriviledged user space with separated state from prandom_u32(). */
+/* RNG for unprivileged user space with separated state from prandom_u32(). */
 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

 void bpf_user_rnd_init_once(void)
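A minimal sketch of how such a separated per-CPU state is consumed, assuming the kernel's prandom and per-CPU APIs (prandom_u32_state(), get_cpu_var()/put_cpu_var()); the helper name and shape here are illustrative, not the exact kernel function:

#include <linux/percpu.h>
#include <linux/prandom.h>

static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

static u32 user_rnd_u32_sketch(void)
{
	struct rnd_state *state;
	u32 res;

	/* get_cpu_var() disables preemption, so the per-CPU state
	 * cannot be entered concurrently on the same CPU.
	 */
	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}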
@@ -1539,7 +1539,7 @@ static void htab_map_free(struct bpf_map *map)
	 */

	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
-	 * underneath and is reponsible for waiting for callbacks to finish
+	 * underneath and is responsible for waiting for callbacks to finish
	 * during bpf_mem_alloc_destroy().
	 */
	if (!htab_is_prealloc(htab)) {
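For context, the contract this comment relies on can be sketched with the allocator's public API (include/linux/bpf_mem_alloc.h); a minimal, illustrative lifecycle, not code from this commit:

#include <linux/bpf_mem_alloc.h>

static struct bpf_mem_alloc ma;

static int demo_init(void)
{
	/* size == 0: allocator serves a range of object sizes */
	return bpf_mem_alloc_init(&ma, 0, false);
}

static void demo_exit(void)
{
	void *obj = bpf_mem_alloc(&ma, 64);

	if (obj)
		bpf_mem_free(&ma, obj);	/* freed via RCU underneath */
	/* destroy() itself waits for pending RCU callbacks, so the
	 * caller needs no explicit rcu_barrier() -- the point of the
	 * comment above.
	 */
	bpf_mem_alloc_destroy(&ma);
}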
@@ -2412,7 +2412,7 @@ __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 o
	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
	 *
	 * For skb-type dynptrs, it is safe to write into the returned pointer
-	 * if the bpf program allows skb data writes. There are two possiblities
+	 * if the bpf program allows skb data writes. There are two possibilities
	 * that may occur when calling bpf_dynptr_slice_rdwr:
	 *
	 * 1) The requested slice is in the head of the skb. In this case, the
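A hedged usage sketch of these two possibilities from the BPF program side; the kfunc declarations follow the kernel signatures, but the program itself is illustrative. If the slice fell into the skb head, the write below hits packet memory directly; otherwise it lands in buf and would still need to be flushed back (e.g. with bpf_dynptr_write()):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define TC_ACT_OK	0
#define TC_ACT_SHOT	2

extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
			       struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
				   void *buffer, __u32 buffer__szk) __ksym;

SEC("tc")
int mark_first_byte(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u8 buf[4];
	__u8 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr))
		return TC_ACT_SHOT;

	/* case 1: slice in skb head -> 'data' points into the packet;
	 * case 2: slice was copied  -> 'data' points at 'buf'.
	 */
	data = bpf_dynptr_slice_rdwr(&ptr, 0, buf, sizeof(buf));
	if (!data)
		return TC_ACT_SHOT;

	data[0] = 0x42;
	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";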
@@ -172,7 +172,7 @@ static bool bpf_global_percpu_ma_set;

 /* verifier_state + insn_idx are pushed to stack when branch is encountered */
 struct bpf_verifier_stack_elem {
-	/* verifer state is 'st'
+	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
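For orientation, the surrounding definition reads roughly as follows in kernels of this vintage (reconstructed context, not part of this diff; the field set may shift between versions):

struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};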
@@ -2131,7 +2131,7 @@ static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
 static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
 {
	/* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit
-	 * values on both sides of 64-bit range in hope to have tigher range.
+	 * values on both sides of 64-bit range in hope to have tighter range.
	 * E.g., if r1 is [0x1'00000000, 0x3'80000000], and we learn from
	 * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff].
	 * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound
@@ -2139,7 +2139,7 @@ static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
	 * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a
	 * better overall bounds for r1 as [0x1'000000001; 0x3'7fffffff].
	 * We just need to make sure that derived bounds we are intersecting
-	 * with are well-formed ranges in respecitve s64 or u64 domain, just
+	 * with are well-formed ranges in respective s64 or u64 domain, just
	 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments.
	 */
	__u64 new_umin, new_umax;
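The arithmetic in this comment can be checked standalone; a minimal userspace sketch of just the example above, assuming nothing from the kernel (the in-kernel version additionally verifies the spliced range stays well-formed before committing it):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t umin = 0x100000000ULL, umax = 0x380000000ULL;
	uint32_t s32_min = 1, s32_max = 0x7fffffff;

	/* keep the upper 32 bits, splice in the tightened lower 32 bits */
	uint64_t new_umin = (umin & ~0xffffffffULL) | s32_min;
	uint64_t new_umax = (umax & ~0xffffffffULL) | s32_max;

	/* only usable if the spliced range is still well-formed */
	if (new_umin <= new_umax)
		printf("[0x%llx, 0x%llx]\n",	/* [0x100000001, 0x37fffffff] */
		       (unsigned long long)new_umin,
		       (unsigned long long)new_umax);
	return 0;
}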
@@ -14714,7 +14714,7 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state

 /* Adjusts the register min/max values in the case that the dst_reg and
  * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K
- * check, in which case we havea fake SCALAR_VALUE representing insn->imm).
+ * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
  * Technically we can do similar adjustments for pointers to the same object,
  * but we don't support that right now.
  */
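The kind of refinement this comment describes can be sketched in isolation; an illustrative fragment (plain C, not the kernel function) for one case, the taken branch of an unsigned 'if r1 > r2' on two scalars:

#include <stdint.h>

struct scalar_bounds {
	uint64_t umin, umax;
};

static void refine_jgt_true(struct scalar_bounds *r1, struct scalar_bounds *r2)
{
	/* r1 > r2 held: r1 is at least r2's minimum + 1 ... */
	if (r1->umin < r2->umin + 1)
		r1->umin = r2->umin + 1;
	/* ... and r2 is at most r1's maximum - 1 (r1 > r2 implies
	 * r1->umax >= 1, so the subtraction cannot wrap).
	 */
	if (r2->umax > r1->umax - 1)
		r2->umax = r1->umax - 1;
}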
@@ -17352,7 +17352,7 @@ hit:
			err = propagate_liveness(env, &sl->state, cur);

			/* if previous state reached the exit with precision and
-			 * current state is equivalent to it (except precsion marks)
+			 * current state is equivalent to it (except precision marks)
			 * the precision needs to be propagated back in
			 * the current state.
			 */
@@ -20209,7 +20209,7 @@ patch_map_ops_generic:
			 * divide-by-3 through multiplication, followed by further
			 * division by 8 through 3-bit right shift.
			 * Refer to book "Hacker's Delight, 2nd ed." by Henry S. Warren, Jr.,
-			 * p. 227, chapter "Unsigned Divison by 3" for details and proofs.
+			 * p. 227, chapter "Unsigned Division by 3" for details and proofs.
			 *
			 * N / 3 <=> M * N / 2^33, where M = (2^33 + 1) / 3 = 0xaaaaaaab.
			 */
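The cited identity is easy to verify exhaustively in userspace; a small standalone check (plain C, independent of the kernel) that (M * N) >> 33 == N / 3 for every 32-bit N:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t M = 0xaaaaaaabULL;	/* (2^33 + 1) / 3 */
	uint32_t n = 0;

	/* walks all 2^32 values; takes a few seconds with -O2
	 * (build without -DNDEBUG so the assert stays live)
	 */
	do {
		assert((M * n) >> 33 == n / 3);
	} while (++n != 0);	/* wraps after 0xffffffff: full coverage */
	return 0;
}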