linux-yocto/arch/um/include/asm/uaccess.h
Johannes Berg d1d7f01f7c um: mark rodata read-only and implement _nofault accesses
Mark read-only data as actually read-only (a simple mprotect), and,
to be able to test that, also implement _nofault accesses. These work
by setting up a new "segv_continue" pointer in current; when a
segfault is hit, the signal return context is changed so that
execution continues at that address. The code using this points
segv_continue at a label and aborts the access that way, returning
-EFAULT.
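
As an illustration (not part of this patch), a caller would wrap an
access roughly like below; read_kernel_long() is a made-up name used
only for the sake of the example:

  static long read_kernel_long(const unsigned long *addr, unsigned long *val)
  {
  	__get_kernel_nofault(val, addr, unsigned long, Efault);
  	return 0;
  Efault:
  	return -EFAULT;
  }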

It would be possible to optimize ___backtrack_faulted() by using
asm goto (compiler-version dependent) and/or gcc's &&label
("labels as values") extension (not sure if clang has it), but in at
least one attempt the &&label variant caused the compiler not to load
-EFAULT into the register when jumping to that label from the fault
handler. So leave it like this for now.
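
For readers unfamiliar with it, the &&label ("labels as values")
extension turns a label into a void * that a computed goto can jump
through. A standalone sketch of the extension itself, not of the
actual ___backtrack_faulted() implementation (assumes -EFAULT from
linux/errno.h):

  static int labels_demo(int fail)
  {
  	void *resume = &&fault;	/* &&label yields the label's address */

  	if (fail)
  		goto *resume;	/* computed goto through the pointer */
  	return 0;
  fault:
  	return -EFAULT;
  }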

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Co-developed-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20250210160926.420133-2-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
2025-03-18 11:03:14 +01:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright (C) 2015 Richard Weinberger (richard@nod.at)
 */

#ifndef __UM_UACCESS_H
#define __UM_UACCESS_H

#include <asm/elf.h>
#include <linux/unaligned.h>
#include <sysdep/faultinfo.h>

#define __under_task_size(addr, size) \
	(((unsigned long) (addr) < TASK_SIZE) && \
	 (((unsigned long) (addr) + (size)) < TASK_SIZE))

#define __access_ok_vsyscall(addr, size) \
	(((unsigned long) (addr) >= FIXADDR_USER_START) && \
	 ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
	 ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))

#define __addr_range_nowrap(addr, size) \
	((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))

extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
static inline int __access_ok(const void __user *ptr, unsigned long size);

/* Teach asm-generic/uaccess.h that we have C functions for these. */
#define __access_ok __access_ok
#define __clear_user __clear_user

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#include <asm-generic/uaccess.h>
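
/*
 * A (ptr, size) range is acceptable when it does not wrap around and lies
 * either entirely below TASK_SIZE or entirely inside the vsyscall fixmap
 * window [FIXADDR_USER_START, FIXADDR_USER_END).
 */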
static inline int __access_ok(const void __user *ptr, unsigned long size)
{
	unsigned long addr = (unsigned long)ptr;

	return __addr_range_nowrap(addr, size) &&
		(__under_task_size(addr, size) ||
		 __access_ok_vsyscall(addr, size));
}
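
/*
 * Non-faulting kernel accessors: ___backtrack_faulted() records a resume
 * address in current->thread.segv_continue; if the access below faults,
 * the SEGV handler makes execution continue there, __faulted ends up
 * non-zero and we bail out to err_label instead of crashing. On the
 * success path segv_continue is cleared again.
 */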
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
	int __faulted; \
\
	___backtrack_faulted(__faulted); \
	if (__faulted) { \
		*((type *)dst) = (type) 0; \
		goto err_label; \
	} \
	*((type *)dst) = get_unaligned((type *)(src)); \
	current->thread.segv_continue = NULL; \
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
	int __faulted; \
\
	___backtrack_faulted(__faulted); \
	if (__faulted) \
		goto err_label; \
	put_unaligned(*((type *)src), (type *)(dst)); \
	current->thread.segv_continue = NULL; \
} while (0)

#endif