linux-yocto/arch/s390/lib/uaccess.c
Heiko Carstens 884f0582b2 s390/uaccess: Remove INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER
The s390 implementations of raw_copy_from_user() and raw_copy_to_user() are
never inlined. However, INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER are
still set. This leads to the odd situation that only the error handling of
copy_from_user() (the memset to zero of the bytes that were not copied) is
inlined, while the actual fast path code is out-of-line.

This would make sense if raw_copy_from_user() and raw_copy_to_user() were
implemented in assembler files, where inlining is not possible. With the
current s390 setup, however, this split makes no sense.

Address this by moving the raw uaccess copy inline assemblies to the
uaccess header file and removing the INLINE_COPY_FROM_USER and
INLINE_COPY_TO_USER definitions. This way the uaccess code, now including
the error handling, stays out-of-line in the common code _copy_from_user()
and _copy_to_user() variants, which inline the raw uaccess functions via
_inline_copy_from_user() and _inline_copy_to_user().

This reduces the size of the kernel image by ~17kb.
(defconfig, gcc 14.2.0)

Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
2025-01-26 17:24:07 +01:00
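
For context, the common-code helpers the commit message refers to follow this
pattern (a simplified sketch of _inline_copy_from_user() from
include/linux/uaccess.h, from memory and abbreviated, not the verbatim
upstream definition):

static __always_inline __must_check unsigned long
_inline_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);	/* arch fast path */
		instrument_copy_from_user_after(to, from, n, res);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;
}

With INLINE_COPY_FROM_USER unset, this helper is instantiated once inside the
out-of-line _copy_from_user(), rather than at every call site.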


// SPDX-License-Identifier: GPL-2.0
/*
 * Standard user space access functions based on mvcp/mvcs and doing
 * interesting things in the secondary space mode.
 *
 * Copyright IBM Corp. 2006,2014
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *            Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/asm-extable.h>
#include <asm/ctlreg.h>

#ifdef CONFIG_DEBUG_ENTRY
/*
 * Sanity check for CONFIG_DEBUG_ENTRY kernels: verify that CR1 and CR7
 * hold the expected kernel and user ASCE while running in the kernel.
 * Called with exit == 0 when entering and exit == 1 when leaving the
 * kernel.
 */
void debug_user_asce(int exit)
{
	struct ctlreg cr1, cr7;

	local_ctl_store(1, &cr1);
	local_ctl_store(7, &cr7);
	if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val)
		return;
	panic("incorrect ASCE on kernel %s\n"
	      "cr1:    %016lx cr7:  %016lx\n"
	      "kernel: %016lx user: %016lx\n",
	      exit ? "exit" : "entry", cr1.val, cr7.val,
	      get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val);
}
#endif /* CONFIG_DEBUG_ENTRY */

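/*
 * Context, not part of this file: debug_user_asce() is wired into the
 * generic entry/exit path. If memory serves, the s390 hooks in
 * arch/s390/include/asm/entry-common.h look roughly like this:
 *
 *	static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
 *	{
 *		debug_user_asce(0);
 *	}
 *
 *	static __always_inline void arch_exit_to_user_mode(void)
 *	{
 *		debug_user_asce(1);
 *	}
 */
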
/*
 * Like _copy_from_user(), but performs the access with the given storage
 * access key. Returns the number of bytes that could not be copied; the
 * uncopied tail of the destination is zeroed.
 */
unsigned long _copy_from_user_key(void *to, const void __user *from,
				  unsigned long n, unsigned long key)
{
	unsigned long res = n;

	might_fault();
	if (!should_fail_usercopy()) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user_key(to, from, n, key);
		instrument_copy_from_user_after(to, from, n, res);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
EXPORT_SYMBOL(_copy_from_user_key);

/*
 * Like _copy_to_user(), but performs the access with the given storage
 * access key. Returns the number of bytes that could not be copied.
 */
unsigned long _copy_to_user_key(void __user *to, const void *from,
				unsigned long n, unsigned long key)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	return raw_copy_to_user_key(to, from, n, key);
}
EXPORT_SYMBOL(_copy_to_user_key);

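/*
 * Hypothetical call site (function and variable names invented for
 * illustration): callers reach _copy_from_user_key() through the
 * copy_from_user_key() wrapper in arch/s390/include/asm/uaccess.h,
 * and _copy_to_user_key() through the symmetric copy_to_user_key():
 *
 *	static int fetch_guest_buf(void *dst, const void __user *src,
 *				   unsigned long len, unsigned long key)
 *	{
 *		if (copy_from_user_key(dst, src, len, key))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
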
unsigned long __clear_user(void __user *to, unsigned long size)
{
	unsigned long rem;
	union oac spec = {
		/* Access operand 1 (the destination) in the secondary address space. */
		.oac1.as = PSW_BITS_AS_SECONDARY,
		.oac1.a = 1,
	};

	/*
	 * Clear the buffer in chunks of up to 4k with MVCOS, using the
	 * empty zero page as source. MVCOS takes its operand-access
	 * control in GR0. If a chunk faults, retry with a length that
	 * ends at the next page boundary, so that the number of bytes
	 * that could not be cleared is reported exactly.
	 */
	asm volatile(
		"	lr	0,%[spec]\n"
		"0:	mvcos	0(%[to]),0(%[zeropg]),%[size]\n"
		"1:	jz	5f\n"
		"	algr	%[size],%[val]\n"
		"	slgr	%[to],%[val]\n"
		"	j	0b\n"
		"2:	la	%[rem],4095(%[to])\n"	/* rem = to + 4095 */
		"	nr	%[rem],%[val]\n"	/* rem = (to + 4095) & -4096 */
		"	slgr	%[rem],%[to]\n"
		"	clgr	%[size],%[rem]\n"	/* copy crosses next page boundary? */
		"	jnh	6f\n"
		"3:	mvcos	0(%[to]),0(%[zeropg]),%[rem]\n"
		"4:	slgr	%[size],%[rem]\n"
		"	j	6f\n"
		"5:	slgr	%[size],%[size]\n"
		"6:\n"
		EX_TABLE(0b, 2b)
		EX_TABLE(1b, 2b)
		EX_TABLE(3b, 6b)
		EX_TABLE(4b, 6b)
		: [size] "+&a" (size), [to] "+&a" (to), [rem] "=&a" (rem)
		: [val] "a" (-4096UL), [zeropg] "a" (empty_zero_page), [spec] "d" (spec.val)
		: "cc", "memory", "0");
	return size;
}
EXPORT_SYMBOL(__clear_user);
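
/*
 * Hypothetical call site (names invented for illustration): __clear_user()
 * backs the clear_user() wrapper, e.g. to zero-fill the tail of a user
 * buffer after a short copy:
 *
 *	static int pad_user_buf(void __user *ubuf, size_t copied, size_t len)
 *	{
 *		if (copied < len && clear_user(ubuf + copied, len - copied))
 *			return -EFAULT;
 *		return 0;
 *	}
 */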