Merge commit 'core-entry-2025-05-25' into loongarch-next

LoongArch architecture changes for 6.16 modify some of the same files as the
core-entry changes, so merge them to create a base for resolving conflicts.
Author: Huacai Chen
Date:   2025-05-30 21:38:40 +08:00
Commit: c006d5d691

8 changed files with 120 additions and 80 deletions

--- a/arch/loongarch/include/asm/asm-prototypes.h
+++ b/arch/loongarch/include/asm/asm-prototypes.h

@@ -12,3 +12,11 @@ __int128_t __ashlti3(__int128_t a, int b);
 __int128_t __ashrti3(__int128_t a, int b);
 __int128_t __lshrti3(__int128_t a, int b);
 #endif
+
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+							   struct pt_regs *regs);
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+								    struct pt_regs *regs,
+								    int (*fn)(void *),
+								    void *fn_arg);

--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S

@@ -77,24 +77,22 @@ SYM_CODE_START(handle_syscall)
 SYM_CODE_END(handle_syscall)
 _ASM_NOKPROBE(handle_syscall)
 
-SYM_CODE_START(ret_from_fork)
+SYM_CODE_START(ret_from_fork_asm)
 	UNWIND_HINT_REGS
-	bl		schedule_tail		# a0 = struct task_struct *prev
-	move		a0, sp
-	bl		syscall_exit_to_user_mode
+	move		a1, sp
+	bl		ret_from_fork
 	RESTORE_STATIC
 	RESTORE_SOME
 	RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_asm)
 
-SYM_CODE_START(ret_from_kernel_thread)
+SYM_CODE_START(ret_from_kernel_thread_asm)
 	UNWIND_HINT_REGS
-	bl		schedule_tail		# a0 = struct task_struct *prev
-	move		a0, s1
-	jirl		ra, s0, 0
-	move		a0, sp
-	bl		syscall_exit_to_user_mode
+	move		a1, sp
+	move		a2, s0
+	move		a3, s1
+	bl		ret_from_kernel_thread
 	RESTORE_STATIC
 	RESTORE_SOME
 	RESTORE_SP_AND_RET
-SYM_CODE_END(ret_from_kernel_thread)
+SYM_CODE_END(ret_from_kernel_thread_asm)

--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c

@@ -13,6 +13,7 @@
 #include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/entry-common.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
@@ -34,6 +35,7 @@
 #include <linux/nmi.h>
 
 #include <asm/asm.h>
+#include <asm/asm-prototypes.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 #include <asm/elf.h>
@@ -47,6 +49,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/reg.h>
+#include <asm/switch_to.h>
 #include <asm/unwind.h>
 #include <asm/vdso.h>
 
@@ -63,8 +66,9 @@ EXPORT_SYMBOL(__stack_chk_guard);
 unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
 asmlinkage void restore_and_ret(void);
+asmlinkage void ret_from_fork_asm(void);
+asmlinkage void ret_from_kernel_thread_asm(void);
+
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 {
@@ -138,6 +142,23 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	return 0;
 }
 
+asmlinkage void noinstr __no_stack_protector ret_from_fork(struct task_struct *prev,
+							   struct pt_regs *regs)
+{
+	schedule_tail(prev);
+	syscall_exit_to_user_mode(regs);
+}
+
+asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_struct *prev,
+								    struct pt_regs *regs,
+								    int (*fn)(void *),
+								    void *fn_arg)
+{
+	schedule_tail(prev);
+	fn(fn_arg);
+	syscall_exit_to_user_mode(regs);
+}
+
 /*
  * Copy architecture-specific thread state
  */
@@ -165,8 +186,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		p->thread.reg03 = childksp;
 		p->thread.reg23 = (unsigned long)args->fn;
 		p->thread.reg24 = (unsigned long)args->fn_arg;
-		p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
-		p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
+		p->thread.reg01 = (unsigned long)ret_from_kernel_thread_asm;
+		p->thread.sched_ra = (unsigned long)ret_from_kernel_thread_asm;
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->csr_euen = p->thread.csr_euen;
 		childregs->csr_crmd = p->thread.csr_crmd;
@@ -182,8 +203,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	childregs->regs[3] = usp;
 
 	p->thread.reg03 = (unsigned long) childregs;
-	p->thread.reg01 = (unsigned long) ret_from_fork;
-	p->thread.sched_ra = (unsigned long) ret_from_fork;
+	p->thread.reg01 = (unsigned long) ret_from_fork_asm;
+	p->thread.sched_ra = (unsigned long) ret_from_fork_asm;
 
 	/*
 	 * New tasks lose permission to use the fpu. This accelerates context
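
Note: taken together, the three LoongArch hunks above move the tail of fork into C. The asm stubs now only marshal arguments (a0 carries prev from the context switch, a1 the pt_regs pointer, and a2/a3 the fn/fn_arg that copy_thread() stashed in reg23/reg24, which switch_to restores into s0/s1), while the actual logic lives in ret_from_fork() and ret_from_kernel_thread(). A minimal user-space mock of that control flow; schedule_tail() and syscall_exit_to_user_mode() here are printf placeholders, not the real primitives:

#include <stdio.h>

struct pt_regs { unsigned long regs[32]; };
struct task_struct { const char *comm; };

/* Stubs standing in for the real kernel primitives. */
static void schedule_tail(struct task_struct *prev)
{
	printf("schedule_tail(%s): finish the context switch\n", prev->comm);
}

static void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	(void)regs;
	printf("syscall_exit_to_user_mode(): exit work, return to user\n");
}

/* Mirrors the C ret_from_fork() added above: user children just exit. */
static void ret_from_fork(struct task_struct *prev, struct pt_regs *regs)
{
	schedule_tail(prev);
	syscall_exit_to_user_mode(regs);
}

/* Mirrors ret_from_kernel_thread(): run fn(fn_arg) before any user exit. */
static void ret_from_kernel_thread(struct task_struct *prev, struct pt_regs *regs,
				   int (*fn)(void *), void *fn_arg)
{
	schedule_tail(prev);
	fn(fn_arg);
	syscall_exit_to_user_mode(regs);
}

static int worker(void *arg)
{
	printf("kernel thread fn(%s)\n", (const char *)arg);
	return 0;
}

int main(void)
{
	struct task_struct prev = { .comm = "parent" };
	struct pt_regs regs = { { 0 } };

	ret_from_fork(&prev, &regs);                           /* user fork path */
	ret_from_kernel_thread(&prev, &regs, worker, "hello"); /* kernel thread path */
	return 0;
}

In the kernel, fn() normally only returns when a kernel thread turns into a user process via kernel_execve(), which is why the kernel-thread path still falls through to syscall_exit_to_user_mode().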

--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h

@@ -52,6 +52,8 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
 DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
 DECLARE_DO_ERROR_INFO(do_trap_break);
 
+asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs);
+asmlinkage void ret_from_fork_user(struct pt_regs *regs);
 asmlinkage void handle_bad_stack(struct pt_regs *regs);
 asmlinkage void do_page_fault(struct pt_regs *regs);
 asmlinkage void do_irq(struct pt_regs *regs);

--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S

@@ -319,17 +319,21 @@ SYM_CODE_END(handle_kernel_stack_overflow)
 ASM_NOKPROBE(handle_kernel_stack_overflow)
 #endif
 
-SYM_CODE_START(ret_from_fork)
+SYM_CODE_START(ret_from_fork_kernel_asm)
 	call schedule_tail
-	beqz s0, 1f	/* not from kernel thread */
-	/* Call fn(arg) */
-	move a0, s1
-	jalr s0
-1:
-	move a0, sp	/* pt_regs */
-	call syscall_exit_to_user_mode
+	move a0, s1	/* fn_arg */
+	move a1, s0	/* fn */
+	move a2, sp	/* pt_regs */
+	call ret_from_fork_kernel
 	j ret_from_exception
-SYM_CODE_END(ret_from_fork)
+SYM_CODE_END(ret_from_fork_kernel_asm)
+
+SYM_CODE_START(ret_from_fork_user_asm)
+	call schedule_tail
+	move a0, sp	/* pt_regs */
+	call ret_from_fork_user
+	j ret_from_exception
+SYM_CODE_END(ret_from_fork_user_asm)
 
 #ifdef CONFIG_IRQ_STACKS
 /*

--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c

@@ -17,7 +17,9 @@
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <linux/personality.h>
+#include <linux/entry-common.h>
 
+#include <asm/asm-prototypes.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/csr.h>
@@ -36,7 +38,8 @@ unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
-extern asmlinkage void ret_from_fork(void);
+extern asmlinkage void ret_from_fork_kernel_asm(void);
+extern asmlinkage void ret_from_fork_user_asm(void);
 
 void noinstr arch_cpu_idle(void)
 {
@@ -206,6 +209,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	return 0;
 }
 
+asmlinkage void ret_from_fork_kernel(void *fn_arg, int (*fn)(void *), struct pt_regs *regs)
+{
+	fn(fn_arg);
+
+	syscall_exit_to_user_mode(regs);
+}
+
+asmlinkage void ret_from_fork_user(struct pt_regs *regs)
+{
+	syscall_exit_to_user_mode(regs);
+}
+
 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long clone_flags = args->flags;
@@ -228,6 +243,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 		p->thread.s[0] = (unsigned long)args->fn;
 		p->thread.s[1] = (unsigned long)args->fn_arg;
+		p->thread.ra = (unsigned long)ret_from_fork_kernel_asm;
 	} else {
 		*childregs = *(current_pt_regs());
 		/* Turn off status.VS */
@@ -237,12 +253,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		if (clone_flags & CLONE_SETTLS)
 			childregs->tp = tls;
 		childregs->a0 = 0; /* Return value of fork() */
-		p->thread.s[0] = 0;
+		p->thread.ra = (unsigned long)ret_from_fork_user_asm;
 	}
 	p->thread.riscv_v_flags = 0;
 	if (has_vector() || has_xtheadvector())
 		riscv_v_thread_alloc(p);
-	p->thread.ra = (unsigned long)ret_from_fork;
 	p->thread.sp = (unsigned long)childregs; /* kernel sp */
 	return 0;
 }
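
Note: the RISC-V side makes the same migration but also splits the path. The old ret_from_fork tested s0 at runtime (beqz s0, 1f) to decide whether a kernel-thread fn had to run; now copy_thread() selects the return path up front, which also lets the old p->thread.s[0] = 0 sentinel disappear. A small stand-alone sketch of that selection; the *_asm functions are printf stand-ins for the real assembly entry points:

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the two asm stubs introduced above. */
static void ret_from_fork_kernel_asm(void)
{
	puts("kernel path: call ret_from_fork_kernel(fn_arg, fn, regs)");
}

static void ret_from_fork_user_asm(void)
{
	puts("user path: call ret_from_fork_user(regs)");
}

typedef void (*ret_path_t)(void);

/* What copy_thread() now encodes in p->thread.ra: a non-NULL args->fn
 * marks a kernel thread, so the branch happens once at clone time
 * instead of on every fork return. */
static ret_path_t pick_return_path(int (*fn)(void *))
{
	return fn ? ret_from_fork_kernel_asm : ret_from_fork_user_asm;
}

static int worker(void *arg)
{
	(void)arg;
	return 0;
}

int main(void)
{
	pick_return_path(worker)();	/* kernel-thread clone */
	pick_return_path(NULL)();	/* user fork */
	return 0;
}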

--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h

@@ -14,6 +14,7 @@
 #include <linux/kmsan.h>
 
 #include <asm/entry-common.h>
+#include <asm/syscall.h>
 
 /*
  * Define dummy _TIF work flags if not defined by the architecture or for
@@ -366,6 +367,15 @@ static __always_inline void exit_to_user_mode(void)
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
+/**
+ * syscall_exit_work - Handle work before returning to user mode
+ * @regs: Pointer to current pt_regs
+ * @work: Current thread syscall work
+ *
+ * Do one-time syscall specific work.
+ */
+void syscall_exit_work(struct pt_regs *regs, unsigned long work);
+
 /**
  * syscall_exit_to_user_mode_work - Handle work before returning to user mode
  * @regs: Pointer to currents pt_regs
@@ -379,7 +389,30 @@ static __always_inline void exit_to_user_mode(void)
  * make the final state transitions. Interrupts must stay disabled between
  * return from this function and the invocation of exit_to_user_mode().
  */
-void syscall_exit_to_user_mode_work(struct pt_regs *regs);
+static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
+{
+	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+	unsigned long nr = syscall_get_nr(current, regs);
+
+	CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
+
+	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
+			local_irq_enable();
+	}
+
+	rseq_syscall(regs);
+
+	/*
+	 * Do one-time syscall specific work. If these work items are
+	 * enabled, we want to run them exactly once per syscall exit with
+	 * interrupts enabled.
+	 */
+	if (unlikely(work & SYSCALL_WORK_EXIT))
+		syscall_exit_work(regs, work);
+	local_irq_disable_exit_to_user();
+	exit_to_user_mode_prepare(regs);
+}
 
 /**
  * syscall_exit_to_user_mode - Handle work before returning to user mode
@@ -410,7 +443,13 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs);
  * exit_to_user_mode(). This function is preferred unless there is a
  * compelling architectural reason to use the separate functions.
  */
-void syscall_exit_to_user_mode(struct pt_regs *regs);
+static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
+{
+	instrumentation_begin();
+	syscall_exit_to_user_mode_work(regs);
+	instrumentation_end();
+	exit_to_user_mode();
+}
 
 /**
  * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
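
Note: this header change turns syscall_exit_to_user_mode_work() and syscall_exit_to_user_mode() into __always_inline helpers, so every caller gets the fast path expanded in place and only the SYSCALL_WORK_EXIT slow path stays out of line in syscall_exit_work() (made non-static in kernel/entry/common.c below). A hedged sketch of how an architecture's C syscall handler consumes this API; the shape follows existing do_syscall()-style handlers, and the arch_* names are illustrative stand-ins, not real kernel functions:

#include <linux/entry-common.h>

/* Hypothetical arch glue. syscall_enter_from_user_mode() and
 * syscall_exit_to_user_mode() are the real generic-entry API;
 * arch_syscall_nr() and arch_dispatch_syscall() are illustrative. */
asmlinkage void do_syscall(struct pt_regs *regs)
{
	long nr = syscall_enter_from_user_mode(regs, arch_syscall_nr(regs));

	if (nr >= 0)
		arch_dispatch_syscall(regs, nr);	/* e.g. sys_call_table[nr](...) */

	/*
	 * Before this merge, an out-of-line call into kernel/entry/common.c;
	 * after it, the fast path expands inline right here.
	 */
	syscall_exit_to_user_mode(regs);
}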

--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c

@@ -146,7 +146,7 @@ static inline bool report_single_step(unsigned long work)
 	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
 }
 
-static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
+void syscall_exit_work(struct pt_regs *regs, unsigned long work)
 {
 	bool step;
 
@@ -173,53 +173,6 @@ static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
 	ptrace_report_syscall_exit(regs, step);
 }
 
-/*
- * Syscall specific exit to user mode preparation. Runs with interrupts
- * enabled.
- */
-static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
-{
-	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
-	unsigned long nr = syscall_get_nr(current, regs);
-
-	CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
-
-	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
-		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
-			local_irq_enable();
-	}
-
-	rseq_syscall(regs);
-
-	/*
-	 * Do one-time syscall specific work. If these work items are
-	 * enabled, we want to run them exactly once per syscall exit with
-	 * interrupts enabled.
-	 */
-	if (unlikely(work & SYSCALL_WORK_EXIT))
-		syscall_exit_work(regs, work);
-}
-
-static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
-{
-	syscall_exit_to_user_mode_prepare(regs);
-	local_irq_disable_exit_to_user();
-	exit_to_user_mode_prepare(regs);
-}
-
-void syscall_exit_to_user_mode_work(struct pt_regs *regs)
-{
-	__syscall_exit_to_user_mode_work(regs);
-}
-
-__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
-{
-	instrumentation_begin();
-	__syscall_exit_to_user_mode_work(regs);
-	instrumentation_end();
-	exit_to_user_mode();
-}
-
 noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
 {
 	enter_from_user_mode(regs);