Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-07-05 13:25:20 +02:00)

Conceptually, we want the memory mappings to always be up to date and represent whatever is in the TLB. To ensure that, we need to sync them over in the userspace case and for the kernel we need to process the mappings.

The kernel will call flush_tlb_* if page table entries that were valid before become invalid. Unfortunately, this is not the case if entries are added. As such, change both flush_tlb_* and set_ptes to track the memory range that has to be synchronized.

For the kernel, we need to execute a flush_tlb_kern_* immediately, but we can wait for the first page fault in the case of set_ptes. For userspace, in contrast, we only store that a range of memory needs to be synced and do so whenever we switch to that process.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20240703134536.1161108-13-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
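A minimal sketch of the range-tracking idea described above, assuming hypothetical sync_start/sync_end fields on the UML mm context (the actual patch may name these differently): flush_tlb_* and set_ptes would record the affected range with a helper like this, and um_tlb_sync() (called at the end of the file below) would later replay it into the target address space.

#include <linux/minmax.h>
#include <linux/mm_types.h>

/* Illustrative helper; the context field names are assumptions, not the real API. */
static inline void um_tlb_mark_sync(struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
{
        if (mm->context.sync_end == 0) {
                /* nothing pending yet: start a fresh range */
                mm->context.sync_start = start;
                mm->context.sync_end = end;
        } else {
                /* widen the pending range so it also covers the new entries */
                mm->context.sync_start = min(mm->context.sync_start, start);
                mm->context.sync_end = max(mm->context.sync_end, end);
        }
}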
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>

#include <as-layout.h>
#include <kern.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

extern void start_kernel(void);

static int __init start_kernel_proc(void *unused)
{
        block_signals_trace();

        cpu_tasks[0].task = current;

        start_kernel();
        return 0;
}

extern int userspace_pid[];

extern char cpu0_irqstack[];

int __init start_uml(void)
{
        stack_protections((unsigned long) &cpu0_irqstack);
        set_sigstack(cpu0_irqstack, THREAD_SIZE);

        init_new_thread_signals();

        init_task.thread.request.u.thread.proc = start_kernel_proc;
        init_task.thread.request.u.thread.arg = NULL;
        return start_idle_thread(task_stack_page(&init_task),
                                 &init_task.thread.switch_buf);
}

unsigned long current_stub_stack(void)
{
        if (current->mm == NULL)
                return 0;

        return current->mm->context.id.stack;
}

struct mm_id *current_mm_id(void)
{
        if (current->mm == NULL)
                return NULL;

        return &current->mm->context.id;
}

void current_mm_sync(void)
{
        if (current->mm == NULL)
                return;

        um_tlb_sync(current->mm);
}
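For context, a hedged sketch of where the deferred userspace sync might be driven, following the commit message's note that the sync happens "whenever we switch to that process"; the call site below is hypothetical and only illustrates how current_mm_sync() is meant to be used, not the actual skas run loop.

/* Hypothetical call site, for illustration only. */
static void resume_userspace_process(void)
{
        /*
         * Replay any page table changes that were merely recorded while
         * the process was not running, so its address space matches the
         * page tables before it resumes.
         */
        current_mm_sync();

        /* ... enter the skas run loop for this process ... */
}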