ANDROID: rust: mm: add bindings for memory management

Add abstractions for working with the memory management types mm_struct
and vm_area_struct. This patch is intended as a follow-up to the memory
management patchset [1] (or may be folded into a future version thereof).

This patch is an improved version of a patch included in the list of
dependencies of the Rust Binder RFC [2]. It is marked ANDROID: because
it has not yet been sent to the mailing list. It is mostly ready, but I
want to go through it one more time before sending it.
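
To give an idea of how the new types compose, here is a minimal usage
sketch (illustrative only, not part of the patch; inspect_current_mm is a
hypothetical caller):

  use kernel::mm::MmGrab;

  fn inspect_current_mm(addr: usize) {
      // Take an mmgrab refcount on current->mm (None for kernel threads).
      let mm = match MmGrab::mmgrab_current() {
          Some(mm) => mm,
          None => return,
      };
      // Upgrade to an mmget refcount; fails if the address space is gone.
      let mm = match mm.mmget_not_zero() {
          Some(mm) => mm,
          None => return,
      };
      // Release with mmput_async on drop, take the mmap read lock, and
      // look up the vma containing `addr`.
      let mm = mm.use_async_put();
      if let Some(guard) = mm.mmap_read_trylock() {
          if let Some(vma) = guard.vma_lookup(addr) {
              let _flags = vma.flags();
          }
      } // mmap read lock released here; mmput_async runs when `mm` drops.
  }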

Link: https://lore.kernel.org/all/20240124-alice-mm-v1-0-d1abcec83c44@google.com/ [1]
Link: https://lore.kernel.org/rust-for-linux/20231101-rust-binder-v1-0-08ba9197f637@google.com/ [2]
Bug: 324206405
Co-developed-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Change-Id: I8d0c71fcf76027ccc08ef83f36b51da4665125d9
Signed-off-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
5 files changed, 473 insertions(+), 0 deletions(-)

rust/bindings/bindings_helper.h

@@ -13,6 +13,7 @@
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/jiffies.h>
+#include <linux/mm.h>
 #include <linux/pid_namespace.h>
 #include <linux/poll.h>
 #include <linux/security.h>

rust/helpers.c

@@ -292,6 +292,55 @@ void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent,
 }
 EXPORT_SYMBOL_GPL(rust_helper_rb_link_node);
 
+void rust_helper_mmgrab(struct mm_struct *mm)
+{
+	mmgrab(mm);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mmgrab);
+
+void rust_helper_mmdrop(struct mm_struct *mm)
+{
+	mmdrop(mm);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mmdrop);
+
+bool rust_helper_mmget_not_zero(struct mm_struct *mm)
+{
+	return mmget_not_zero(mm);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mmget_not_zero);
+
+bool rust_helper_mmap_read_trylock(struct mm_struct *mm)
+{
+	return mmap_read_trylock(mm);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mmap_read_trylock);
+
+void rust_helper_mmap_read_unlock(struct mm_struct *mm)
+{
+	mmap_read_unlock(mm);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mmap_read_unlock);
+
+void rust_helper_mmap_write_lock(struct mm_struct *mm)
+{
+	mmap_write_lock(mm);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mmap_write_lock);
+
+void rust_helper_mmap_write_unlock(struct mm_struct *mm)
+{
+	mmap_write_unlock(mm);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mmap_write_unlock);
+
+struct vm_area_struct *rust_helper_vma_lookup(struct mm_struct *mm,
+					      unsigned long addr)
+{
+	return vma_lookup(mm, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_vma_lookup);
+
 /*
  * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
  * use it in contexts where Rust expects a `usize` like slice (array) indices.

rust/kernel/lib.rs

@@ -41,6 +41,7 @@ pub mod init;
 pub mod ioctl;
 #[cfg(CONFIG_KUNIT)]
 pub mod kunit;
+pub mod mm;
 pub mod page;
 pub mod prelude;
 pub mod print;

rust/kernel/mm.rs (new file, 245 lines)

@@ -0,0 +1,245 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024 Google LLC.
//! Memory management.
//!
//! C header: [`include/linux/mm.h`](../../../../include/linux/mm.h)
use crate::bindings;
use core::{marker::PhantomData, mem::ManuallyDrop, ptr::NonNull};
pub mod virt;
/// A smart pointer that references a `struct mm_struct` and owns an `mmgrab` refcount.
///
/// # Invariants
///
/// An `MmGrab` owns an `mmgrab` refcount to the inner `struct mm_struct`.
pub struct MmGrab {
mm: NonNull<bindings::mm_struct>,
}
impl MmGrab {
    /// Calls `mmgrab` on `current->mm`.
pub fn mmgrab_current() -> Option<Self> {
// SAFETY: It's safe to get the `mm` field from current.
let mm = unsafe {
let current = bindings::get_current();
(*current).mm
};
let mm = NonNull::new(mm)?;
// SAFETY: We just checked that `mm` is not null.
unsafe { bindings::mmgrab(mm.as_ptr()) };
// INVARIANT: We just created an `mmgrab` refcount.
Some(Self { mm })
}
    /// Checks whether the given vma is associated with this mm.
pub fn is_same_mm(&self, area: &virt::Area) -> bool {
// SAFETY: The `vm_mm` field of the area is immutable, so we can read it without
// synchronization.
let vm_mm = unsafe { (*area.as_ptr()).vm_mm };
vm_mm == self.mm.as_ptr()
}
/// Calls `mmget_not_zero` and returns a handle if it succeeds.
pub fn mmget_not_zero(&self) -> Option<MmGet> {
// SAFETY: We know that `mm` is still valid since we hold an `mmgrab` refcount.
let success = unsafe { bindings::mmget_not_zero(self.mm.as_ptr()) };
if success {
Some(MmGet { mm: self.mm })
} else {
None
}
}
}
// SAFETY: It is safe to call `mmdrop` from a thread other than the one where `mmgrab` was called.
unsafe impl Send for MmGrab {}
// SAFETY: All methods on this struct are safe to call in parallel from several threads.
unsafe impl Sync for MmGrab {}
impl Drop for MmGrab {
fn drop(&mut self) {
// SAFETY: This gives up an `mmgrab` refcount to a valid `struct mm_struct`.
// INVARIANT: We own an `mmgrab` refcount, so we can give it up.
unsafe { bindings::mmdrop(self.mm.as_ptr()) };
}
}
/// A smart pointer that references a `struct mm_struct` and owns an `mmget` refcount.
///
/// Values of this type are created using [`MmGrab::mmget_not_zero`].
///
/// # Invariants
///
/// An `MmGet` owns an `mmget` refcount to the inner `struct mm_struct`.
pub struct MmGet {
mm: NonNull<bindings::mm_struct>,
}
impl MmGet {
    /// Locks the mmap write lock.
pub fn mmap_write_lock(&self) -> MmapWriteLock<'_> {
// SAFETY: The pointer is valid since we hold a refcount.
unsafe { bindings::mmap_write_lock(self.mm.as_ptr()) };
// INVARIANT: We just acquired the write lock, so we can transfer to this guard.
//
// The signature of this function ensures that the `MmapWriteLock` will not outlive this
// `mmget` refcount.
MmapWriteLock {
mm: self.mm,
_lifetime: PhantomData,
}
}
    /// Returns a handle that, when dropped, releases the refcount with `mmput_async` instead of `mmput`.
pub fn use_async_put(self) -> MmGetAsync {
// Disable destructor of `self`.
let me = ManuallyDrop::new(self);
MmGetAsync { mm: me.mm }
}
}
impl Drop for MmGet {
fn drop(&mut self) {
// SAFETY: We acquired a refcount when creating this object.
unsafe { bindings::mmput(self.mm.as_ptr()) };
}
}
/// A smart pointer that references a `struct mm_struct` and owns an `mmget` refcount that will be
/// dropped using `mmput_async`.
///
/// Values of this type are created using [`MmGet::use_async_put`].
///
/// # Invariants
///
/// An `MmGetAsync` owns an `mmget` refcount to the inner `struct mm_struct`.
pub struct MmGetAsync {
mm: NonNull<bindings::mm_struct>,
}
impl MmGetAsync {
    /// Locks the mmap write lock.
pub fn mmap_write_lock(&self) -> MmapWriteLock<'_> {
// SAFETY: The pointer is valid since we hold a refcount.
unsafe { bindings::mmap_write_lock(self.mm.as_ptr()) };
// INVARIANT: We just acquired the write lock, so we can transfer to this guard.
//
// The signature of this function ensures that the `MmapWriteLock` will not outlive this
// `mmget` refcount.
MmapWriteLock {
mm: self.mm,
_lifetime: PhantomData,
}
}
    /// Tries to take the mmap read lock.
pub fn mmap_read_trylock(&self) -> Option<MmapReadLock<'_>> {
// SAFETY: The pointer is valid since we hold a refcount.
let success = unsafe { bindings::mmap_read_trylock(self.mm.as_ptr()) };
if success {
// INVARIANT: We just acquired the read lock, so we can transfer to this guard.
//
// The signature of this function ensures that the `MmapReadLock` will not outlive this
// `mmget` refcount.
Some(MmapReadLock {
mm: self.mm,
_lifetime: PhantomData,
})
} else {
None
}
}
}
impl Drop for MmGetAsync {
fn drop(&mut self) {
// SAFETY: We acquired a refcount when creating this object.
unsafe { bindings::mmput_async(self.mm.as_ptr()) };
}
}
/// A guard for the mmap read lock.
///
/// # Invariants
///
/// This `MmapReadLock` guard owns the mmap read lock. For the duration of 'a, the `mmget` refcount
/// will remain positive.
pub struct MmapReadLock<'a> {
mm: NonNull<bindings::mm_struct>,
_lifetime: PhantomData<&'a bindings::mm_struct>,
}
impl<'a> MmapReadLock<'a> {
    /// Looks up a vma at the given address.
pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::Area> {
// SAFETY: The `mm` pointer is known to be valid while this read lock is held.
        let vma = unsafe { bindings::vma_lookup(self.mm.as_ptr(), vma_addr as _) };
if vma.is_null() {
None
} else {
// SAFETY: We just checked that a vma was found, so the pointer is valid. Furthermore,
// the returned area will borrow from this read lock guard, so it can only be used
// while the read lock is still held. The returned reference is immutable, so the
// reference cannot be used to modify the area.
unsafe { Some(virt::Area::from_ptr(vma)) }
}
}
}
impl Drop for MmapReadLock<'_> {
fn drop(&mut self) {
// SAFETY: We acquired the lock when creating this object.
unsafe { bindings::mmap_read_unlock(self.mm.as_ptr()) };
}
}
/// A guard for the mmap write lock.
///
/// # Invariants
///
/// This `MmapWriteLock` guard owns the mmap write lock. For the duration of 'a, the `mmget` refcount
/// will remain positive.
pub struct MmapWriteLock<'a> {
mm: NonNull<bindings::mm_struct>,
_lifetime: PhantomData<&'a mut bindings::mm_struct>,
}
impl<'a> MmapWriteLock<'a> {
    /// Looks up a vma at the given address.
pub fn vma_lookup(&mut self, vma_addr: usize) -> Option<&mut virt::Area> {
        // SAFETY: The `mm` pointer is known to be valid while this write lock is held.
        let vma = unsafe { bindings::vma_lookup(self.mm.as_ptr(), vma_addr as _) };
if vma.is_null() {
None
} else {
// SAFETY: We just checked that a vma was found, so the pointer is valid. Furthermore,
// the returned area will borrow from this write lock guard, so it can only be used
// while the write lock is still held. We hold the write lock, so mutable operations on
// the area are okay.
unsafe { Some(virt::Area::from_ptr_mut(vma)) }
}
}
}
impl Drop for MmapWriteLock<'_> {
fn drop(&mut self) {
// SAFETY: We acquired the lock when creating this object.
unsafe { bindings::mmap_write_unlock(self.mm.as_ptr()) };
}
}
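
Since vmas are only reachable through the lock guards, vma accesses are
confined to the mmap lock. A sketch of the write-lock path (illustrative
only, not part of the patch; map_page and the EFAULT policy are
hypothetical):

  use kernel::error::{code::EFAULT, Result};
  use kernel::mm::MmGet;
  use kernel::page::Page;

  fn map_page(mm: &MmGet, addr: usize, page: &Page) -> Result {
      // The guard releases the mmap write lock when it goes out of scope.
      let mut guard = mm.mmap_write_lock();
      match guard.vma_lookup(addr) {
          // `vma` borrows from the guard, so the insertion happens while
          // the mmap write lock is still held.
          Some(vma) => vma.vm_insert_page(addr, page),
          None => Err(EFAULT),
      }
  }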

rust/kernel/mm/virt.rs (new file, 177 lines)

@@ -0,0 +1,177 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024 Google LLC.
//! Virtual memory.
use crate::{
bindings,
error::{to_result, Result},
page::Page,
types::Opaque,
};
/// A wrapper for the kernel's `struct vm_area_struct`.
///
/// It represents an area of virtual memory.
#[repr(transparent)]
pub struct Area {
vma: Opaque<bindings::vm_area_struct>,
}
impl Area {
/// Access a virtual memory area given a raw pointer.
///
/// # Safety
///
/// Callers must ensure that `vma` is non-null and valid for the duration of the new area's
/// lifetime, with shared access. The caller must ensure that using the pointer for immutable
/// operations is okay.
pub unsafe fn from_ptr<'a>(vma: *const bindings::vm_area_struct) -> &'a Self {
// SAFETY: The caller ensures that the pointer is valid.
unsafe { &*vma.cast() }
}
/// Access a virtual memory area given a raw pointer.
///
/// # Safety
///
/// Callers must ensure that `vma` is non-null and valid for the duration of the new area's
/// lifetime, with exclusive access. The caller must ensure that using the pointer for
/// immutable and mutable operations is okay.
pub unsafe fn from_ptr_mut<'a>(vma: *mut bindings::vm_area_struct) -> &'a mut Self {
// SAFETY: The caller ensures that the pointer is valid.
unsafe { &mut *vma.cast() }
}
/// Returns a raw pointer to this area.
pub fn as_ptr(&self) -> *const bindings::vm_area_struct {
self as *const Area as *const bindings::vm_area_struct
}
/// Returns the flags associated with the virtual memory area.
///
/// The possible flags are a combination of the constants in [`flags`].
pub fn flags(&self) -> usize {
// SAFETY: `self.vma` is valid by the type invariants.
unsafe { (*self.vma.get()).__bindgen_anon_2.vm_flags as _ }
}
/// Sets the flags associated with the virtual memory area.
///
/// The possible flags are a combination of the constants in [`flags`].
pub fn set_flags(&mut self, flags: usize) {
// SAFETY: `self.vma` is valid by the type invariants.
unsafe { (*self.vma.get()).__bindgen_anon_2.vm_flags = flags as _ };
}
/// Returns the start address of the virtual memory area.
pub fn start(&self) -> usize {
// SAFETY: `self.vma` is valid by the type invariants.
unsafe { (*self.vma.get()).__bindgen_anon_1.__bindgen_anon_1.vm_start as _ }
}
/// Returns the end address of the virtual memory area.
pub fn end(&self) -> usize {
// SAFETY: `self.vma` is valid by the type invariants.
unsafe { (*self.vma.get()).__bindgen_anon_1.__bindgen_anon_1.vm_end as _ }
}
/// Maps a single page at the given address within the virtual memory area.
pub fn vm_insert_page(&mut self, address: usize, page: &Page) -> Result {
// SAFETY: The page is guaranteed to be order 0. The range of `address` is already checked
// by `vm_insert_page`. `self.vma` and `page.as_ptr()` are guaranteed by their respective
// type invariants to be valid.
to_result(unsafe { bindings::vm_insert_page(self.vma.get(), address as _, page.as_ptr()) })
}
    /// Unmaps pages in the given page range.
pub fn zap_page_range_single(&self, address: usize, size: usize) {
// SAFETY: The `vma` pointer is valid.
unsafe {
bindings::zap_page_range_single(
self.vma.get(),
address as _,
size as _,
core::ptr::null_mut(),
)
};
}
}
/// Container for [`Area`] flags.
pub mod flags {
use crate::bindings;
/// No flags are set.
pub const NONE: usize = bindings::VM_NONE as _;
/// Mapping allows reads.
pub const READ: usize = bindings::VM_READ as _;
/// Mapping allows writes.
pub const WRITE: usize = bindings::VM_WRITE as _;
/// Mapping allows execution.
pub const EXEC: usize = bindings::VM_EXEC as _;
/// Mapping is shared.
pub const SHARED: usize = bindings::VM_SHARED as _;
/// Mapping may be updated to allow reads.
pub const MAYREAD: usize = bindings::VM_MAYREAD as _;
/// Mapping may be updated to allow writes.
pub const MAYWRITE: usize = bindings::VM_MAYWRITE as _;
/// Mapping may be updated to allow execution.
pub const MAYEXEC: usize = bindings::VM_MAYEXEC as _;
/// Mapping may be updated to be shared.
pub const MAYSHARE: usize = bindings::VM_MAYSHARE as _;
/// Do not copy this vma on fork.
pub const DONTCOPY: usize = bindings::VM_DONTCOPY as _;
/// Cannot expand with mremap().
pub const DONTEXPAND: usize = bindings::VM_DONTEXPAND as _;
/// Lock the pages covered when they are faulted in.
pub const LOCKONFAULT: usize = bindings::VM_LOCKONFAULT as _;
/// Is a VM accounted object.
pub const ACCOUNT: usize = bindings::VM_ACCOUNT as _;
    /// Should the VM suppress accounting.
pub const NORESERVE: usize = bindings::VM_NORESERVE as _;
/// Huge TLB Page VM.
pub const HUGETLB: usize = bindings::VM_HUGETLB as _;
/// Synchronous page faults.
pub const SYNC: usize = bindings::VM_SYNC as _;
/// Architecture-specific flag.
pub const ARCH_1: usize = bindings::VM_ARCH_1 as _;
    /// Wipe VMA contents in child.
pub const WIPEONFORK: usize = bindings::VM_WIPEONFORK as _;
/// Do not include in the core dump.
pub const DONTDUMP: usize = bindings::VM_DONTDUMP as _;
/// Not soft dirty clean area.
pub const SOFTDIRTY: usize = bindings::VM_SOFTDIRTY as _;
/// Can contain "struct page" and pure PFN pages.
pub const MIXEDMAP: usize = bindings::VM_MIXEDMAP as _;
/// MADV_HUGEPAGE marked this vma.
pub const HUGEPAGE: usize = bindings::VM_HUGEPAGE as _;
/// MADV_NOHUGEPAGE marked this vma.
pub const NOHUGEPAGE: usize = bindings::VM_NOHUGEPAGE as _;
/// KSM may merge identical pages.
pub const MERGEABLE: usize = bindings::VM_MERGEABLE as _;
}
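
As a final illustration, a short sketch of how the flag constants combine
with the Area accessors (illustrative only, not part of the patch; the
policy shown is hypothetical):

  use kernel::mm::virt::{flags, Area};

  // Hypothetical helper: keep a mapping from being copied or resized.
  fn pin_mapping(vma: &mut Area) {
      // Flags are plain usize bitmasks: test with `&`, combine with `|`.
      if vma.flags() & flags::SHARED != 0 {
          return; // leave shared mappings alone (hypothetical policy)
      }
      vma.set_flags(vma.flags() | flags::DONTCOPY | flags::DONTEXPAND);
  }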