ANDROID: rust: use Rust atomics for Arc refcount
Currently, the refcounting logic in Arc uses Rust helpers to modify the
refcount. These extra method calls can be expensive, and are not always
removed even with cross-lang LTO. Instead, we reimplement them in Rust,
which enables rustc to inline these methods. The Rust implementations
are private to the `Arc` module, so they cannot be used anywhere else.

Bug: 324206405
Change-Id: Ibf3bcb7ca493ae9dc96012c9b75630608ec96449
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
parent 282b78baf0
commit c6c6be80a8
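For context, the shape of the change: before, `bindings::refcount_inc`
resolved to an out-of-line C shim, which rustc sees as an opaque extern
call it cannot inline. A minimal userspace sketch of the two shapes
(illustrative names, not the kernel's symbols):

    use std::sync::atomic::{AtomicI32, Ordering};

    // Out-of-line shim, as with the removed rust_helper_* functions below:
    // the body lives in a separate C object file, so rustc cannot inline
    // the call without cross-language LTO.
    extern "C" {
        fn helper_refcount_inc(counter: *mut i32); // hypothetical
    }

    // Pure-Rust equivalent: the body is visible to rustc, so the
    // fetch_add can be inlined at every call site.
    #[inline(always)]
    fn refcount_inc_inline(counter: &AtomicI32) {
        counter.fetch_add(1, Ordering::Relaxed);
    }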
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -123,24 +123,6 @@ void rust_helper_kunmap_local(const void *addr)
 }
 EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
 
-refcount_t rust_helper_REFCOUNT_INIT(int n)
-{
-	return (refcount_t)REFCOUNT_INIT(n);
-}
-EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT);
-
-void rust_helper_refcount_inc(refcount_t *r)
-{
-	refcount_inc(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);
-
-bool rust_helper_refcount_dec_and_test(refcount_t *r)
-{
-	return refcount_dec_and_test(r);
-}
-EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
-
 __force void *rust_helper_ERR_PTR(long err)
 {
 	return ERR_PTR(err);
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -36,6 +36,10 @@ use macros::pin_data;
 
 mod std_vendor;
 
+// Use Rust implementations of refcount methods in Arc.
+mod refcount_t;
+use self::refcount_t::{refcount_dec_and_test, refcount_inc, REFCOUNT_INIT};
+
 /// A reference-counted pointer to an instance of `T`.
 ///
 /// The reference count is incremented when new instances of [`Arc`] are created, and decremented
@@ -166,7 +170,7 @@ impl<T> Arc<T> {
         // INVARIANT: The refcount is initialised to a non-zero value.
         let value = ArcInner {
             // SAFETY: There are no safety requirements for this FFI call.
-            refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+            refcount: Opaque::new(unsafe { REFCOUNT_INIT(1) }),
             data: contents,
         };
 
@@ -271,11 +275,11 @@ impl<T: ?Sized> Arc<T> {
         // SAFETY: If the refcount reaches a non-zero value, then we have destroyed this `Arc` and
         // will return without running its destructor. If the refcount reaches zero, then there are
         // no other arcs, and we can create a `UniqueArc`.
-        let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
+        let is_zero = unsafe { refcount_dec_and_test(refcount) };
         if is_zero {
             // SAFETY: We have exclusive access to the arc, so we can perform unsynchronized
             // accesses to the refcount.
-            unsafe { core::ptr::write(refcount, bindings::REFCOUNT_INIT(1)) };
+            unsafe { core::ptr::write(refcount, REFCOUNT_INIT(1)) };
 
             // SAFETY: We own one refcount, so we can create a `UniqueArc`. It needs to be pinned,
             // since an `Arc` is pinned.
@@ -367,7 +371,7 @@ impl<T: ?Sized> Clone for Arc<T> {
         // INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero.
         // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
         // safe to increment the refcount.
-        unsafe { bindings::refcount_inc(self.ptr.as_ref().refcount.get()) };
+        unsafe { refcount_inc(self.ptr.as_ref().refcount.get()) };
 
         // SAFETY: We just incremented the refcount. This increment is now owned by the new `Arc`.
         unsafe { Self::from_inner(self.ptr) }
@@ -385,7 +389,7 @@ impl<T: ?Sized> Drop for Arc<T> {
         // INVARIANT: If the refcount reaches zero, there are no other instances of `Arc`, and
         // this instance is being dropped, so the broken invariant is not observable.
         // SAFETY: Also by the type invariant, we are allowed to decrement the refcount.
-        let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
+        let is_zero = unsafe { refcount_dec_and_test(refcount) };
         if is_zero {
             // The count reached zero, we must free the memory.
             //
@@ -637,7 +641,7 @@ impl<T> UniqueArc<T> {
         // INVARIANT: The refcount is initialised to a non-zero value.
         let inner = Box::try_init::<AllocError>(try_init!(ArcInner {
             // SAFETY: There are no safety requirements for this FFI call.
-            refcount: Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) }),
+            refcount: Opaque::new(unsafe { REFCOUNT_INIT(1) }),
             data <- init::uninit::<T, AllocError>(),
         }? AllocError))?;
         Ok(UniqueArc {
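The `Arc` API itself is unchanged by this patch; only the refcount
plumbing underneath moves from C shims to module-private Rust. As an
illustration (a sketch against the kernel crate's existing `Arc` API,
not code added by this patch), each operation now lands on one of the
three reimplemented functions:

    let a = Arc::try_new(42)?; // REFCOUNT_INIT(1)
    let b = a.clone();         // refcount_inc
    drop(a);                   // refcount_dec_and_test -> false
    drop(b);                   // refcount_dec_and_test -> true, frees the allocation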
rust/kernel/sync/arc/refcount_t.rs (new file, 120 lines)
--- /dev/null
+++ b/rust/kernel/sync/arc/refcount_t.rs
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! A reimplementation of the `refcount_t` methods in Rust.
+//!
+//! These methods are only used for the `Arc` type. Since the `Arc` type does not expose its
+//! `refcount_t` to C code, this ensures that we are not mixing C and Rust atomics on the same
+//! atomic variable, which is the current recommendation for using Rust atomics in the kernel.
+
+use crate::bindings::*;
+use core::ffi::c_int;
+use core::sync::atomic::{self, Ordering};
+
+use crate::bindings::{
+    refcount_saturation_type_REFCOUNT_ADD_OVF as REFCOUNT_ADD_OVF,
+    refcount_saturation_type_REFCOUNT_ADD_UAF as REFCOUNT_ADD_UAF,
+    refcount_saturation_type_REFCOUNT_SUB_UAF as REFCOUNT_SUB_UAF,
+};
+
+// Use a trait to pick the right atomic type for c_int.
+trait HasAtomic {
+    type AtomicInt;
+}
+impl HasAtomic for i16 {
+    type AtomicInt = atomic::AtomicI16;
+}
+impl HasAtomic for i32 {
+    type AtomicInt = atomic::AtomicI32;
+}
+impl HasAtomic for i64 {
+    type AtomicInt = atomic::AtomicI64;
+}
+impl HasAtomic for isize {
+    type AtomicInt = atomic::AtomicIsize;
+}
+
+type AtomicCInt = <c_int as HasAtomic>::AtomicInt;
+
+/// Create a new `refcount_t` with the given initial refcount.
+///
+/// # Safety
+///
+/// This method is safe to call.
+#[inline(always)]
+#[allow(non_snake_case)]
+pub(crate) unsafe fn REFCOUNT_INIT(n: c_int) -> refcount_t {
+    refcount_t {
+        refs: atomic_t { counter: n },
+    }
+}
+
+/// Increment the refcount.
+///
+/// Saturates if the refcount wraps around.
+///
+/// # Safety
+///
+/// * The provided pointer must point at a valid `refcount_t`.
+/// * The `refcount_t` may only be accessed concurrently by other atomic
+///   operations defined in this file.
+#[inline(always)]
+pub(crate) unsafe fn refcount_inc(r: *mut refcount_t) {
+    // SAFETY: All concurrent accesses agree that this is currently an
+    // `AtomicCInt`.
+    let atomic = unsafe { &*r.cast::<AtomicCInt>() };
+    let old = atomic.fetch_add(1, Ordering::Relaxed);
+
+    if old == 0 {
+        // SAFETY: The caller guarantees that this is okay to call.
+        unsafe { warn_saturate(r, REFCOUNT_ADD_UAF) };
+    } else if old.wrapping_add(1) <= 0 {
+        // SAFETY: The caller guarantees that this is okay to call.
+        unsafe { warn_saturate(r, REFCOUNT_ADD_OVF) };
+    }
+}
+
+/// Decrement the refcount and return whether we dropped it to zero.
+///
+/// If this returns `true`, then this call dropped the refcount to zero and
+/// all previous operations on the refcount happen-before this call.
+///
+/// # Safety
+///
+/// * The provided pointer must point at a valid `refcount_t`.
+/// * The `refcount_t` may only be accessed concurrently by other atomic
+///   operations defined in this file.
+#[inline(always)]
+#[must_use]
+pub(crate) unsafe fn refcount_dec_and_test(r: *mut refcount_t) -> bool {
+    // SAFETY: All concurrent accesses agree that this is currently an
+    // `AtomicCInt`.
+    let atomic = unsafe { &*r.cast::<AtomicCInt>() };
+    let old = atomic.fetch_sub(1, Ordering::Release);
+
+    if old == 1 {
+        atomic::fence(Ordering::Acquire);
+        return true;
+    }
+
+    if old <= 0 {
+        // SAFETY: The caller guarantees that this is okay to call.
+        unsafe { warn_saturate(r, REFCOUNT_SUB_UAF) };
+    }
+
+    false
+}
+
+/// A helper function so that we can use `#[cold]` to hint to the branch predictor.
+///
+/// # Safety
+///
+/// * The provided pointer must point at a valid `refcount_t`.
+/// * The `refcount_t` may only be accessed concurrently by other atomic
+///   operations defined in this file.
+#[cold]
+unsafe fn warn_saturate(r: *mut refcount_t, t: refcount_saturation_type) {
+    // SAFETY: Caller promises that `r` is not dangling.
+    unsafe { refcount_warn_saturate(r, t) };
+}
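A note on the memory-ordering pattern in `refcount_dec_and_test` above:
the decrement uses `Release` so that every access to the object made
while holding a reference is published before the count drops, and the
final decrementer's `Acquire` fence pulls all of those publications in
before the object is freed. A minimal, runnable userspace sketch of the
same pattern (std atomics, illustrative names):

    use std::sync::atomic::{fence, AtomicI32, Ordering};

    /// Returns true iff this call dropped the count to zero.
    fn dec_and_test(refs: &AtomicI32) -> bool {
        // Release: publish this thread's writes to the shared object.
        if refs.fetch_sub(1, Ordering::Release) == 1 {
            // Acquire: observe the writes published by every other
            // decrement, so the caller may safely tear the object down.
            fence(Ordering::Acquire);
            return true;
        }
        false
    }

    fn main() {
        let refs = AtomicI32::new(2);
        assert!(!dec_and_test(&refs));
        assert!(dec_and_test(&refs)); // last reference: safe to free
    }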