ANDROID: rust_binder: properly handle critical refcount increments

Whenever we need to deliver a refcount increment to the current thread,
we first try to deliver it by scheduling the `Node` itself. This is
expected to work most of the time, but the `Node` may already be
scheduled on a different list. In that case, we allocate a new wrapper
struct around the node and schedule that on the current thread instead.

Since the wrapper is used only when the `Node` is already scheduled, we
should avoid allocating memory for these wrappers outside of rare edge
cases.
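
Concretely, the fix turns the caller into a short retry loop; the
following is condensed from the `Process::get_node` change in this
patch:

    // Condensed from Process::get_node() below: try without a wrapper first;
    // allocate one only when the critical increment could not be delivered.
    let mut wrapper = None;
    for _ in 0..2 {
        match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
            Err(err) => return Err(err),
            Ok(Ok(node_ref)) => return Ok(node_ref),
            Ok(Err(CouldNotDeliverCriticalIncrement)) => {
                wrapper = Some(CritIncrWrapper::new()?);
            }
        }
    }
    // The second attempt always carries a wrapper, so it cannot fail this way.
    unreachable!()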

After using a Pixel phone for a few minutes with this patch applied, I
observed three calls to `CritIncrWrapper::new`. This confirms that the
edge case is rare, but also that it can happen in practice. None of the
assertions were triggered.

Bug: 333535706
Change-Id: I26d694205bf42c935886bf3561ebd407e67895ee
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Author: Alice Ryhl <aliceryhl@google.com>
Date:   2024-04-09 15:18:38 +00:00
Commit: 08dde7cab7
Parent: 39285e9a6e

3 changed files, 182 insertions(+), 23 deletions(-)

--- a/node.rs
+++ b/node.rs

@@ -25,6 +25,9 @@ use crate::{
     DArc, DLArc, DTRWrap, DeliverToRead,
 };
 
+mod wrapper;
+pub(crate) use self::wrapper::CritIncrWrapper;
+
 #[derive(Debug)]
 pub(crate) struct CouldNotDeliverCriticalIncrement;
 
@@ -59,16 +62,26 @@ struct DeliveryState {
     /// Is the `Node` currently scheduled?
     has_pushed_node: bool,
 
+    /// Is a wrapper currently scheduled?
+    ///
+    /// The wrapper is used only for strong zero2one increments.
+    has_pushed_wrapper: bool,
+
     /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
+    ///
+    /// Weak zero2one operations are always scheduled using the `Node`.
     has_weak_zero2one: bool,
 
-    /// Is the currently scheduled `Node` scheduled due to a strong zero2one increment?
+    /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
+    ///
+    /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
+    /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
     has_strong_zero2one: bool,
 }
 
 impl DeliveryState {
     fn should_normal_push(&self) -> bool {
-        !self.has_pushed_node
+        !self.has_pushed_node && !self.has_pushed_wrapper
     }
 
     fn did_normal_push(&mut self) {
@@ -105,6 +118,13 @@ impl DeliveryState {
         self.has_pushed_node = true;
         self.has_strong_zero2one = true;
     }
+
+    fn did_push_strong_zero2one_wrapper(&mut self) {
+        assert!(self.should_push_strong_zero2one());
+        assert!(!self.can_push_strong_zero2one_normally());
+        self.has_pushed_wrapper = true;
+        self.has_strong_zero2one = true;
+    }
 }
 
 struct CountState {
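
As a standalone illustration of the flag protocol (a runnable model, not
the driver code; the bodies of `should_push_strong_zero2one` and
`can_push_strong_zero2one_normally` are guessed from their call sites,
since this hunk only shows their asserts):

    // Minimal model of the DeliveryState protocol, runnable with plain rustc.
    #[derive(Default, Debug)]
    struct DeliveryState {
        has_pushed_node: bool,
        has_pushed_wrapper: bool,
        has_weak_zero2one: bool,
        has_strong_zero2one: bool,
    }

    impl DeliveryState {
        // Assumed: a strong zero2one increment needs scheduling unless one is
        // already pending.
        fn should_push_strong_zero2one(&self) -> bool {
            !self.has_strong_zero2one
        }

        // Assumed: the `Node` itself can carry the increment only if neither
        // it nor a wrapper is already queued.
        fn can_push_strong_zero2one_normally(&self) -> bool {
            !self.has_pushed_node && !self.has_pushed_wrapper
        }

        // As in the hunk above: record the fallback to the wrapper.
        fn did_push_strong_zero2one_wrapper(&mut self) {
            assert!(self.should_push_strong_zero2one());
            assert!(!self.can_push_strong_zero2one_normally());
            self.has_pushed_wrapper = true;
            self.has_strong_zero2one = true;
        }
    }

    fn main() {
        let mut ds = DeliveryState::default();
        // The `Node` is already queued for some other delivery...
        ds.has_pushed_node = true;
        // ...so a strong zero2one increment must take the wrapper path.
        assert!(!ds.can_push_strong_zero2one_normally());
        ds.did_push_strong_zero2one_wrapper();
        println!("{ds:?}");
    }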
@@ -202,6 +222,7 @@ impl Node {
             weak: CountState::new(),
             delivery_state: DeliveryState {
                 has_pushed_node: false,
+                has_pushed_wrapper: false,
                 has_weak_zero2one: false,
                 has_strong_zero2one: false,
             },
@@ -456,6 +477,25 @@ impl Node {
         }
     }
 
+    pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
+        self: &DArc<Self>,
+        strong: bool,
+        wrapper: CritIncrWrapper,
+        owner_inner: &mut ProcessInner,
+    ) -> Option<DLArc<dyn DeliverToRead>> {
+        match self.incr_refcount_allow_zero2one(strong, owner_inner) {
+            Ok(Some(node)) => Some(node as _),
+            Ok(None) => None,
+            Err(CouldNotDeliverCriticalIncrement) => {
+                assert!(strong);
+                let inner = self.inner.access_mut(owner_inner);
+                inner.strong.count += 1;
+                inner.delivery_state.did_push_strong_zero2one_wrapper();
+                Some(wrapper.init(self.clone()))
+            }
+        }
+    }
+
     pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
         self.owner
             .inner
@@ -577,17 +617,15 @@ impl Node {
         }
         None
     }
-}
-
-impl DeliverToRead for Node {
-    fn do_work(self: DArc<Self>, _thread: &Thread, writer: &mut UserSliceWriter) -> Result<bool> {
-        let mut owner_inner = self.owner.inner.lock();
-        let inner = self.inner.access_mut(&mut owner_inner);
-
-        inner.delivery_state.has_pushed_node = false;
-        inner.delivery_state.has_weak_zero2one = false;
-        inner.delivery_state.has_strong_zero2one = false;
+
+    /// This is split into a separate function since it's called by both `Node::do_work` and
+    /// `NodeWrapper::do_work`.
+    fn do_work_locked(
+        &self,
+        writer: &mut UserSliceWriter,
+        mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
+    ) -> Result<bool> {
+        let inner = self.inner.access_mut(&mut guard);
 
         let strong = inner.strong.count > 0;
         let has_strong = inner.strong.has_count;
         let weak = strong || inner.weak.count > 0;
@@ -614,9 +652,9 @@ impl DeliverToRead for Node {
         }
         if no_active_inc_refs && !weak {
             // Remove the node if there are no references to it.
-            owner_inner.remove_node(self.ptr);
+            guard.remove_node(self.ptr);
         }
-        drop(owner_inner);
+        drop(guard);
 
         if weak && !has_weak {
             self.write(writer, BR_INCREFS)?;
@@ -633,6 +671,30 @@ impl DeliverToRead for Node {
 
         Ok(true)
     }
+}
+
+impl DeliverToRead for Node {
+    fn do_work(self: DArc<Self>, _thread: &Thread, writer: &mut UserSliceWriter) -> Result<bool> {
+        let mut owner_inner = self.owner.inner.lock();
+        let inner = self.inner.access_mut(&mut owner_inner);
+
+        assert!(inner.delivery_state.has_pushed_node);
+        if inner.delivery_state.has_pushed_wrapper {
+            // If the wrapper is scheduled, then we are either a normal push or weak zero2one
+            // increment, and the wrapper is a strong zero2one increment, so the wrapper always
+            // takes precedence over us.
+            assert!(inner.delivery_state.has_strong_zero2one);
+            inner.delivery_state.has_pushed_node = false;
+            inner.delivery_state.has_weak_zero2one = false;
+            return Ok(true);
+        }
+
+        inner.delivery_state.has_pushed_node = false;
+        inner.delivery_state.has_weak_zero2one = false;
+        inner.delivery_state.has_strong_zero2one = false;
+
+        self.do_work_locked(writer, owner_inner)
+    }
 
     fn cancel(self: DArc<Self>) {}
     fn on_thread_selected(&self, _thread: &Thread) {}
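
A note on the refactor above: `do_work_locked` takes the lock guard by
value, so the callee controls the unlock point and the caller cannot
touch the protected state afterwards. The same discipline, sketched with
std's `Mutex` (an analogy only; the driver uses a kernel spinlock
`Guard` and its own types):

    use std::sync::{Mutex, MutexGuard};

    struct Inner {
        strong_count: usize,
    }

    struct Node {
        inner: Mutex<Inner>,
    }

    impl Node {
        // Consuming the guard makes the unlock explicit and local: after
        // `drop(guard)`, no code in this function can touch `Inner`.
        fn do_work_locked(&self, guard: MutexGuard<'_, Inner>) -> bool {
            let strong = guard.strong_count > 0;
            drop(guard); // unlock before any slow work (e.g. userspace writes)
            strong
        }

        fn do_work(&self) -> bool {
            let guard = self.inner.lock().unwrap();
            self.do_work_locked(guard)
        }
    }

    fn main() {
        let node = Node { inner: Mutex::new(Inner { strong_count: 1 }) };
        assert!(node.do_work());
    }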

--- /dev/null
+++ b/node/wrapper.rs

@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024 Google LLC.
+
+use kernel::{
+    list::{ListArc, ListArcSafe},
+    prelude::*,
+    seq_file::SeqFile,
+    seq_print,
+    sync::UniqueArc,
+    uaccess::UserSliceWriter,
+};
+
+use crate::{node::Node, thread::Thread, DArc, DLArc, DTRWrap, DeliverToRead};
+
+use core::mem::MaybeUninit;
+
+pub(crate) struct CritIncrWrapper {
+    inner: UniqueArc<MaybeUninit<DTRWrap<NodeWrapper>>>,
+}
+
+impl CritIncrWrapper {
+    pub(crate) fn new() -> Result<Self> {
+        Ok(CritIncrWrapper {
+            inner: UniqueArc::try_new_uninit()?,
+        })
+    }
+
+    pub(super) fn init(self, node: DArc<Node>) -> DLArc<dyn DeliverToRead> {
+        match self.inner.pin_init_with(DTRWrap::new(NodeWrapper { node })) {
+            Ok(initialized) => ListArc::from_pin_unique(initialized) as _,
+            Err(err) => match err {},
+        }
+    }
+}
+
+struct NodeWrapper {
+    node: DArc<Node>,
+}
+
+kernel::list::impl_list_arc_safe! {
+    impl ListArcSafe<0> for NodeWrapper {
+        untracked;
+    }
+}
+
+impl DeliverToRead for NodeWrapper {
+    fn do_work(self: DArc<Self>, _thread: &Thread, writer: &mut UserSliceWriter) -> Result<bool> {
+        let node = &self.node;
+        let mut owner_inner = node.owner.inner.lock();
+        let inner = node.inner.access_mut(&mut owner_inner);
+
+        let ds = &mut inner.delivery_state;
+
+        assert!(ds.has_pushed_wrapper);
+        assert!(ds.has_strong_zero2one);
+        ds.has_pushed_wrapper = false;
+        ds.has_strong_zero2one = false;
+
+        node.do_work_locked(writer, owner_inner)
+    }
+
+    fn cancel(self: DArc<Self>) {}
+    fn on_thread_selected(&self, _thread: &Thread) {}
+
+    fn should_sync_wakeup(&self) -> bool {
+        false
+    }
+
+    #[inline(never)]
+    fn debug_print(&self, m: &mut SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+        seq_print!(
+            m,
+            "{}node work {}: u{:016x} c{:016x}\n",
+            prefix,
+            self.node.debug_id,
+            self.node.ptr,
+            self.node.cookie,
+        );
+        Ok(())
+    }
+}
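
One detail worth noting in `CritIncrWrapper::init`: the initializer
passed to `pin_init_with` cannot fail, so its error type is uninhabited
and `Err(err) => match err {}` is a compile-time proof that the branch
is dead. A self-contained illustration of the idiom using
`core::convert::Infallible`:

    use core::convert::Infallible;

    fn always_ok() -> Result<u32, Infallible> {
        Ok(42)
    }

    fn main() {
        let value = match always_ok() {
            Ok(v) => v,
            // `Infallible` has no values, so this arm can never execute; an
            // empty match on it type-checks as any type. This is the same
            // trick as `Err(err) => match err {}` in CritIncrWrapper::init.
            Err(err) => match err {},
        };
        assert_eq!(value, 42);
    }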

--- a/process.rs
+++ b/process.rs

@@ -38,7 +38,7 @@ use crate::{
     context::Context,
     defs::*,
     error::{BinderError, BinderResult},
-    node::{CouldNotDeliverCriticalIncrement, Node, NodeDeath, NodeRef},
+    node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
     prio::{self, BinderPriority},
     range_alloc::{self, RangeAllocator},
     thread::{PushWorkRes, Thread},
@@ -221,8 +221,14 @@ impl ProcessInner {
         node: DArc<Node>,
         strong: bool,
         thread: &Thread,
+        wrapper: Option<CritIncrWrapper>,
     ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
-        let push = node.incr_refcount_allow_zero2one(strong, self)?;
+        let push = match wrapper {
+            None => node
+                .incr_refcount_allow_zero2one(strong, self)?
+                .map(|node| node as _),
+            Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
+        };
         if let Some(node) = push {
             thread.push_work_deferred(node);
         }
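
In the `match` above, both arms must produce the same type,
`Option<DLArc<dyn DeliverToRead>>`, which is why the `None` arm needs
`.map(|node| node as _)`: it unsizes the concrete `DLArc<Node>` into the
trait-object form. The same coercion cast with a plain `Arc`
(illustration only; the trait and types here are stand-ins):

    use std::sync::Arc;

    trait DeliverToRead {
        fn debug_id(&self) -> usize;
    }

    struct Node;

    impl DeliverToRead for Node {
        fn debug_id(&self) -> usize {
            7
        }
    }

    fn main() {
        let concrete: Arc<Node> = Arc::new(Node);
        // `as _` asks the compiler to infer the coercion target from context,
        // here Arc<dyn DeliverToRead>, mirroring `node as _` in the hunk above.
        let erased: Arc<dyn DeliverToRead> = concrete as _;
        assert_eq!(erased.debug_id(), 7);
    }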
@@ -649,12 +655,13 @@ impl Process {
         flags: u32,
         strong: bool,
         thread: &Thread,
+        wrapper: Option<CritIncrWrapper>,
     ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
         // Try to find an existing node.
         {
             let mut inner = self.inner.lock();
             if let Some(node) = inner.get_existing_node(ptr, cookie)? {
-                return Ok(inner.new_node_ref_with_thread(node, strong, thread));
+                return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
             }
         }
 
@@ -663,14 +670,14 @@ impl Process {
         let rbnode = RBTree::try_allocate_node(ptr, node.clone())?;
         let mut inner = self.inner.lock();
         if let Some(node) = inner.get_existing_node(ptr, cookie)? {
-            return Ok(inner.new_node_ref_with_thread(node, strong, thread));
+            return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
         }
 
         inner.nodes.insert(rbnode);
 
         // This can only fail if someone has already pushed the node to a list, but we just created
         // it and still hold the lock, so it can't fail right now.
         let node_ref = inner
-            .new_node_ref_with_thread(node, strong, thread)
+            .new_node_ref_with_thread(node, strong, thread, wrapper)
             .unwrap();
         Ok(Ok(node_ref))
@@ -684,12 +691,20 @@ impl Process {
         strong: bool,
         thread: &Thread,
     ) -> Result<NodeRef> {
-        match self.get_node_inner(ptr, cookie, flags, strong, thread) {
-            Err(err) => Err(err),
-            Ok(Ok(node_ref)) => Ok(node_ref),
-            Ok(Err(CouldNotDeliverCriticalIncrement)) => todo!(),
+        let mut wrapper = None;
+        for _ in 0..2 {
+            match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
+                Err(err) => return Err(err),
+                Ok(Ok(node_ref)) => return Ok(node_ref),
+                Ok(Err(CouldNotDeliverCriticalIncrement)) => {
+                    wrapper = Some(CritIncrWrapper::new()?);
+                }
+            }
         }
+        // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
+        // loop should run at most twice.
+        unreachable!()
     }
 
     pub(crate) fn insert_or_update_handle(
         self: ArcBorrow<'_, Process>,