ANDROID: rust_binder: add death notifications
This adds death notifications that let one process be notified when another process dies. A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`. This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE` once it has processed the notification.

Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION` command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the notification has been removed. Note that if the remote process dies before the kernel has responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.

Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.

If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death registration, then the death registration is immediately deleted. However, userspace is not supposed to delete a node reference without first deregistering death notifications, so this codepath is not executed under normal circumstances.

Link: https://lore.kernel.org/rust-for-linux/20231101-rust-binder-v1-10-08ba9197f637@google.com/
Change-Id: I0ceb3b7ea507470be2499d8c1fea2e960b647519
Co-developed-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Bug: 278052745
This commit is contained in: parent fe5dea5fc7, commit 892df033dc
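Editorial note: the command/reply choreography described above is, at heart, a state machine over three flags. The following is a plain-Rust model of that protocol for illustration only: `Driver` and its methods are hypothetical stand-ins for the kernel side (not the driver's API), and the returned strings name the `BR_*` replies the kernel would queue to the reading process.

```rust
// Hypothetical model of the death-notification protocol; not the driver API.
#[derive(Default)]
struct Driver {
    dead: bool,              // the remote process has died
    cleared: bool,           // BC_CLEAR_DEATH_NOTIFICATION was received
    notification_done: bool, // BC_DEAD_BINDER_DONE was received
}

impl Driver {
    /// The remote process dies: queue BR_DEAD_BINDER unless already cleared.
    fn remote_died(&mut self) -> Option<&'static str> {
        self.dead = true;
        (!self.cleared).then_some("BR_DEAD_BINDER")
    }

    /// BC_CLEAR_DEATH_NOTIFICATION: the DONE reply is delayed while a
    /// BR_DEAD_BINDER is still unacknowledged.
    fn clear(&mut self) -> Option<&'static str> {
        self.cleared = true;
        (!self.dead || self.notification_done).then_some("BR_CLEAR_DEATH_NOTIFICATION_DONE")
    }

    /// BC_DEAD_BINDER_DONE: acknowledge the death notification.
    fn dead_binder_done(&mut self) -> Option<&'static str> {
        self.notification_done = true;
        self.cleared.then_some("BR_CLEAR_DEATH_NOTIFICATION_DONE")
    }
}

fn main() {
    // The race called out above: the remote dies before userspace clears.
    let mut d = Driver::default();
    assert_eq!(d.remote_died(), Some("BR_DEAD_BINDER"));
    // The clear reply is withheld until the death is acknowledged...
    assert_eq!(d.clear(), None);
    // ...and sent once BC_DEAD_BINDER_DONE arrives.
    assert_eq!(d.dead_binder_done(), Some("BR_CLEAR_DEATH_NOTIFICATION_DONE"));
}
```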
@@ -25,10 +25,13 @@ pub_no_prefix!(
     BR_SPAWN_LOOPER,
     BR_TRANSACTION_COMPLETE,
     BR_OK,
     BR_ERROR,
     BR_INCREFS,
     BR_ACQUIRE,
     BR_RELEASE,
-    BR_DECREFS
+    BR_DECREFS,
+    BR_DEAD_BINDER,
+    BR_CLEAR_DEATH_NOTIFICATION_DONE
 );
 
 pub_no_prefix!(
@@ -46,7 +49,10 @@ pub_no_prefix!(
     BC_RELEASE,
     BC_DECREFS,
     BC_INCREFS_DONE,
-    BC_ACQUIRE_DONE
+    BC_ACQUIRE_DONE,
+    BC_REQUEST_DEATH_NOTIFICATION,
+    BC_CLEAR_DEATH_NOTIFICATION,
+    BC_DEAD_BINDER_DONE
 );
 
 pub_no_prefix!(flat_binder_object_flags_, FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
@@ -9,7 +9,7 @@ use kernel::{
     },
     prelude::*,
     sync::lock::{spinlock::SpinLockBackend, Guard},
-    sync::{Arc, LockedBy},
+    sync::{Arc, LockedBy, SpinLock},
     uaccess::UserSliceWriter,
 };
@@ -60,6 +60,9 @@ struct NodeInner {
     /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
     /// being delivered directly to the process.
    has_oneway_transaction: bool,
+    /// List of processes to deliver a notification to when this node is destroyed (usually due to
+    /// the process dying).
+    death_list: List<DTRWrap<NodeDeath>, 1>,
     /// The number of active BR_INCREFS or BR_ACQUIRE operations. (should be maximum two)
     ///
     /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
@@ -110,6 +113,7 @@ impl Node {
             NodeInner {
                 strong: CountState::new(),
                 weak: CountState::new(),
+                death_list: List::new(),
                 oneway_todo: List::new(),
                 has_oneway_transaction: false,
                 active_inc_refs: 0,
@@ -169,6 +173,25 @@ impl Node {
         (self.ptr, self.cookie)
     }
 
+    pub(crate) fn next_death(
+        &self,
+        guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+    ) -> Option<DArc<NodeDeath>> {
+        self.inner
+            .access_mut(guard)
+            .death_list
+            .pop_front()
+            .map(|larc| larc.into_arc())
+    }
+
+    pub(crate) fn add_death(
+        &self,
+        death: ListArc<DTRWrap<NodeDeath>, 1>,
+        guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+    ) {
+        self.inner.access_mut(guard).death_list.push_back(death);
+    }
+
     pub(crate) fn inc_ref_done_locked(
         &self,
         _strong: bool,
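Editorial note: the `guard: &mut Guard<'_, ProcessInner, SpinLockBackend>` parameters reflect that `Node::inner` is `LockedBy` data protected by the *owning process's* lock rather than by a lock of its own, so callers must present that process's guard as a witness. Below is a minimal sketch of the idea in plain std Rust, with a `Mutex` standing in for the kernel `SpinLock` and a simplified `LockedBy` that, unlike the real API, trusts the caller to pass a guard of the right lock.

```rust
use std::cell::UnsafeCell;
use std::sync::{Mutex, MutexGuard};

// Simplified stand-in for kernel::sync::LockedBy; illustration only.
struct LockedBy<T> {
    data: UnsafeCell<T>,
}

// SAFETY (sketch): access is only possible through an exclusive guard of the
// owner's mutex, which serializes all access to `data`.
unsafe impl<T: Send> Sync for LockedBy<T> {}

impl<T> LockedBy<T> {
    fn new(data: T) -> Self {
        Self { data: UnsafeCell::new(data) }
    }

    /// The `&mut` guard proves the owner's lock is held exclusively.
    fn access_mut<'a, U>(&'a self, _guard: &'a mut MutexGuard<'_, U>) -> &'a mut T {
        // The real API also checks that the guard belongs to the right lock;
        // this sketch trusts the caller.
        unsafe { &mut *self.data.get() }
    }
}

fn main() {
    let owner = Mutex::new(());                 // stands in for the process lock
    let inner = LockedBy::new(vec![1, 2, 3]);   // stands in for NodeInner
    let mut guard = owner.lock().unwrap();
    inner.access_mut(&mut guard).push(4);
    assert_eq!(inner.access_mut(&mut guard).len(), 4);
}
```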
@@ -502,3 +525,231 @@ impl Drop for NodeRef {
         }
     }
 }
+
+struct NodeDeathInner {
+    dead: bool,
+    cleared: bool,
+    notification_done: bool,
+    /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
+    /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
+    /// to the user).
+    aborted: bool,
+}
+
+/// Used to deliver notifications when a process dies.
+///
+/// A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`.
+/// This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or
+/// immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE`
+/// once it has processed the notification.
+///
+/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
+/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
+/// notification has been removed. Note that if the remote process dies before the kernel has
+/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
+/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
+/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
+///
+/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
+/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
+///
+/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
+/// registration, then the death registration is immediately deleted (we implement this using the
+/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
+/// deregistering death notifications, so this codepath is not executed under normal circumstances.
+#[pin_data]
+pub(crate) struct NodeDeath {
+    node: DArc<Node>,
+    process: Arc<Process>,
+    pub(crate) cookie: usize,
+    #[pin]
+    links_track: AtomicListArcTracker<0>,
+    /// Used by the owner `Node` to store a list of registered death notifications.
+    ///
+    /// # Invariants
+    ///
+    /// Only ever used with the `death_list` list of `self.node`.
+    #[pin]
+    death_links: ListLinks<1>,
+    /// Used by the process to keep track of the death notifications for which we have sent a
+    /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
+    ///
+    /// # Invariants
+    ///
+    /// Only ever used with the `delivered_deaths` list of `self.process`.
+    #[pin]
+    delivered_links: ListLinks<2>,
+    #[pin]
+    delivered_links_track: AtomicListArcTracker<2>,
+    #[pin]
+    inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+    /// Constructs a new node death notification object.
+    pub(crate) fn new(
+        node: DArc<Node>,
+        process: Arc<Process>,
+        cookie: usize,
+    ) -> impl PinInit<DTRWrap<Self>> {
+        DTRWrap::new(pin_init!(
+            Self {
+                node,
+                process,
+                cookie,
+                links_track <- AtomicListArcTracker::new(),
+                death_links <- ListLinks::new(),
+                delivered_links <- ListLinks::new(),
+                delivered_links_track <- AtomicListArcTracker::new(),
+                inner <- kernel::new_spinlock!(NodeDeathInner {
+                    dead: false,
+                    cleared: false,
+                    notification_done: false,
+                    aborted: false,
+                }, "NodeDeath::inner"),
+            }
+        ))
+    }
+
+    /// Sets the cleared flag to `true`.
+    ///
+    /// It removes `self` from the node's death notification list if needed.
+    ///
+    /// Returns whether it needs to be queued.
+    pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
+        let (needs_removal, needs_queueing) = {
+            // Update state and determine if we need to queue a work item. We only need to do it
+            // when the node is not dead or if the user already completed the death notification.
+            let mut inner = self.inner.lock();
+            if abort {
+                inner.aborted = true;
+            }
+            if inner.cleared {
+                // Already cleared.
+                return false;
+            }
+            inner.cleared = true;
+            (!inner.dead, !inner.dead || inner.notification_done)
+        };
+
+        // Remove death notification from node.
+        if needs_removal {
+            let mut owner_inner = self.node.owner.inner.lock();
+            let node_inner = self.node.inner.access_mut(&mut owner_inner);
+            // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
+            // its owner, so it is either in this death list or in no death list.
+            unsafe { node_inner.death_list.remove(self) };
+        }
+        needs_queueing
+    }
+
+    /// Sets the 'notification done' flag to `true`.
+    pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
+        let needs_queueing = {
+            let mut inner = self.inner.lock();
+            inner.notification_done = true;
+            inner.cleared
+        };
+        if needs_queueing {
+            if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+                let _ = thread.push_work_if_looper(death);
+            }
+        }
+    }
+
+    /// Sets the 'dead' flag to `true` and queues the work item if needed.
+    pub(crate) fn set_dead(self: DArc<Self>) {
+        let needs_queueing = {
+            let mut inner = self.inner.lock();
+            if inner.cleared {
+                false
+            } else {
+                inner.dead = true;
+                true
+            }
+        };
+        if needs_queueing {
+            // Push the death notification to the target process. There is nothing else to do if
+            // it's already dead.
+            if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+                let process = death.process.clone();
+                let _ = process.push_work(death);
+            }
+        }
+    }
+}
+
+kernel::list::impl_list_arc_safe! {
+    impl ListArcSafe<0> for NodeDeath {
+        tracked_by links_track: AtomicListArcTracker;
+    }
+}
+
+kernel::list::impl_has_list_links! {
+    impl HasListLinks<1> for DTRWrap<NodeDeath> { self.wrapped.death_links }
+}
+kernel::list::impl_list_arc_safe! {
+    impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
+}
+kernel::list::impl_list_item! {
+    impl ListItem<1> for DTRWrap<NodeDeath> {
+        using ListLinks;
+    }
+}
+
+kernel::list::impl_has_list_links! {
+    impl HasListLinks<2> for DTRWrap<NodeDeath> { self.wrapped.delivered_links }
+}
+kernel::list::impl_list_arc_safe! {
+    impl ListArcSafe<2> for DTRWrap<NodeDeath> {
+        tracked_by wrapped: NodeDeath;
+    }
+}
+kernel::list::impl_list_arc_safe! {
+    impl ListArcSafe<2> for NodeDeath {
+        tracked_by delivered_links_track: AtomicListArcTracker<2>;
+    }
+}
+kernel::list::impl_list_item! {
+    impl ListItem<2> for DTRWrap<NodeDeath> {
+        using ListLinks;
+    }
+}
+
+impl DeliverToRead for NodeDeath {
+    fn do_work(self: DArc<Self>, _thread: &Thread, writer: &mut UserSliceWriter) -> Result<bool> {
+        let done = {
+            let inner = self.inner.lock();
+            if inner.aborted {
+                return Ok(true);
+            }
+            inner.cleared && (!inner.dead || inner.notification_done)
+        };
+
+        let cookie = self.cookie;
+        let cmd = if done {
+            BR_CLEAR_DEATH_NOTIFICATION_DONE
+        } else {
+            let process = self.process.clone();
+            let mut process_inner = process.inner.lock();
+            let inner = self.inner.lock();
+            if inner.aborted {
+                return Ok(true);
+            }
+            // We're still holding the inner lock, so it cannot be aborted while we insert it into
+            // the delivered list.
+            process_inner.death_delivered(self.clone());
+            BR_DEAD_BINDER
+        };
+
+        writer.write(&cmd)?;
+        writer.write(&cookie)?;
+        // Mimic the original code: we stop processing work items when we get to a death
+        // notification.
+        Ok(cmd != BR_DEAD_BINDER)
+    }
+
+    fn should_sync_wakeup(&self) -> bool {
+        false
+    }
+}
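Editorial note: a single `NodeDeath` can sit on two intrusive lists at once, the node's `death_list` (list ID 1) and the process's `delivered_deaths` (list ID 2). The `AtomicListArcTracker` fields and `impl_list_arc_safe!` invocations above guarantee that, per list ID, at most one `ListArc` reference exists, which is why `ListArc::try_from_arc_or_drop` can fail gracefully on double insertion. Below is a self-contained sketch of that idea in plain std Rust; `Tracked` is invented for illustration and is not the kernel `list` API.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

// One tracker per list ID: claiming it grants the sole right to insert the
// object into the list with that ID.
struct Tracked<const ID: u64> {
    in_list: AtomicBool,
}

impl<const ID: u64> Tracked<ID> {
    const fn new() -> Self {
        Self { in_list: AtomicBool::new(false) }
    }

    /// Claim the right to insert into list `ID`; fails if already inserted.
    fn try_claim(&self) -> bool {
        !self.in_list.swap(true, Ordering::AcqRel)
    }

    fn release(&self) {
        self.in_list.store(false, Ordering::Release);
    }
}

struct Death {
    death_links: Tracked<1>,     // mirrors the node's death_list membership
    delivered_links: Tracked<2>, // mirrors the process's delivered_deaths membership
}

fn main() {
    let death = Arc::new(Death {
        death_links: Tracked::new(),
        delivered_links: Tracked::new(),
    });
    // Each list membership is claimed independently.
    assert!(death.death_links.try_claim());
    assert!(death.delivered_links.try_claim());
    // Double insertion into the same list is refused, mirroring
    // `ListArc::try_from_arc_or_drop` returning `None`.
    assert!(!death.delivered_links.try_claim());
    death.delivered_links.release();
}
```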
@@ -34,7 +34,7 @@ use crate::{
     context::Context,
     defs::*,
     error::{BinderError, BinderResult},
-    node::{Node, NodeRef},
+    node::{Node, NodeDeath, NodeRef},
     range_alloc::{self, RangeAllocator},
     thread::{PushWorkRes, Thread},
     DArc, DLArc, DTRWrap, DeliverToRead,
@@ -72,6 +72,7 @@ pub(crate) struct ProcessInner {
     nodes: RBTree<u64, DArc<Node>>,
     mapping: Option<Mapping>,
     work: List<DTRWrap<dyn DeliverToRead>>,
+    delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
 
     /// The number of requested threads that haven't registered yet.
     requested_thread_count: u32,
@@ -94,6 +95,7 @@ impl ProcessInner {
             mapping: None,
             nodes: RBTree::new(),
             work: List::new(),
+            delivered_deaths: List::new(),
             requested_thread_count: 0,
             max_threads: 0,
             started_thread_count: 0,
@@ -239,6 +241,27 @@ impl ProcessInner {
         self.started_thread_count += 1;
         true
     }
+
+    /// Finds a delivered death notification with the given cookie, removes it from the process's
+    /// delivered list, and returns it.
+    fn pull_delivered_death(&mut self, cookie: usize) -> Option<DArc<NodeDeath>> {
+        let mut cursor_opt = self.delivered_deaths.cursor_front();
+        while let Some(cursor) = cursor_opt {
+            if cursor.current().cookie == cookie {
+                return Some(cursor.remove().into_arc());
+            }
+            cursor_opt = cursor.next();
+        }
+        None
+    }
+
+    pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
+        if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+            self.delivered_deaths.push_back(death);
+        } else {
+            pr_warn!("Notification added to `delivered_deaths` twice.");
+        }
+    }
 }
 
 /// Used to keep track of a node that this process has a handle to.
@@ -246,6 +269,7 @@ impl ProcessInner {
 pub(crate) struct NodeRefInfo {
     /// The refcount that this process owns to the node.
     node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
+    death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
     /// Used to store this `NodeRefInfo` in the node's `refs` list.
     #[pin]
     links: ListLinks<{ Self::LIST_NODE }>,
@@ -264,6 +288,7 @@ impl NodeRefInfo {
     fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
         pin_init!(Self {
             node_ref: ListArcField::new(node_ref),
+            death: ListArcField::new(None),
             links <- ListLinks::new(),
             handle,
             process,
@@ -271,6 +296,7 @@ impl NodeRefInfo {
     }
 
     kernel::list::define_list_arc_field_getter! {
+        pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
         pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
         pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
     }
@@ -456,6 +482,18 @@ impl Process {
         }
     }
 
+    pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+        // If push_work fails, drop the work item outside the lock.
+        let res = self.inner.lock().push_work(work);
+        match res {
+            Ok(()) => Ok(()),
+            Err((err, work)) => {
+                drop(work);
+                Err(err)
+            }
+        }
+    }
+
     fn set_as_manager(
         self: ArcBorrow<'_, Self>,
         info: Option<FlatBinderObject>,
@@ -601,6 +639,14 @@ impl Process {
             .clone(strong)
     }
 
+    pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
+        let mut inner = self.inner.lock();
+        // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
+        let removed = unsafe { inner.delivered_deaths.remove(death) };
+        drop(inner);
+        drop(removed);
+    }
+
     pub(crate) fn update_ref(
         self: ArcBorrow<'_, Process>,
         handle: u32,
@@ -622,6 +668,12 @@ impl Process {
         let mut refs = self.node_refs.lock();
         if let Some(info) = refs.by_handle.get_mut(&handle) {
             if info.node_ref().update(inc, strong) {
+                // Clean up death if there is one attached to this node reference.
+                if let Some(death) = info.death().take() {
+                    death.set_cleared(true);
+                    self.remove_from_delivered_deaths(&death);
+                }
+
                 // Remove reference from process tables, and from the node's `refs` list.
 
                 // SAFETY: We are removing the `NodeRefInfo` from the right node.
@@ -822,6 +874,88 @@ impl Process {
         ret
     }
 
+    pub(crate) fn request_death(
+        self: &Arc<Self>,
+        reader: &mut UserSliceReader,
+        thread: &Thread,
+    ) -> Result {
+        let handle: u32 = reader.read()?;
+        let cookie: usize = reader.read()?;
+
+        // TODO: First two should result in error, but not the others.
+
+        // TODO: Do we care about the context manager dying?
+
+        // Queue BR_ERROR if we can't allocate memory for the death notification.
+        let death = UniqueArc::try_new_uninit().map_err(|err| {
+            thread.push_return_work(BR_ERROR);
+            err
+        })?;
+
+        let mut refs = self.node_refs.lock();
+        let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
+
+        // Nothing to do if there is already a death notification request for this handle.
+        if info.death().is_some() {
+            return Ok(());
+        }
+
+        let death = {
+            let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
+            match death.pin_init_with(death_init) {
+                Ok(death) => death,
+                // error is infallible
+                Err(err) => match err {},
+            }
+        };
+
+        // Register the death notification.
+        {
+            let owner = info.node_ref2().node.owner.clone();
+            let mut owner_inner = owner.inner.lock();
+            if owner_inner.is_dead {
+                let death = ListArc::from_pin_unique(death);
+                *info.death() = Some(death.clone_arc());
+                drop(owner_inner);
+                let _ = self.push_work(death);
+            } else {
+                let death = ListArc::from_pin_unique(death);
+                *info.death() = Some(death.clone_arc());
+                info.node_ref().node.add_death(death, &mut owner_inner);
+            }
+        }
+        Ok(())
+    }
+
+    pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
+        let handle: u32 = reader.read()?;
+        let cookie: usize = reader.read()?;
+
+        let mut refs = self.node_refs.lock();
+        let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
+
+        let death = info.death().take().ok_or(EINVAL)?;
+        if death.cookie != cookie {
+            *info.death() = Some(death);
+            return Err(EINVAL);
+        }
+
+        // Update state and determine if we need to queue a work item. We only need to do it when
+        // the node is not dead or if the user already completed the death notification.
+        if death.set_cleared(false) {
+            if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+                let _ = thread.push_work_if_looper(death);
+            }
+        }
+
+        Ok(())
+    }
+
+    pub(crate) fn dead_binder_done(&self, cookie: usize, thread: &Thread) {
+        if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+            death.set_notification_done(thread);
+        }
+    }
+
     fn deferred_flush(&self) {
         let inner = self.inner.lock();
         for thread in inner.threads.values() {
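Editorial note: on the wire, `request_death` and `clear_death` each consume a `u32` handle followed by a `usize` cookie from the write buffer, and `dead_binder_done` consumes only the cookie. The sketch below shows how userspace would lay out such a payload; the command value is a placeholder constant invented for illustration, not the real encoding of `BC_REQUEST_DEATH_NOTIFICATION`.

```rust
// Sketch of the payload layout that `request_death` reads back out of the
// write buffer: command word, then u32 handle, then usize cookie.
fn encode_request_death(buf: &mut Vec<u8>, handle: u32, cookie: usize) {
    // Placeholder only; NOT the real ioctl-encoded command value.
    const BC_REQUEST_DEATH_NOTIFICATION_PLACEHOLDER: u32 = 0;
    buf.extend_from_slice(&BC_REQUEST_DEATH_NOTIFICATION_PLACEHOLDER.to_ne_bytes());
    buf.extend_from_slice(&handle.to_ne_bytes());
    buf.extend_from_slice(&cookie.to_ne_bytes());
}

fn main() {
    let mut buf = Vec::new();
    encode_request_death(&mut buf, 42, 0xcafe);
    // Command word + handle + native-width cookie.
    assert_eq!(buf.len(), 4 + 4 + std::mem::size_of::<usize>());
}
```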
@@ -857,17 +991,6 @@ impl Process {
             work.into_arc().cancel();
         }
 
-        // Move the threads out of `inner` so that we can iterate over them without holding the
-        // lock.
-        let mut inner = self.inner.lock();
-        let threads = take(&mut inner.threads);
-        drop(inner);
-
-        // Release all threads.
-        for thread in threads.values() {
-            thread.release();
-        }
-
         // Free any resources kept alive by allocated buffers.
         let omapping = self.inner.lock().mapping.take();
         if let Some(mut mapping) = omapping {
@@ -886,13 +1009,47 @@ impl Process {
         // Drop all references. We do this dance with `swap` to avoid destroying the references
         // while holding the lock.
         let mut refs = self.node_refs.lock();
-        let node_refs = take(&mut refs.by_handle);
+        let mut node_refs = take(&mut refs.by_handle);
         drop(refs);
-        for info in node_refs.values() {
+        for info in node_refs.values_mut() {
             // SAFETY: We are removing the `NodeRefInfo` from the right node.
             unsafe { info.node_ref2().node.remove_node_info(&info) };
+
+            // Remove all death notifications from the nodes (that belong to a different process).
+            let death = if let Some(existing) = info.death().take() {
+                existing
+            } else {
+                continue;
+            };
+            death.set_cleared(false);
         }
         drop(node_refs);
+
+        // Do similar dance for the state lock.
+        let mut inner = self.inner.lock();
+        let threads = take(&mut inner.threads);
+        let nodes = take(&mut inner.nodes);
+        drop(inner);
+
+        // Release all threads.
+        for thread in threads.values() {
+            thread.release();
+        }
+
+        // Deliver death notifications.
+        for node in nodes.values() {
+            loop {
+                let death = {
+                    let mut inner = self.inner.lock();
+                    if let Some(death) = node.next_death(&mut inner) {
+                        death
+                    } else {
+                        break;
+                    }
+                };
+                death.set_dead();
+            }
+        }
     }
 
     pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
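Editorial note: the `release` path above repeatedly uses the same pattern: `take` a collection out of a locked structure, drop the guard, and only then run destructors or call into other locks. A minimal sketch of this "swap out under the lock, destroy outside" dance, with a std `Mutex` standing in for the kernel spinlock:

```rust
use std::mem::take;
use std::sync::Mutex;

fn drain_outside_lock(state: &Mutex<Vec<String>>) {
    let contents = {
        let mut guard = state.lock().unwrap();
        // Leave an empty Vec behind; the critical section stays short.
        take(&mut *guard)
    };
    // Destructors (or thread.release()-style calls) run here, after the lock
    // has been released, so they can safely take other locks.
    drop(contents);
}

fn main() {
    let state = Mutex::new(vec!["a".to_string(), "b".to_string()]);
    drain_outside_lock(&state);
    assert!(state.lock().unwrap().is_empty());
}
```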
@@ -102,6 +102,13 @@ type DArc<T> = kernel::sync::Arc<DTRWrap<T>>;
 type DLArc<T> = kernel::list::ListArc<DTRWrap<T>>;
 
 impl<T: ListArcSafe> DTRWrap<T> {
+    fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
+        pin_init!(Self {
+            links <- ListLinksSelfPtr::new(),
+            wrapped <- val,
+        })
+    }
+
     #[allow(dead_code)]
     fn arc_try_new(val: T) -> Result<DLArc<T>, alloc::alloc::AllocError> {
         ListArc::pin_init(pin_init!(Self {
@@ -392,10 +392,27 @@ impl Thread {
         res
     }
 
+    /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
+    /// part of a thread pool) and is alive. Otherwise, pushes the work item to the process instead.
+    pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+        let mut inner = self.inner.lock();
+        if inner.is_looper() && !inner.is_dead {
+            inner.push_work(work);
+            Ok(())
+        } else {
+            drop(inner);
+            self.process.push_work(work)
+        }
+    }
+
     pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
         self.inner.lock().push_work_deferred(work);
     }
 
+    pub(crate) fn push_return_work(&self, reply: u32) {
+        self.inner.lock().push_return_work(reply);
+    }
+
     /// This method copies the payload of a transaction into the target process.
     ///
     /// The resulting payload will have several different components, which will be stored next to
@@ -566,7 +583,7 @@ impl Thread {
             );
         }
 
-        self.inner.lock().push_return_work(err.reply);
+        self.push_return_work(err.reply);
     }
 }
@@ -712,6 +729,9 @@ impl Thread {
             }
             BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
             BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+            BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+            BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+            BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
             BC_REGISTER_LOOPER => {
                 let valid = self.process.register_thread();
                 self.inner.lock().looper_register(valid);