ANDROID: rust_binder: add oneway spam detection

The idea is that once free async space drops below a certain threshold,
whoever is responsible for the low async space is likely to try to send
another async transaction, and at that point they can be caught in the act.

This change allows servers to turn on oneway spam detection and receive a
different binder reply (BR_ONEWAY_SPAM_SUSPECT instead of
BR_TRANSACTION_COMPLETE) when a sender is flagged.

Link: https://lore.kernel.org/rust-for-linux/20231101-rust-binder-v1-17-08ba9197f637@google.com/
Change-Id: Ib1bf69fa0b4f79447c3dad913cb872585ea4683c
Co-developed-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Matt Gilbride <mattgilbride@google.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Bug: 278052745
Author: Matt Gilbride, 2023-07-18 21:20:49 +00:00; committed by Alice Ryhl
commit 691f0f1682, parent 6249f06992
8 changed files with 115 additions and 9 deletions
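For context on the server-facing knob, here is a minimal userspace sketch (hypothetical, not part of this patch, assuming the libc crate) of opting in via the ioctl that process.rs handles below. The constant is assumed to expand from _IOW('b', 16, __u32) in the binder UAPI header; once enabled, BR_ONEWAY_SPAM_SUSPECT arrives in the read buffer where BR_TRANSACTION_COMPLETE otherwise would.

// Hypothetical userspace sketch: enable oneway spam detection on an open
// binder fd. Constant value assumed from _IOW('b', 16, __u32).
const BINDER_ENABLE_ONEWAY_SPAM_DETECTION: libc::c_ulong = 0x4004_6210;

fn enable_oneway_spam_detection(binder_fd: libc::c_int) -> std::io::Result<()> {
    let enable: u32 = 1;
    // SAFETY: `binder_fd` is an open binder fd and `enable` outlives the call.
    let ret = unsafe { libc::ioctl(binder_fd, BINDER_ENABLE_ONEWAY_SPAM_DETECTION, &enable) };
    if ret < 0 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(())
}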


@ -51,6 +51,7 @@ pub(crate) struct Allocation {
pub(crate) process: Arc<Process>,
allocation_info: Option<AllocationInfo>,
free_on_drop: bool,
pub(crate) oneway_spam_detected: bool,
}
impl Allocation {
@ -60,6 +61,7 @@ impl Allocation {
size: usize,
ptr: usize,
pages: Arc<Vec<Page>>,
oneway_spam_detected: bool,
) -> Self {
Self {
process,
@ -67,6 +69,7 @@ impl Allocation {
size,
ptr,
pages,
oneway_spam_detected,
allocation_info: None,
free_on_drop: true,
}


@ -26,6 +26,7 @@ pub_no_prefix!(
BR_NOOP,
BR_SPAWN_LOOPER,
BR_TRANSACTION_COMPLETE,
BR_ONEWAY_SPAM_SUSPECT,
BR_OK,
BR_ERROR,
BR_INCREFS,


@ -95,6 +95,8 @@ pub(crate) struct ProcessInner {
pub(crate) sync_recv: bool,
/// Process received async transactions since last frozen.
pub(crate) async_recv: bool,
/// Whether oneway spam detection is enabled.
oneway_spam_detection_enabled: bool,
}
impl ProcessInner {
@ -116,6 +118,7 @@ impl ProcessInner {
is_frozen: false,
sync_recv: false,
async_recv: false,
oneway_spam_detection_enabled: false,
}
}
@ -754,17 +757,21 @@ impl Process {
self: &Arc<Self>,
size: usize,
is_oneway: bool,
from_pid: i32,
) -> BinderResult<Allocation> {
let alloc = range_alloc::ReserveNewBox::try_new()?;
let mut inner = self.inner.lock();
let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
-let offset = mapping.alloc.reserve_new(size, is_oneway, alloc)?;
+let offset = mapping
+    .alloc
+    .reserve_new(size, is_oneway, from_pid, alloc)?;
Ok(Allocation::new(
self.clone(),
offset,
size,
mapping.address + offset,
mapping.pages.clone(),
mapping.alloc.oneway_spam_detected,
))
}
@ -773,7 +780,14 @@ impl Process {
let mapping = inner.mapping.as_mut()?;
let offset = ptr.checked_sub(mapping.address)?;
let (size, odata) = mapping.alloc.reserve_existing(offset).ok()?;
-let mut alloc = Allocation::new(self.clone(), offset, size, ptr, mapping.pages.clone());
+let mut alloc = Allocation::new(
+    self.clone(),
+    offset,
+    size,
+    ptr,
+    mapping.pages.clone(),
+    mapping.alloc.oneway_spam_detected,
+);
if let Some(data) = odata {
alloc.set_info(data);
}
@ -858,6 +872,14 @@ impl Process {
self.inner.lock().max_threads = max;
}
fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
}
pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
self.inner.lock().oneway_spam_detection_enabled
}
fn get_node_debug_info(&self, data: UserSlice) -> Result {
let (mut reader, mut writer) = data.reader_writer();
@ -1045,9 +1067,17 @@ impl Process {
if let Some(mut mapping) = omapping {
let address = mapping.address;
let pages = mapping.pages.clone();
let oneway_spam_detected = mapping.alloc.oneway_spam_detected;
mapping.alloc.take_for_each(|offset, size, odata| {
let ptr = offset + address;
-let mut alloc = Allocation::new(self.clone(), offset, size, ptr, pages.clone());
+let mut alloc = Allocation::new(
+    self.clone(),
+    offset,
+    size,
+    ptr,
+    pages.clone(),
+    oneway_spam_detected,
+);
if let Some(data) = odata {
alloc.set_info(data);
}
@ -1249,6 +1279,9 @@ impl Process {
bindings::BINDER_SET_CONTEXT_MGR_EXT => {
this.set_as_manager(Some(reader.read()?), &thread)?
}
bindings::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
this.set_oneway_spam_detection_enabled(reader.read()?)
}
bindings::BINDER_FREEZE => ioctl_freeze(reader)?,
_ => return Err(EINVAL),
}


@ -5,6 +5,7 @@
use kernel::{
prelude::*,
rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation},
task::Pid,
};
/// Keeps track of allocations in a process' mmap.
@ -15,7 +16,9 @@ use kernel::{
pub(crate) struct RangeAllocator<T> {
tree: RBTree<usize, Descriptor<T>>,
free_tree: RBTree<FreeKey, ()>,
size: usize,
free_oneway_space: usize,
pub(crate) oneway_spam_detected: bool,
}
impl<T> RangeAllocator<T> {
@ -28,6 +31,8 @@ impl<T> RangeAllocator<T> {
free_oneway_space: size / 2,
tree,
free_tree,
oneway_spam_detected: false,
size,
})
}
@ -42,6 +47,7 @@ impl<T> RangeAllocator<T> {
&mut self,
size: usize,
is_oneway: bool,
pid: Pid,
alloc: ReserveNewBox<T>,
) -> Result<usize> {
// Compute new value of free_oneway_space, which is set only on success.
@ -54,6 +60,15 @@ impl<T> RangeAllocator<T> {
self.free_oneway_space
};
// Start detecting spammers once we have less than 20%
// of async space left (which is less than 10% of total
// buffer size).
//
// (This will short-circuit, so `low_oneway_space` is
// only called when necessary.)
self.oneway_spam_detected =
is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
None => {
pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
@ -67,7 +82,7 @@ impl<T> RangeAllocator<T> {
let new_desc = Descriptor::new(found_offset + size, found_size - size);
let (tree_node, free_tree_node, desc_node_res) = alloc.initialize(new_desc);
-desc.state = Some(DescriptorState::new(is_oneway, desc_node_res));
+desc.state = Some(DescriptorState::new(is_oneway, pid, desc_node_res));
desc.size = size;
(found_size, found_offset, tree_node, free_tree_node)
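To make the threshold arithmetic concrete, a small self-contained check (hypothetical sizes; plain usize math mirroring the fields above):

fn main() {
    let size: usize = 1 << 20; // hypothetical 1 MiB binder mapping
    let async_space = size / 2; // oneway transactions share half of it
    let trigger = size / 10; // detection arms below this much free async space
    // size / 10 is exactly 20% of the async space, as the comment above says:
    assert_eq!(trigger, async_space / 5);
    println!("detection arms below {trigger} bytes of free async space");
}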
@ -226,6 +241,30 @@ impl<T> RangeAllocator<T> {
}
}
}
/// Find the number and total size of buffers allocated by the calling pid.
///
/// The idea is that once we cross the threshold, whoever is responsible
/// for the low async space is likely to try to send another async transaction,
/// and at some point we'll catch them in the act. This is more efficient
/// than keeping a map per pid.
fn low_oneway_space(&self, calling_pid: Pid) -> bool {
let mut total_alloc_size = 0;
let mut num_buffers = 0;
for (_, desc) in self.tree.iter() {
if let Some(state) = &desc.state {
if state.is_oneway() && state.pid() == calling_pid {
total_alloc_size += desc.size;
num_buffers += 1;
}
}
}
// Warn if this pid has more than 50 transactions, or more than 50% of
// async space (which is 25% of total buffer size). Oneway spam is only
// detected when the threshold is exceeded.
num_buffers > 50 || total_alloc_size > self.size / 4
}
}
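The same per-pid policy as a runnable userland model (hypothetical types; the driver walks its red-black tree rather than a slice):

#[derive(Clone, Copy)]
struct OnewayDesc {
    pid: i32,
    is_oneway: bool,
    size: usize,
}

fn low_oneway_space(descs: &[OnewayDesc], calling_pid: i32, total_size: usize) -> bool {
    let mut total_alloc_size = 0;
    let mut num_buffers = 0;
    for d in descs {
        if d.is_oneway && d.pid == calling_pid {
            total_alloc_size += d.size;
            num_buffers += 1;
        }
    }
    // Over 50 outstanding buffers, or over 50% of async space
    // (total_size / 4, since async space is total_size / 2), flags the pid.
    num_buffers > 50 || total_alloc_size > total_size / 4
}

fn main() {
    let descs = vec![OnewayDesc { pid: 7, is_oneway: true, size: 4096 }; 64];
    assert!(low_oneway_space(&descs, 7, 1 << 20)); // 64 buffers > 50
    assert!(!low_oneway_space(&descs, 8, 1 << 20)); // another pid is clean
}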
struct Descriptor<T> {
@ -259,16 +298,32 @@ enum DescriptorState<T> {
}
impl<T> DescriptorState<T> {
-fn new(is_oneway: bool, free_res: FreeNodeRes) -> Self {
+fn new(is_oneway: bool, pid: Pid, free_res: FreeNodeRes) -> Self {
DescriptorState::Reserved(Reservation {
is_oneway,
pid,
free_res,
})
}
fn pid(&self) -> Pid {
match self {
DescriptorState::Reserved(inner) => inner.pid,
DescriptorState::Allocated(inner) => inner.pid,
}
}
fn is_oneway(&self) -> bool {
match self {
DescriptorState::Reserved(inner) => inner.is_oneway,
DescriptorState::Allocated(inner) => inner.is_oneway,
}
}
}
struct Reservation {
is_oneway: bool,
pid: Pid,
free_res: FreeNodeRes,
}
@ -277,6 +332,7 @@ impl Reservation {
Allocation {
data,
is_oneway: self.is_oneway,
pid: self.pid,
free_res: self.free_res,
}
}
@ -284,6 +340,7 @@ impl Reservation {
struct Allocation<T> {
is_oneway: bool,
pid: Pid,
free_res: FreeNodeRes,
data: Option<T>,
}
@ -293,6 +350,7 @@ impl<T> Allocation<T> {
(
Reservation {
is_oneway: self.is_oneway,
pid: self.pid,
free_res: self.free_res,
},
self.data,


@ -109,7 +109,6 @@ impl<T: ListArcSafe> DTRWrap<T> {
})
}
-#[allow(dead_code)]
fn arc_try_new(val: T) -> Result<DLArc<T>, alloc::alloc::AllocError> {
ListArc::pin_init(pin_init!(Self {
links <- ListLinksSelfPtr::new(),


@ -915,7 +915,7 @@ impl Thread {
size_of::<usize>(),
);
let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
-let mut alloc = match to_process.buffer_alloc(len, is_oneway) {
+let mut alloc = match to_process.buffer_alloc(len, is_oneway, self.process.task.pid()) {
Ok(alloc) => alloc,
Err(err) => {
pr_warn!(
@ -1200,8 +1200,15 @@ impl Thread {
let handle = unsafe { tr.transaction_data.target.handle };
let node_ref = self.process.get_transaction_node(handle)?;
security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
-let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
let transaction = Transaction::new(node_ref, None, self, tr)?;
let code = if self.process.is_oneway_spam_detection_enabled()
&& transaction.oneway_spam_detected
{
BR_ONEWAY_SPAM_SUSPECT
} else {
BR_TRANSACTION_COMPLETE
};
let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
let completion = list_completion.clone_arc();
self.inner.lock().push_work(list_completion);
match transaction.submit() {


@ -39,6 +39,7 @@ pub(crate) struct Transaction {
data_address: usize,
sender_euid: Kuid,
txn_security_ctx_off: Option<usize>,
pub(crate) oneway_spam_detected: bool,
}
kernel::list::impl_list_arc_safe! {
@ -71,6 +72,7 @@ impl Transaction {
return Err(err);
}
};
let oneway_spam_detected = alloc.oneway_spam_detected;
if trd.flags & TF_ONE_WAY != 0 {
if from_parent.is_some() {
pr_warn!("Oneway transaction should not be in a transaction stack.");
@ -99,6 +101,7 @@ impl Transaction {
allocation <- kernel::new_spinlock!(Some(alloc), "Transaction::new"),
is_outstanding: AtomicBool::new(false),
txn_security_ctx_off,
oneway_spam_detected,
}))?)
}
@ -116,6 +119,7 @@ impl Transaction {
return Err(err);
}
};
let oneway_spam_detected = alloc.oneway_spam_detected;
if trd.flags & TF_CLEAR_BUF != 0 {
alloc.set_info_clear_on_drop();
}
@ -133,6 +137,7 @@ impl Transaction {
allocation <- kernel::new_spinlock!(Some(alloc), "Transaction::new"),
is_outstanding: AtomicBool::new(false),
txn_security_ctx_off: None,
oneway_spam_detected,
}))?)
}


@ -97,7 +97,7 @@ unsafe impl Send for Task {}
unsafe impl Sync for Task {}
/// The type of process identifiers (PIDs).
-type Pid = bindings::pid_t;
+pub type Pid = bindings::pid_t;
/// The type of user identifiers (UIDs).
#[derive(Copy, Clone)]