ANDROID: rust_binder: add BINDER_TYPE_FDA support

In the previous patch, we introduced support for `BINDER_TYPE_FD`
objects, which send a single fd. In this patch, we add support for fd
arrays (`BINDER_TYPE_FDA`). One important difference between the two is
that fds sent in a `BINDER_TYPE_FDA` array are closed when the
transaction allocation is freed, whereas fds sent using
`BINDER_TYPE_FD` are not.

Note that `BINDER_TYPE_FDA` is used only with hwbinder.

Link: https://lore.kernel.org/rust-for-linux/20231101-rust-binder-v1-14-08ba9197f637@google.com/
Change-Id: I0b50e440c957fba5b1adb5b53f82bce3e1f1e0e7
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Bug: 278052745
commit 571343fd97 (parent b427bc70cf)
Alice Ryhl, 2023-07-20 09:35:59 +00:00
4 changed files with 163 additions and 16 deletions
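
As a rough orientation before the diff (a sketch, not part of the patch): a `BINDER_TYPE_FDA` object does not carry the fds itself. Its `parent` and `parent_offset` fields point into a parent buffer object, which holds `num_fds` consecutive `u32` fds. The hypothetical `FdArrayObject` below just names those fields (the driver uses the real UAPI struct through `bindings::binder_fd_array_object`), and `fd_array_byte_len` mirrors the overflow-checked length computation performed during translation:

// Sketch only; field names follow the UAPI object as used in the diff below.
struct FdArrayObject {
    num_fds: u64,       // number of consecutive u32 fds stored in the parent buffer
    parent: u64,        // index of the parent buffer object in the offsets array
    parent_offset: u64, // byte offset of the first fd inside that parent buffer
}

// Mirrors the driver's checked computation of fds_len = num_fds * size_of::<u32>().
fn fd_array_byte_len(num_fds: u64) -> Option<usize> {
    usize::try_from(num_fds).ok()?.checked_mul(core::mem::size_of::<u32>())
}

fn main() {
    let obj = FdArrayObject { num_fds: 3, parent: 0, parent_offset: 16 };
    assert_eq!(fd_array_byte_len(obj.num_fds), Some(12));
    assert_eq!(fd_array_byte_len(u64::MAX), None); // the driver rejects this as EINVAL
    println!(
        "fd array: {} fds at offset {} of parent object #{}",
        obj.num_fds, obj.parent_offset, obj.parent
    );
}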

@@ -7,7 +7,7 @@ use core::ops::Range;
use kernel::{
bindings,
file::{File, FileDescriptorReservation},
file::{DeferredFdCloser, File, FileDescriptorReservation},
page::Page,
prelude::*,
sync::Arc,
@@ -169,18 +169,38 @@ impl Allocation {
self.get_or_init_info().target_node = Some(target_node);
}
pub(crate) fn info_add_fd(&mut self, file: ARef<File>, buffer_offset: usize) -> Result {
/// Reserve enough space to push at least `num_fds` fds.
pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
self.get_or_init_info()
.file_list
.files_to_translate
.try_reserve(num_fds)?;
Ok(())
}
pub(crate) fn info_add_fd(
&mut self,
file: ARef<File>,
buffer_offset: usize,
close_on_free: bool,
) -> Result {
self.get_or_init_info()
.file_list
.files_to_translate
.try_push(FileEntry {
file,
buffer_offset,
close_on_free,
})?;
Ok(())
}
pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
self.get_or_init_info().file_list.close_on_free = cof.0;
}
pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
let file_list = match self.allocation_info.as_mut() {
Some(info) => &mut info.file_list,
@@ -188,17 +208,38 @@ impl Allocation {
};
let files = core::mem::take(&mut file_list.files_to_translate);
let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
let mut close_on_free = Vec::try_with_capacity(num_close_on_free)?;
let mut reservations = Vec::try_with_capacity(files.len())?;
for file_info in files {
let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
self.write::<u32>(file_info.buffer_offset, &res.reserved_fd())?;
let fd = res.reserved_fd();
self.write::<u32>(file_info.buffer_offset, &fd)?;
reservations.try_push(Reservation {
res,
file: file_info.file,
})?;
if file_info.close_on_free {
close_on_free.try_push(fd)?;
}
}
Ok(TranslatedFds { reservations })
Ok(TranslatedFds {
reservations,
close_on_free: FdsCloseOnFree(close_on_free),
})
}
/// Should the looper return to userspace when freeing this allocation?
pub(crate) fn looper_need_return_on_free(&self) -> bool {
// Closing fds involves pushing task_work for execution when we return to userspace. Hence,
// we should return to userspace asap if we are closing fds.
match self.allocation_info {
Some(ref info) => !info.file_list.close_on_free.is_empty(),
None => false,
}
}
}
@@ -224,6 +265,21 @@ impl Drop for Allocation {
}
}
for &fd in &info.file_list.close_on_free {
let closer = match DeferredFdCloser::new() {
Ok(closer) => closer,
Err(core::alloc::AllocError) => {
// Ignore allocation failures.
break;
}
};
// Here, we ignore errors. The operation can fail if the fd is not valid, or if the
// method is called from a kthread. However, this is always called from a syscall,
// so the latter case cannot happen, and we don't care about the first case.
let _ = closer.close_fd(fd);
}
if info.clear_on_free {
if let Err(e) = self.fill_zero() {
pr_warn!("Failed to clear data on free: {:?}", e);
@@ -469,6 +525,7 @@ impl BinderObject {
#[derive(Default)]
struct FileList {
files_to_translate: Vec<FileEntry>,
close_on_free: Vec<u32>,
}
struct FileEntry {
@@ -476,10 +533,15 @@ struct FileEntry {
file: ARef<File>,
/// The offset in the buffer where the file descriptor is stored.
buffer_offset: usize,
/// Whether this fd should be closed when the allocation is freed.
close_on_free: bool,
}
pub(crate) struct TranslatedFds {
reservations: Vec<Reservation>,
/// If commit is called, then these fds should be closed. (If commit is not called, then they
/// shouldn't be closed.)
close_on_free: FdsCloseOnFree,
}
struct Reservation {
@@ -491,12 +553,17 @@ impl TranslatedFds {
pub(crate) fn new() -> Self {
Self {
reservations: Vec::new(),
close_on_free: FdsCloseOnFree(Vec::new()),
}
}
pub(crate) fn commit(self) {
pub(crate) fn commit(self) -> FdsCloseOnFree {
for entry in self.reservations {
entry.res.fd_install(entry.file);
}
self.close_on_free
}
}
pub(crate) struct FdsCloseOnFree(Vec<u32>);
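
To summarize the new plumbing in this file (a minimal userspace model, not the driver code): fds queued with `info_add_fd(..., close_on_free)` are turned into reservations by `translate_fds`, `commit` hands back the subset that must be closed on free as `FdsCloseOnFree`, and `set_info_close_on_free` stores those fds so they can be closed when the allocation is freed. Plain `u32`s stand in for `ARef<File>` and `FileDescriptorReservation` in this sketch:

#[derive(Default)]
struct FileList {
    files_to_translate: Vec<FileEntry>,
    close_on_free: Vec<u32>, // filled in via set_info_close_on_free after commit
}

struct FileEntry {
    fd: u32,             // stands in for the ARef<File> being translated
    buffer_offset: usize,
    close_on_free: bool, // true only for fds that arrived in a BINDER_TYPE_FDA array
}

struct TranslatedFds {
    reservations: Vec<(u32, usize)>, // (reserved fd, buffer offset it was written to)
    close_on_free: FdsCloseOnFree,
}

struct FdsCloseOnFree(Vec<u32>);

fn translate_fds(list: &mut FileList) -> TranslatedFds {
    let files = std::mem::take(&mut list.files_to_translate);
    let mut reservations = Vec::with_capacity(files.len());
    let mut close_on_free = Vec::new();
    for entry in files {
        // The driver reserves a target-side fd here and writes it into the buffer.
        reservations.push((entry.fd, entry.buffer_offset));
        if entry.close_on_free {
            close_on_free.push(entry.fd);
        }
    }
    TranslatedFds { reservations, close_on_free: FdsCloseOnFree(close_on_free) }
}

fn main() {
    let mut list = FileList::default();
    // One fd sent with BINDER_TYPE_FD (left open) and two sent via BINDER_TYPE_FDA.
    list.files_to_translate.push(FileEntry { fd: 7, buffer_offset: 0, close_on_free: false });
    list.files_to_translate.push(FileEntry { fd: 8, buffer_offset: 8, close_on_free: true });
    list.files_to_translate.push(FileEntry { fd: 9, buffer_offset: 12, close_on_free: true });

    let translated = translate_fds(&mut list);
    assert_eq!(translated.reservations.len(), 3);

    // Delivery succeeded: commit() returns the FDA fds so the allocation can close
    // them when userspace later frees the buffer (see the Drop impl above).
    list.close_on_free = translated.close_on_free.0;
    assert_eq!(list.close_on_free, vec![8, 9]);
    println!("fds closed on free: {:?}", list.close_on_free);
}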

@@ -111,6 +111,7 @@ decl_wrapper!(BinderNodeDebugInfo, bindings::binder_node_debug_info);
decl_wrapper!(BinderNodeInfoForRef, bindings::binder_node_info_for_ref);
decl_wrapper!(FlatBinderObject, bindings::flat_binder_object);
decl_wrapper!(BinderFdObject, bindings::binder_fd_object);
decl_wrapper!(BinderFdArrayObject, bindings::binder_fd_array_object);
decl_wrapper!(BinderObjectHeader, bindings::binder_object_header);
decl_wrapper!(BinderBufferObject, bindings::binder_buffer_object);
decl_wrapper!(BinderTransactionData, bindings::binder_transaction_data);

@@ -655,7 +655,8 @@ impl Thread {
const FD_FIELD_OFFSET: usize =
::core::mem::offset_of!(bindings::binder_fd_object, __bindgen_anon_1.fd)
as usize;
view.alloc.info_add_fd(file, offset + FD_FIELD_OFFSET)?;
view.alloc
.info_add_fd(file, offset + FD_FIELD_OFFSET, false)?;
}
BinderObjectRef::Ptr(obj) => {
let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
@@ -730,9 +731,78 @@ impl Thread {
obj_write.parent_offset = obj.parent_offset;
view.write::<BinderBufferObject>(offset, &obj_write)?;
}
BinderObjectRef::Fda(_obj) => {
pr_warn!("Using unsupported binder object type fda.");
return Err(EINVAL.into());
BinderObjectRef::Fda(obj) => {
if !allow_fds {
return Err(EPERM.into());
}
let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
view.alloc.info_add_fd_reserve(num_fds)?;
let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
sg_state.ancestors.truncate(info.num_ancestors);
let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
Some(parent_entry) => parent_entry,
None => {
pr_err!(
"validate_parent_fixup returned index out of bounds for sg.entries"
);
return Err(EINVAL.into());
}
};
parent_entry.fixup_min_offset = info.new_min_offset;
parent_entry
.pointer_fixups
.try_push(PointerFixupEntry {
skip: fds_len,
pointer_value: 0,
target_offset: info.target_offset,
})
.map_err(|_| ENOMEM)?;
let fda_uaddr = parent_entry
.sender_uaddr
.checked_add(parent_offset)
.ok_or(EINVAL)?;
let mut fda_bytes = Vec::new();
UserSlice::new(fda_uaddr as _, fds_len).read_all(&mut fda_bytes)?;
if fds_len != fda_bytes.len() {
pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
return Err(EINVAL.into());
}
for i in (0..fds_len).step_by(size_of::<u32>()) {
let fd = {
let mut fd_bytes = [0u8; size_of::<u32>()];
fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
u32::from_ne_bytes(fd_bytes)
};
let file = File::fget(fd)?;
security::binder_transfer_file(
&self.process.cred,
&view.alloc.process.cred,
&file,
)?;
// The `validate_parent_fixup` call ensures that this addition will not
// overflow.
view.alloc.info_add_fd(file, info.target_offset + i, true)?;
}
drop(fda_bytes);
let mut obj_write = BinderFdArrayObject::default();
obj_write.hdr.type_ = BINDER_TYPE_FDA;
obj_write.num_fds = obj.num_fds;
obj_write.parent = obj.parent;
obj_write.parent_offset = obj.parent_offset;
view.write::<BinderFdArrayObject>(offset, &obj_write)?;
}
}
Ok(())
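
The decode step in the `BINDER_TYPE_FDA` arm above can be isolated as a small sketch (not driver code): the fd array arrives as raw bytes read from the sender's parent buffer, and each `u32` fd is extracted with native endianness before being resolved with `File::fget` and security-checked. The hypothetical `decode_fd_array` below models just the byte decoding:

fn decode_fd_array(fda_bytes: &[u8]) -> Option<Vec<u32>> {
    // The driver checks that the byte length matches num_fds * size_of::<u32>() exactly.
    if fda_bytes.len() % core::mem::size_of::<u32>() != 0 {
        return None;
    }
    let mut fds = Vec::with_capacity(fda_bytes.len() / core::mem::size_of::<u32>());
    for chunk in fda_bytes.chunks_exact(core::mem::size_of::<u32>()) {
        let mut fd_bytes = [0u8; core::mem::size_of::<u32>()];
        fd_bytes.copy_from_slice(chunk);
        fds.push(u32::from_ne_bytes(fd_bytes));
    }
    Some(fds)
}

fn main() {
    let raw: Vec<u8> = [3u32, 4, 5].iter().flat_map(|fd| fd.to_ne_bytes()).collect();
    assert_eq!(decode_fd_array(&raw), Some(vec![3, 4, 5]));
}
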
@@ -1168,7 +1238,15 @@ impl Thread {
let tr = reader.read::<BinderTransactionDataSg>()?;
self.transaction(&tr, Self::reply_inner)
}
BC_FREE_BUFFER => drop(self.process.buffer_get(reader.read()?)),
BC_FREE_BUFFER => {
let buffer = self.process.buffer_get(reader.read()?);
if let Some(buffer) = &buffer {
if buffer.looper_need_return_on_free() {
self.inner.lock().looper_need_return = true;
}
}
drop(buffer);
}
BC_INCREFS => {
self.process
.as_arc_borrow()
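
A toy model of the `BC_FREE_BUFFER` change above (an assumption-level sketch, not the driver code): closing the FDA fds is deferred via task_work, which only runs once the thread returns to userspace, so freeing a buffer with pending close-on-free fds should make the looper return promptly:

struct Allocation {
    close_on_free: Vec<u32>,
}

impl Allocation {
    // Mirrors looper_need_return_on_free() from the allocation changes above.
    fn looper_need_return_on_free(&self) -> bool {
        !self.close_on_free.is_empty()
    }
}

struct ThreadState {
    looper_need_return: bool,
}

fn free_buffer(thread: &mut ThreadState, alloc: Allocation) {
    if alloc.looper_need_return_on_free() {
        thread.looper_need_return = true;
    }
    drop(alloc); // in the driver, Drop queues a deferred close for each FDA fd
}

fn main() {
    let mut thread = ThreadState { looper_need_return: false };
    free_buffer(&mut thread, Allocation { close_on_free: vec![5, 6] });
    assert!(thread.looper_need_return);
}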

@@ -289,17 +289,18 @@ impl DeliverToRead for Transaction {
writer.write(&*tr)?;
}
let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
// Dismiss the completion of transaction with a failure. No failure paths are allowed from
// here on out.
send_failed_reply.dismiss();
// It is now the user's responsibility to clear the allocation.
let alloc = self.allocation.lock().take();
if let Some(alloc) = alloc {
alloc.keep_alive();
}
// Commit files, and set FDs in FDA to be closed on buffer free.
let close_on_free = files.commit();
alloc.set_info_close_on_free(close_on_free);
files.commit();
// It is now the user's responsibility to clear the allocation.
alloc.keep_alive();
// When this is not a reply and not a oneway transaction, update `current_transaction`. If
// it's a reply, `current_transaction` has already been updated appropriately.