ANDROID: rust_binder: send nodes in transactions
To send a transaction to any process other than the context manager, someone
must first send you the binder node. Usually, you get it from the context
manager.

The transaction allocation now contains a list of offsets of objects in the
transaction that must be translated before they are passed to the target
process. In this patch, we only support translation of binder nodes, but
future patches will extend this to other object types.

Link: https://lore.kernel.org/rust-for-linux/20231101-rust-binder-v1-11-08ba9197f637@google.com/
Change-Id: Ia9353b78607ff061533a971e1c74e1877bc8d9b4
Co-developed-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Bug: 278052745
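For orientation: the payload of a binder transaction carries serialized objects at positions named by a separate offsets array, and the translation added here rewrites each named object for the target process. A minimal userspace-side sketch of that layout, using a hypothetical stand-in for the C `flat_binder_object` rather than the real bindings:

```rust
use core::mem::size_of;

// Hypothetical stand-in for the C `flat_binder_object` (field layout
// approximated for illustration; the real ABI uses a header struct and a union).
#[repr(C)]
#[derive(Clone, Copy)]
struct FlatObj {
    type_: u32, // e.g. BINDER_TYPE_BINDER
    flags: u32,
    binder_or_handle: u64,
    cookie: u64,
}

// Build a payload holding 8 bytes of plain data followed by one object, plus
// the offsets array telling the kernel where the object lives so it can be
// translated before delivery.
fn build_payload(obj: FlatObj) -> (Vec<u8>, Vec<usize>) {
    let mut payload = vec![0u8; 8]; // ordinary transaction data
    let obj_offset = payload.len();
    // Append the object's raw bytes (plain-old-data, so this is sound).
    let bytes = unsafe {
        core::slice::from_raw_parts(&obj as *const FlatObj as *const u8, size_of::<FlatObj>())
    };
    payload.extend_from_slice(bytes);
    (payload, vec![obj_offset])
}
```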
commit ae1d247379 (parent 892df033dc)
@@ -2,11 +2,20 @@
 // Copyright (C) 2024 Google LLC.
 
-use core::mem::size_of_val;
+use core::mem::{size_of, size_of_val, MaybeUninit};
+use core::ops::Range;
 
-use kernel::{page::Page, prelude::*, sync::Arc, uaccess::UserSliceReader};
+use kernel::{
+    bindings,
+    page::Page,
+    prelude::*,
+    sync::Arc,
+    types::{AsBytes, FromBytes},
+    uaccess::UserSliceReader,
+};
 
 use crate::{
+    defs::*,
     node::{Node, NodeRef},
     process::Process,
     DArc,
@@ -14,6 +23,8 @@ use crate::{
 #[derive(Default)]
 pub(crate) struct AllocationInfo {
+    /// Range within the allocation where we can find the offsets to the object descriptors.
+    pub(crate) offsets: Option<Range<usize>>,
     /// The target node of the transaction this allocation is associated to.
     /// Not set for replies.
     pub(crate) target_node: Option<NodeRef>,
@@ -92,6 +103,21 @@ impl Allocation {
         })
     }
 
+    pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+        let mut out = MaybeUninit::<T>::uninit();
+        let mut out_offset = 0;
+        self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+            // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+            let obj_ptr = unsafe { (out.as_mut_ptr() as *mut u8).add(out_offset) };
+            // SAFETY: The pointer is in-bounds of the `out` variable, so it is valid.
+            unsafe { page.read(obj_ptr, offset, to_copy) }?;
+            out_offset += to_copy;
+            Ok(())
+        })?;
+        // SAFETY: We just initialised the data.
+        Ok(unsafe { out.assume_init() })
+    }
+
     pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
         let mut obj_offset = 0;
         self.iterate(offset, size_of_val(obj), |page, offset, to_copy| {
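A hypothetical caller of the new `read`/`write` accessors, reading one offsets-array entry and writing the same word back (later hunks in this commit rely on `usize: FromBytes` in exactly this way):

```rust
// Sketch only: round-trips one word through the allocation (errors propagate).
fn roundtrip_offset_entry(alloc: &Allocation, index_offset: usize) -> Result {
    // A page-spanning read is fine: `read` copies piecewise via `iterate`.
    let offset: usize = alloc.read(index_offset)?;
    // `write` takes any `T: ?Sized` and copies `size_of_val(obj)` bytes back.
    alloc.write(index_offset, &offset)
}
```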
@@ -124,6 +150,10 @@ impl Allocation {
         self.allocation_info.get_or_insert_with(Default::default)
     }
 
+    pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
+        self.get_or_init_info().offsets = Some(offsets);
+    }
+
     pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
         self.get_or_init_info().oneway_node = Some(oneway_node);
     }
@@ -150,6 +180,15 @@ impl Drop for Allocation {
 
         info.target_node = None;
 
+        if let Some(offsets) = info.offsets.clone() {
+            let view = AllocationView::new(self, offsets.start);
+            for i in offsets.step_by(size_of::<usize>()) {
+                if view.cleanup_object(i).is_err() {
+                    pr_warn!("Error cleaning up object at offset {}\n", i)
+                }
+            }
+        }
+
         if info.clear_on_free {
             if let Err(e) = self.fill_zero() {
                 pr_warn!("Failed to clear data on free: {:?}", e);
@@ -160,3 +199,234 @@ impl Drop for Allocation {
         self.process.buffer_raw_free(self.ptr);
     }
 }
+
+/// A view into the beginning of an allocation.
+///
+/// All attempts to read or write outside of the view will fail. To intentionally access outside of
+/// this view, use the `alloc` field of this struct directly.
+pub(crate) struct AllocationView<'a> {
+    pub(crate) alloc: &'a mut Allocation,
+    limit: usize,
+}
+
+impl<'a> AllocationView<'a> {
+    pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
+        AllocationView { alloc, limit }
+    }
+
+    pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+        if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+            return Err(EINVAL);
+        }
+        self.alloc.read(offset)
+    }
+
+    pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
+        if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+            return Err(EINVAL);
+        }
+        self.alloc.write(offset, obj)
+    }
+
+    pub(crate) fn transfer_binder_object(
+        &self,
+        offset: usize,
+        obj: &bindings::flat_binder_object,
+        strong: bool,
+        node_ref: NodeRef,
+    ) -> Result {
+        if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
+            // The receiving process is the owner of the node, so send it a binder object (instead
+            // of a handle).
+            let (ptr, cookie) = node_ref.node.get_id();
+            let mut newobj = FlatBinderObject::default();
+            newobj.hdr.type_ = if strong {
+                BINDER_TYPE_BINDER
+            } else {
+                BINDER_TYPE_WEAK_BINDER
+            };
+            newobj.flags = obj.flags;
+            newobj.__bindgen_anon_1.binder = ptr as _;
+            newobj.cookie = cookie as _;
+            self.write(offset, &newobj)?;
+            // Increment the user ref count on the node. It will be decremented as part of the
+            // destruction of the buffer, when we see a binder or weak-binder object.
+            node_ref.node.update_refcount(true, 1, strong);
+        } else {
+            // The receiving process is different from the owner, so we need to insert a handle to
+            // the binder object.
+            let handle = self
+                .alloc
+                .process
+                .as_arc_borrow()
+                .insert_or_update_handle(node_ref, false)?;
+            let mut newobj = FlatBinderObject::default();
+            newobj.hdr.type_ = if strong {
+                BINDER_TYPE_HANDLE
+            } else {
+                BINDER_TYPE_WEAK_HANDLE
+            };
+            newobj.flags = obj.flags;
+            newobj.__bindgen_anon_1.handle = handle;
+            if self.write(offset, &newobj).is_err() {
+                // Decrement ref count on the handle we just created.
+                let _ = self
+                    .alloc
+                    .process
+                    .as_arc_borrow()
+                    .update_ref(handle, false, strong);
+                return Err(EINVAL);
+            }
+        }
+        Ok(())
+    }
+
+    fn cleanup_object(&self, index_offset: usize) -> Result {
+        let offset = self.alloc.read(index_offset)?;
+        let header = self.read::<BinderObjectHeader>(offset)?;
+        match header.type_ {
+            BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+                let obj = self.read::<FlatBinderObject>(offset)?;
+                let strong = header.type_ == BINDER_TYPE_BINDER;
+                // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+                // populated.
+                let ptr = unsafe { obj.__bindgen_anon_1.binder };
+                let cookie = obj.cookie;
+                self.alloc.process.update_node(ptr, cookie, strong);
+                Ok(())
+            }
+            BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+                let obj = self.read::<FlatBinderObject>(offset)?;
+                let strong = header.type_ == BINDER_TYPE_HANDLE;
+                // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+                // populated.
+                let handle = unsafe { obj.__bindgen_anon_1.handle };
+                self.alloc
+                    .process
+                    .as_arc_borrow()
+                    .update_ref(handle, false, strong)
+            }
+            _ => Ok(()),
+        }
+    }
+}
+
+/// A binder object as it is serialized.
+///
+/// # Invariants
+///
+/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
+/// types.
+#[repr(C)]
+pub(crate) union BinderObject {
+    hdr: bindings::binder_object_header,
+    fbo: bindings::flat_binder_object,
+    fdo: bindings::binder_fd_object,
+    bbo: bindings::binder_buffer_object,
+    fdao: bindings::binder_fd_array_object,
+}
+
+/// A view into a `BinderObject` that can be used in a match statement.
+pub(crate) enum BinderObjectRef<'a> {
+    Binder(&'a mut bindings::flat_binder_object),
+    Handle(&'a mut bindings::flat_binder_object),
+    Fd(&'a mut bindings::binder_fd_object),
+    Ptr(&'a mut bindings::binder_buffer_object),
+    Fda(&'a mut bindings::binder_fd_array_object),
+}
+
+impl BinderObject {
+    pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
+        let object = Self::read_from_inner(|slice| {
+            let read_len = usize::min(slice.len(), reader.len());
+            // SAFETY: The length we pass to `read_raw` is at most the length of the slice.
+            unsafe {
+                reader
+                    .clone_reader()
+                    .read_raw(slice.as_mut_ptr(), read_len)?;
+            }
+            Ok(())
+        })?;
+
+        // If we used an object type smaller than the largest object size, then we've read more
+        // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
+        // original reader. Now, we call `skip` so that the caller's reader is advanced by the
+        // right amount.
+        //
+        // The `skip` call fails if the reader doesn't have `size` bytes available. This could
+        // happen if the type header corresponds to an object type that is larger than the rest of
+        // the reader.
+        //
+        // Any extra bytes beyond the size of the object are inaccessible after this call, so
+        // reading them again from the `reader` later does not result in TOCTOU bugs.
+        reader.skip(object.size())?;
+
+        Ok(object)
+    }
+
+    /// Use the provided reader closure to construct a `BinderObject`.
+    ///
+    /// The closure should write the bytes for the object into the provided slice.
+    pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
+    where
+        R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
+    {
+        let mut obj = MaybeUninit::<BinderObject>::zeroed();
+
+        // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
+        // and the byte array has an alignment requirement of one, so the pointer cast is okay.
+        // Additionally, `obj` was initialized to zeros, so the byte array will not be
+        // uninitialized.
+        (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;
+
+        // SAFETY: The entire object is initialized, so accessing this field is safe.
+        let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
+        if Self::type_to_size(type_).is_none() {
+            // The value of `obj.hdr.type_` was invalid.
+            return Err(EINVAL);
+        }
+
+        // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
+        // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
+        unsafe { Ok(obj.assume_init()) }
+    }
+
+    pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
+        use BinderObjectRef::*;
+        // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
+        // variants of this union accept all initialized bit patterns.
+        unsafe {
+            match self.hdr.type_ {
+                BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
+                BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
+                BINDER_TYPE_FD => Fd(&mut self.fdo),
+                BINDER_TYPE_PTR => Ptr(&mut self.bbo),
+                BINDER_TYPE_FDA => Fda(&mut self.fdao),
+                // SAFETY: By the type invariant, the value of `self.hdr.type_` cannot have any
+                // other value than the ones checked above.
+                _ => core::hint::unreachable_unchecked(),
+            }
+        }
+    }
+
+    pub(crate) fn size(&self) -> usize {
+        // SAFETY: The entire object is initialized, so accessing this field is safe.
+        let type_ = unsafe { self.hdr.type_ };
+
+        // SAFETY: The type invariants guarantee that the type field is correct.
+        unsafe { Self::type_to_size(type_).unwrap_unchecked() }
+    }
+
+    fn type_to_size(type_: u32) -> Option<usize> {
+        match type_ {
+            BINDER_TYPE_WEAK_BINDER => Some(size_of::<bindings::flat_binder_object>()),
+            BINDER_TYPE_BINDER => Some(size_of::<bindings::flat_binder_object>()),
+            BINDER_TYPE_WEAK_HANDLE => Some(size_of::<bindings::flat_binder_object>()),
+            BINDER_TYPE_HANDLE => Some(size_of::<bindings::flat_binder_object>()),
+            BINDER_TYPE_FD => Some(size_of::<bindings::binder_fd_object>()),
+            BINDER_TYPE_PTR => Some(size_of::<bindings::binder_buffer_object>()),
+            BINDER_TYPE_FDA => Some(size_of::<bindings::binder_fd_array_object>()),
+            _ => None,
+        }
+    }
+}
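Splitting `read_from` from `read_from_inner` means the bytes need not come from a `UserSliceReader`. A hypothetical helper that feeds it from a kernel-side byte slice (illustrative; not part of this commit):

```rust
// Sketch: construct a BinderObject from bytes already copied into the kernel.
fn object_from_bytes(src: &[u8]) -> Result<BinderObject> {
    BinderObject::read_from_inner(|slice| {
        let n = usize::min(slice.len(), src.len());
        // Copy what we have; remaining bytes stay zeroed, and the header
        // type is still validated before the object is returned.
        slice[..n].copy_from_slice(&src[..n]);
        Ok(())
    })
}
```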
@@ -2,6 +2,7 @@
 // Copyright (C) 2024 Google LLC.
 
+use core::mem::MaybeUninit;
 use core::ops::{Deref, DerefMut};
 use kernel::{
     bindings::{self, *},
@@ -59,11 +60,18 @@ pub_no_prefix!(flat_binder_object_flags_, FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
 
 pub_no_prefix!(transaction_flags_, TF_ONE_WAY, TF_CLEAR_BUF);
 
+pub(crate) use bindings::{
+    BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_FDA, BINDER_TYPE_HANDLE, BINDER_TYPE_PTR,
+    BINDER_TYPE_WEAK_BINDER, BINDER_TYPE_WEAK_HANDLE,
+};
+
 macro_rules! decl_wrapper {
     ($newname:ident, $wrapped:ty) => {
-        #[derive(Copy, Clone, Default)]
+        // Define a wrapper around the C type. Use `MaybeUninit` to enforce that the value of
+        // padding bytes must be preserved.
+        #[derive(Copy, Clone)]
         #[repr(transparent)]
-        pub(crate) struct $newname($wrapped);
+        pub(crate) struct $newname(MaybeUninit<$wrapped>);
 
         // SAFETY: This macro is only used with types where this is ok.
         unsafe impl FromBytes for $newname {}
@@ -72,13 +80,24 @@ macro_rules! decl_wrapper {
         impl Deref for $newname {
             type Target = $wrapped;
             fn deref(&self) -> &Self::Target {
-                &self.0
+                // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+                // always be valid.
+                unsafe { self.0.assume_init_ref() }
             }
         }
 
         impl DerefMut for $newname {
             fn deref_mut(&mut self) -> &mut Self::Target {
-                &mut self.0
+                // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+                // always be valid.
+                unsafe { self.0.assume_init_mut() }
             }
         }
+
+        impl Default for $newname {
+            fn default() -> Self {
+                // Create a new value of this type where all bytes (including padding) are zeroed.
+                Self(MaybeUninit::zeroed())
+            }
+        }
     };
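The `MaybeUninit` wrapper exists because `#[repr(C)]` structs contain padding whose value Rust does not otherwise track, and these wrappers are copied byte-for-byte to userspace. A standalone illustration of why zeroing the padding matters (assumes a 64-bit target):

```rust
use core::mem::{size_of, transmute, MaybeUninit};

#[repr(C)]
#[derive(Clone, Copy)]
struct Example {
    a: u8, // 7 padding bytes follow, so `b` lands on an 8-byte boundary
    b: u64,
}

fn main() {
    assert_eq!(size_of::<Example>(), 16); // 9 data bytes + 7 padding bytes
    // Zero-initialization gives every byte, padding included, a defined
    // value, so copying the raw bytes out cannot leak stale kernel memory.
    let zeroed = MaybeUninit::<Example>::zeroed();
    let bytes: [u8; 16] = unsafe { transmute(zeroed) };
    assert!(bytes.iter().all(|&b| b == 0));
}
```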
@@ -87,6 +106,7 @@ macro_rules! decl_wrapper {
 decl_wrapper!(BinderNodeDebugInfo, bindings::binder_node_debug_info);
 decl_wrapper!(BinderNodeInfoForRef, bindings::binder_node_info_for_ref);
 decl_wrapper!(FlatBinderObject, bindings::flat_binder_object);
+decl_wrapper!(BinderObjectHeader, bindings::binder_object_header);
 decl_wrapper!(BinderTransactionData, bindings::binder_transaction_data);
 decl_wrapper!(
     BinderTransactionDataSecctx,
@@ -102,18 +122,18 @@ decl_wrapper!(ExtendedError, bindings::binder_extended_error);
 
 impl BinderVersion {
     pub(crate) fn current() -> Self {
-        Self(bindings::binder_version {
+        Self(MaybeUninit::new(bindings::binder_version {
             protocol_version: bindings::BINDER_CURRENT_PROTOCOL_VERSION as _,
-        })
+        }))
     }
 }
 
 impl BinderTransactionData {
     pub(crate) fn with_buffers_size(self, buffers_size: u64) -> BinderTransactionDataSg {
-        BinderTransactionDataSg(bindings::binder_transaction_data_sg {
-            transaction_data: self.0,
+        BinderTransactionDataSg(MaybeUninit::new(bindings::binder_transaction_data_sg {
+            transaction_data: *self,
             buffers_size,
-        })
+        }))
     }
 }
@@ -130,6 +150,10 @@ impl BinderTransactionDataSecctx {
 
 impl ExtendedError {
     pub(crate) fn new(id: u32, command: u32, param: i32) -> Self {
-        Self(bindings::binder_extended_error { id, command, param })
+        Self(MaybeUninit::new(bindings::binder_extended_error {
+            id,
+            command,
+            param,
+        }))
    }
 }
@@ -549,7 +549,7 @@ impl Process {
         Ok(inner.new_node_ref(node, strong, thread))
     }
 
-    fn insert_or_update_handle(
+    pub(crate) fn insert_or_update_handle(
         self: ArcBorrow<'_, Process>,
         node_ref: NodeRef,
         is_mananger: bool,
@@ -687,6 +687,14 @@ impl Process {
         Ok(())
     }
 
+    /// Decrements the refcount of the given node, if one exists.
+    pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
+        let mut inner = self.inner.lock();
+        if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+            inner.update_node_refcount(&node, false, strong, 1, None);
+        }
+    }
+
     pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
         let ptr = reader.read::<u64>()?;
         let cookie = reader.read::<u64>()?;
@@ -23,8 +23,13 @@ use kernel::{
 };
 
 use crate::{
-    allocation::Allocation, defs::*, error::BinderResult, process::Process, ptr_align,
-    transaction::Transaction, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
+    allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef},
+    defs::*,
+    error::BinderResult,
+    process::Process,
+    ptr_align,
+    transaction::Transaction,
+    DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
 };
 
 use core::{
@@ -413,6 +418,54 @@ impl Thread {
         self.inner.lock().push_return_work(reply);
     }
 
+    fn translate_object(
+        &self,
+        offset: usize,
+        object: BinderObjectRef<'_>,
+        view: &mut AllocationView<'_>,
+    ) -> BinderResult {
+        match object {
+            BinderObjectRef::Binder(obj) => {
+                let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
+                // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+                // representation.
+                let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+                let cookie = obj.cookie as _;
+                let flags = obj.flags as _;
+                let node = self.process.as_arc_borrow().get_node(
+                    ptr,
+                    cookie,
+                    flags,
+                    strong,
+                    Some(self),
+                )?;
+                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+                view.transfer_binder_object(offset, obj, strong, node)?;
+            }
+            BinderObjectRef::Handle(obj) => {
+                let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
+                // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+                let node = self.process.get_node_from_handle(handle, strong)?;
+                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+                view.transfer_binder_object(offset, obj, strong, node)?;
+            }
+            BinderObjectRef::Fd(_obj) => {
+                pr_warn!("Using unsupported binder object type fd.");
+                return Err(EINVAL.into());
+            }
+            BinderObjectRef::Ptr(_obj) => {
+                pr_warn!("Using unsupported binder object type ptr.");
+                return Err(EINVAL.into());
+            }
+            BinderObjectRef::Fda(_obj) => {
+                pr_warn!("Using unsupported binder object type fda.");
+                return Err(EINVAL.into());
+            }
+        }
+        Ok(())
+    }
+
     /// This method copies the payload of a transaction into the target process.
     ///
     /// The resulting payload will have several different components, which will be stored next to
@@ -442,6 +495,8 @@ impl Thread {
 
         let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
         let aligned_data_size = ptr_align(data_size);
+        let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+        let aligned_offsets_size = ptr_align(offsets_size);
         let aligned_secctx_size = secctx
             .as_ref()
             .map(|(_, ctx)| ptr_align(ctx.len()))
@@ -450,12 +505,13 @@ impl Thread {
         // This guarantees that at least `sizeof(usize)` bytes will be allocated.
         let len = usize::max(
             aligned_data_size
-                .checked_add(aligned_secctx_size)
+                .checked_add(aligned_offsets_size)
+                .and_then(|sum| sum.checked_add(aligned_secctx_size))
                 .ok_or(ENOMEM)?,
             size_of::<usize>(),
         );
-        let secctx_off = aligned_data_size;
-        let alloc = match to_process.buffer_alloc(len, is_oneway) {
+        let secctx_off = aligned_data_size + aligned_offsets_size;
+        let mut alloc = match to_process.buffer_alloc(len, is_oneway) {
             Ok(alloc) => alloc,
             Err(err) => {
                 pr_warn!(
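The target buffer is now laid out as `[ data | offsets | secctx ]` with each region pointer-aligned, which is why `secctx_off` moves past both earlier regions. A worked sketch of the arithmetic, assuming `ptr_align` rounds up to the word size (consistent with its use here):

```rust
use core::mem::size_of;

// Assumed behaviour of the driver's helper: round up to the pointer size.
fn ptr_align(n: usize) -> usize {
    let align = size_of::<usize>();
    (n + align - 1) & !(align - 1)
}

fn main() {
    let (data_size, offsets_size) = (27usize, 8usize); // hypothetical inputs
    let aligned_data_size = ptr_align(data_size); // 32 on a 64-bit target
    let aligned_offsets_size = ptr_align(offsets_size); // 8
    let secctx_off = aligned_data_size + aligned_offsets_size; // 40
    assert_eq!((aligned_data_size, secctx_off), (32, 40));
}
```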
@@ -467,12 +523,59 @@ impl Thread {
             }
         };
 
-        // SAFETY: This is unsafe as a speed-bump to make TOCTOU bugs hard, but it's not actually
-        // unsafe to call. UserSlice need to be fixed so that this isn't unsafe...
-        let mut buffer_reader =
-            unsafe { UserSlice::new(trd.data.ptr.buffer as _, data_size) }.reader();
+        // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
+        // all bit-patterns.
+        let trd_data_ptr = unsafe { &trd.data.ptr };
+        let mut buffer_reader = UserSlice::new(trd_data_ptr.buffer as _, data_size).reader();
+        let mut end_of_previous_object = 0;
 
-        alloc.copy_into(&mut buffer_reader, 0, data_size)?;
+        // Copy offsets if there are any.
+        if offsets_size > 0 {
+            {
+                let mut reader = UserSlice::new(trd_data_ptr.offsets as _, offsets_size).reader();
+                alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
+            }
+
+            let offsets_start = aligned_data_size;
+            let offsets_end = aligned_data_size + aligned_offsets_size;
+
+            // Traverse the objects specified.
+            let mut view = AllocationView::new(&mut alloc, data_size);
+            for index_offset in (offsets_start..offsets_end).step_by(size_of::<usize>()) {
+                let offset = view.alloc.read(index_offset)?;
+
+                // Copy data between two objects.
+                if end_of_previous_object < offset {
+                    view.alloc.copy_into(
+                        &mut buffer_reader,
+                        end_of_previous_object,
+                        offset - end_of_previous_object,
+                    )?;
+                }
+
+                let mut object = BinderObject::read_from(&mut buffer_reader)?;
+
+                match self.translate_object(offset, object.as_ref(), &mut view) {
+                    Ok(()) => end_of_previous_object = offset + object.size(),
+                    Err(err) => {
+                        pr_warn!("Error while translating object.");
+                        return Err(err);
+                    }
+                }
+
+                // Update the indexes containing objects to clean up.
+                let offset_after_object = index_offset + size_of::<usize>();
+                view.alloc
+                    .set_info_offsets(offsets_start..offset_after_object);
+            }
+        }
+
+        // Copy remaining raw data.
+        alloc.copy_into(
+            &mut buffer_reader,
+            end_of_previous_object,
+            data_size - end_of_previous_object,
+        )?;
 
         if let Some((off_out, secctx)) = secctx.as_mut() {
             if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
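To make the interleaved copy concrete: raw bytes between objects are copied from `buffer_reader` in order, each object itself is re-serialized by `translate_object`, and `end_of_previous_object` tracks how much of the user payload has been consumed. A worked trace with assumed sizes:

```rust
fn main() {
    // Assumed: 64-byte payload, two objects at offsets 16 and 40, each 24
    // bytes (the size of `flat_binder_object` on a 64-bit target).
    let (data_size, object_size) = (64usize, 24usize);
    let offsets = [16usize, 40];

    let mut end_of_previous_object = 0;
    for &off in &offsets {
        if end_of_previous_object < off {
            // First iteration copies raw bytes 0..16; the second copies none,
            // because the first object ends exactly where the second begins.
            println!("raw copy {}..{}", end_of_previous_object, off);
        }
        // The object at `off` is translated rather than copied verbatim.
        end_of_previous_object = off + object_size;
    }
    // Tail copy is empty here: 40 + 24 == 64.
    println!("tail copy {}..{}", end_of_previous_object, data_size);
}
```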
@@ -33,6 +33,7 @@ pub(crate) struct Transaction {
     code: u32,
     pub(crate) flags: u32,
     data_size: usize,
+    offsets_size: usize,
     data_address: usize,
     sender_euid: Kuid,
     txn_security_ctx_off: Option<usize>,
@@ -86,6 +87,7 @@ impl Transaction {
             code: trd.code,
             flags: trd.flags,
             data_size: trd.data_size as _,
+            offsets_size: trd.offsets_size as _,
             data_address,
             allocation <- kernel::new_spinlock!(Some(alloc), "Transaction::new"),
             txn_security_ctx_off,
@@ -117,6 +119,7 @@ impl Transaction {
             code: trd.code,
             flags: trd.flags,
             data_size: trd.data_size as _,
+            offsets_size: trd.offsets_size as _,
             data_address: alloc.ptr,
             allocation <- kernel::new_spinlock!(Some(alloc), "Transaction::new"),
             txn_security_ctx_off: None,
@@ -230,7 +233,7 @@ impl DeliverToRead for Transaction {
         tr.flags = self.flags;
         tr.data_size = self.data_size as _;
         tr.data.ptr.buffer = self.data_address as _;
-        tr.offsets_size = 0;
+        tr.offsets_size = self.offsets_size as _;
+        if tr.offsets_size > 0 {
+            tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size)) as _;
+        }
@@ -289,6 +289,13 @@ int rust_helper_security_binder_transaction(const struct cred *from,
 	return security_binder_transaction(from, to);
 }
 EXPORT_SYMBOL_GPL(rust_helper_security_binder_transaction);
+
+int rust_helper_security_binder_transfer_binder(const struct cred *from,
+						const struct cred *to)
+{
+	return security_binder_transfer_binder(from, to);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_transfer_binder);
 #endif
 
 void rust_helper_init_task_work(struct callback_head *twork,
@@ -24,6 +24,13 @@ pub fn binder_transaction(from: &Credential, to: &Credential) -> Result {
     to_result(unsafe { bindings::security_binder_transaction(from.as_ptr(), to.as_ptr()) })
 }
 
+/// Calls the security modules to determine if task `from` is allowed to send binder objects
+/// (owned by itself or other processes) to task `to` through a binder transaction.
+pub fn binder_transfer_binder(from: &Credential, to: &Credential) -> Result {
+    // SAFETY: `from` and `to` are valid because the shared references guarantee nonzero refcounts.
+    to_result(unsafe { bindings::security_binder_transfer_binder(from.as_ptr(), to.as_ptr()) })
+}
+
 /// A security context string.
 ///
 /// # Invariants