author      Jiri Kosina <jkosina@suse.com>  2026-04-16 22:01:18 +0300
committer   Jiri Kosina <jkosina@suse.com>  2026-04-16 22:01:18 +0300
commit      d4eb7b2da66c848709e31585b9c371fa234abc39 (patch)
tree        5c6311cfa45567ecdcb8946a2252da7d55d1ac10 /drivers/android
parent      8b9a097eb2fc37b486afd81388c693bf3ab44466 (diff)
parent      69c02ffde6ed4d535fa4e693a9e572729cad3d0d (diff)
download    linux-d4eb7b2da66c848709e31585b9c371fa234abc39.tar.xz
Merge branch 'for-7.1/core-v2' into for-linus
- fixed handling of 0-sized reports (Dmitry Torokhov)
- convert core code to __free() (Dmitry Torokhov)
- support for multiple batteries per HID device (Lucas Zampieri)
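The __free() conversion listed above refers to the kernel's scope-based cleanup helpers from <linux/cleanup.h>; as a rough, illustrative sketch (not code from this merge, and example_parse() is a made-up function name), the pattern looks like this:

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Illustrative only: the buffer is freed automatically when it goes out
 * of scope, so no explicit kfree()/goto unwinding is needed. */
static int example_parse(size_t len)
{
	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use buf on all paths; cleanup happens on every return ... */
	return 0;
}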
Diffstat (limited to 'drivers/android')
-rw-r--r--  drivers/android/binder.c                        33
-rw-r--r--  drivers/android/binder/context.rs               86
-rw-r--r--  drivers/android/binder/node.rs                   8
-rw-r--r--  drivers/android/binder/page_range.rs            84
-rw-r--r--  drivers/android/binder/process.rs               17
-rw-r--r--  drivers/android/binder/range_alloc/array.rs     35
-rw-r--r--  drivers/android/binder/range_alloc/mod.rs        4
-rw-r--r--  drivers/android/binder/range_alloc/tree.rs      18
-rw-r--r--  drivers/android/binder/rust_binder.h            79
-rw-r--r--  drivers/android/binder/rust_binder_events.h     30
-rw-r--r--  drivers/android/binder/rust_binder_main.rs      12
-rw-r--r--  drivers/android/binder/rust_binderfs.c           8
-rw-r--r--  drivers/android/binder/thread.rs                21
-rw-r--r--  drivers/android/binder/trace.rs                 21
-rw-r--r--  drivers/android/binder/transaction.rs           14
-rw-r--r--  drivers/android/binder_alloc.c                  17
-rw-r--r--  drivers/android/binderfs.c                       8
17 files changed, 357 insertions, 138 deletions
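Most of the C hunks below replace open-coded kzalloc(sizeof(*p), GFP_KERNEL), kcalloc() and kvcalloc() call sites with typed object-allocation helpers (kzalloc_obj(), kzalloc_objs(), kvzalloc_objs()). Their definitions are not part of this diff; the call sites are consistent with sketches along these lines (hypothetical, the real <linux/slab.h> macros may differ):

/* Hypothetical sketches inferred from the call sites below; not the
 * actual in-tree definitions. */
#define kzalloc_obj(obj)	kzalloc(sizeof(obj), GFP_KERNEL)
#define kzalloc_objs(type, n)	kcalloc(n, sizeof(type), GFP_KERNEL)
#define kvzalloc_objs(obj, n)	kvcalloc(n, sizeof(obj), GFP_KERNEL)

Under that reading, twcb = kzalloc_obj(*twcb); is equivalent to the old twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);.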
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bd780d88b468..21f91d9f2fbc 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -795,7 +795,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
struct flat_binder_object *fp)
{
struct binder_node *node;
- struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+ struct binder_node *new_node = kzalloc_obj(*node);
if (!new_node)
return NULL;
@@ -1469,7 +1469,7 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
ref = binder_get_ref_for_node_olocked(proc, node, NULL);
if (!ref) {
binder_proc_unlock(proc);
- new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ new_ref = kzalloc_obj(*ref);
if (!new_ref)
return -ENOMEM;
binder_proc_lock(proc);
@@ -2009,7 +2009,7 @@ static void binder_deferred_fd_close(int fd)
{
struct binder_task_work_cb *twcb;
- twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
+ twcb = kzalloc_obj(*twcb);
if (!twcb)
return;
init_task_work(&twcb->twork, binder_do_fd_close);
@@ -2386,7 +2386,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
* of the fd in the target needs to be done from a
* target thread.
*/
- fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
+ fixup = kzalloc_obj(*fixup);
if (!fixup) {
ret = -ENOMEM;
goto err_alloc;
@@ -2579,7 +2579,7 @@ static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
const void __user *sender_uaddr, size_t length)
{
- struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+ struct binder_sg_copy *bc = kzalloc_obj(*bc);
if (!bc)
return -ENOMEM;
@@ -2622,7 +2622,7 @@ static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
binder_uintptr_t fixup, size_t skip_size)
{
- struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ struct binder_ptr_fixup *pf = kzalloc_obj(*pf);
struct binder_ptr_fixup *tmppf;
if (!pf)
@@ -3101,7 +3101,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
binder_inner_proc_unlock(proc);
- t = kzalloc(sizeof(*t), GFP_KERNEL);
+ t = kzalloc_obj(*t);
if (!t) {
binder_txn_error("%d:%d cannot allocate transaction\n",
thread->pid, proc->pid);
@@ -3320,7 +3320,7 @@ static void binder_transaction(struct binder_proc *proc,
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
- tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ tcomplete = kzalloc_obj(*tcomplete);
if (tcomplete == NULL) {
binder_txn_error("%d:%d cannot allocate work for transaction\n",
thread->pid, proc->pid);
@@ -3926,7 +3926,7 @@ binder_request_freeze_notification(struct binder_proc *proc,
struct binder_ref_freeze *freeze;
struct binder_ref *ref;
- freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
+ freeze = kzalloc_obj(*freeze);
if (!freeze)
return -ENOMEM;
binder_proc_lock(proc);
@@ -4394,7 +4394,7 @@ static int binder_thread_write(struct binder_proc *proc,
* Allocate memory for death notification
* before taking lock
*/
- death = kzalloc(sizeof(*death), GFP_KERNEL);
+ death = kzalloc_obj(*death);
if (death == NULL) {
WARN_ON(thread->return_error.cmd !=
BR_OK);
@@ -4523,7 +4523,7 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
@@ -5293,7 +5293,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
thread = binder_get_thread_ilocked(proc, NULL);
binder_inner_proc_unlock(proc);
if (!thread) {
- new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ new_thread = kzalloc_obj(*thread);
if (new_thread == NULL)
return NULL;
binder_inner_proc_lock(proc);
@@ -5902,9 +5902,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
- target_procs = kcalloc(target_procs_count,
- sizeof(struct binder_proc *),
- GFP_KERNEL);
+ target_procs = kzalloc_objs(struct binder_proc *,
+ target_procs_count);
if (!target_procs) {
mutex_unlock(&binder_procs_lock);
@@ -6061,7 +6060,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->tgid, current->pid);
- proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ proc = kzalloc_obj(*proc);
if (proc == NULL)
return -ENOMEM;
@@ -7065,7 +7064,7 @@ static int __init init_binder_device(const char *name)
int ret;
struct binder_device *binder_device;
- binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
+ binder_device = kzalloc_obj(*binder_device);
if (!binder_device)
return -ENOMEM;
diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
index 3d135ec03ca7..9cf437c025a2 100644
--- a/drivers/android/binder/context.rs
+++ b/drivers/android/binder/context.rs
@@ -3,8 +3,8 @@
// Copyright (C) 2025 Google LLC.
use kernel::{
- error::Error,
- list::{List, ListArc, ListLinks},
+ alloc::kvec::KVVec,
+ error::code::*,
prelude::*,
security,
str::{CStr, CString},
@@ -17,22 +17,19 @@ use crate::{error::BinderError, node::NodeRef, process::Process};
kernel::sync::global_lock! {
// SAFETY: We call `init` in the module initializer, so it's initialized before first use.
pub(crate) unsafe(uninit) static CONTEXTS: Mutex<ContextList> = ContextList {
- list: List::new(),
+ contexts: KVVec::new(),
};
}
pub(crate) struct ContextList {
- list: List<Context>,
+ contexts: KVVec<Arc<Context>>,
}
-pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
+pub(crate) fn get_all_contexts() -> Result<KVVec<Arc<Context>>> {
let lock = CONTEXTS.lock();
-
- let count = lock.list.iter().count();
-
- let mut ctxs = KVec::with_capacity(count, GFP_KERNEL)?;
- for ctx in &lock.list {
- ctxs.push(Arc::from(ctx), GFP_KERNEL)?;
+ let mut ctxs = KVVec::with_capacity(lock.contexts.len(), GFP_KERNEL)?;
+ for ctx in lock.contexts.iter() {
+ ctxs.push(ctx.clone(), GFP_KERNEL)?;
}
Ok(ctxs)
}
@@ -42,7 +39,7 @@ pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
struct Manager {
node: Option<NodeRef>,
uid: Option<Kuid>,
- all_procs: List<Process>,
+ all_procs: KVVec<Arc<Process>>,
}
/// There is one context per binder file (/dev/binder, /dev/hwbinder, etc)
@@ -51,28 +48,16 @@ pub(crate) struct Context {
#[pin]
manager: Mutex<Manager>,
pub(crate) name: CString,
- #[pin]
- links: ListLinks,
-}
-
-kernel::list::impl_list_arc_safe! {
- impl ListArcSafe<0> for Context { untracked; }
-}
-kernel::list::impl_list_item! {
- impl ListItem<0> for Context {
- using ListLinks { self.links };
- }
}
impl Context {
pub(crate) fn new(name: &CStr) -> Result<Arc<Self>> {
let name = CString::try_from(name)?;
- let list_ctx = ListArc::pin_init::<Error>(
+ let ctx = Arc::pin_init(
try_pin_init!(Context {
name,
- links <- ListLinks::new(),
manager <- kernel::new_mutex!(Manager {
- all_procs: List::new(),
+ all_procs: KVVec::new(),
node: None,
uid: None,
}, "Context::manager"),
@@ -80,8 +65,7 @@ impl Context {
GFP_KERNEL,
)?;
- let ctx = list_ctx.clone_arc();
- CONTEXTS.lock().list.push_back(list_ctx);
+ CONTEXTS.lock().contexts.push(ctx.clone(), GFP_KERNEL)?;
Ok(ctx)
}
@@ -89,27 +73,27 @@ impl Context {
/// Called when the file for this context is unlinked.
///
/// No-op if called twice.
- pub(crate) fn deregister(&self) {
- // SAFETY: We never add the context to any other linked list than this one, so it is either
- // in this list, or not in any list.
- unsafe { CONTEXTS.lock().list.remove(self) };
+ pub(crate) fn deregister(self: &Arc<Self>) {
+ // Remove this context from the global list; if it was already removed, `retain` finds no match and this is a no-op.
+ CONTEXTS.lock().contexts.retain(|c| !Arc::ptr_eq(c, self));
}
- pub(crate) fn register_process(self: &Arc<Self>, proc: ListArc<Process>) {
+ pub(crate) fn register_process(self: &Arc<Self>, proc: Arc<Process>) -> Result {
if !Arc::ptr_eq(self, &proc.ctx) {
pr_err!("Context::register_process called on the wrong context.");
- return;
+ return Err(EINVAL);
}
- self.manager.lock().all_procs.push_back(proc);
+ self.manager.lock().all_procs.push(proc, GFP_KERNEL)?;
+ Ok(())
}
- pub(crate) fn deregister_process(self: &Arc<Self>, proc: &Process) {
+ pub(crate) fn deregister_process(self: &Arc<Self>, proc: &Arc<Process>) {
if !Arc::ptr_eq(self, &proc.ctx) {
pr_err!("Context::deregister_process called on the wrong context.");
return;
}
- // SAFETY: We just checked that this is the right list.
- unsafe { self.manager.lock().all_procs.remove(proc) };
+ let mut manager = self.manager.lock();
+ manager.all_procs.retain(|p| !Arc::ptr_eq(p, proc));
}
pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
@@ -154,27 +138,27 @@ impl Context {
{
let lock = self.manager.lock();
for proc in &lock.all_procs {
- func(&proc);
+ func(proc);
}
}
- pub(crate) fn get_all_procs(&self) -> Result<KVec<Arc<Process>>> {
+ pub(crate) fn get_all_procs(&self) -> Result<KVVec<Arc<Process>>> {
let lock = self.manager.lock();
- let count = lock.all_procs.iter().count();
-
- let mut procs = KVec::with_capacity(count, GFP_KERNEL)?;
- for proc in &lock.all_procs {
- procs.push(Arc::from(proc), GFP_KERNEL)?;
+ let mut procs = KVVec::with_capacity(lock.all_procs.len(), GFP_KERNEL)?;
+ for proc in lock.all_procs.iter() {
+ procs.push(Arc::clone(proc), GFP_KERNEL)?;
}
Ok(procs)
}
- pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>> {
- let orig = self.get_all_procs()?;
- let mut backing = KVec::with_capacity(orig.len(), GFP_KERNEL)?;
- for proc in orig.into_iter().filter(|proc| proc.task.pid() == pid) {
- backing.push(proc, GFP_KERNEL)?;
+ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVVec<Arc<Process>>> {
+ let lock = self.manager.lock();
+ let mut matching_procs = KVVec::new();
+ for proc in lock.all_procs.iter() {
+ if proc.task.pid() == pid {
+ matching_procs.push(Arc::clone(proc), GFP_KERNEL)?;
+ }
}
- Ok(backing)
+ Ok(matching_procs)
}
}
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
index c26d113ede96..69f757ff7461 100644
--- a/drivers/android/binder/node.rs
+++ b/drivers/android/binder/node.rs
@@ -178,6 +178,14 @@ struct NodeInner {
refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
}
+use kernel::bindings::rb_node_layout;
+use core::mem::offset_of;
+pub(crate) const NODE_LAYOUT: rb_node_layout = rb_node_layout {
+ arc_offset: Arc::<Node>::DATA_OFFSET + offset_of!(DTRWrap<Node>, wrapped),
+ debug_id: offset_of!(Node, debug_id),
+ ptr: offset_of!(Node, ptr),
+};
+
#[pin_data]
pub(crate) struct Node {
pub(crate) debug_id: usize,
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index fdd97112ef5c..9dfc154e5dd4 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -142,6 +142,30 @@ pub(crate) struct ShrinkablePageRange {
_pin: PhantomPinned,
}
+// We do not define any ops. For now, used only to check identity of vmas.
+static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed();
+
+// To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we
+// check its vm_ops and private data before using it.
+fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> {
+ // SAFETY: Just reading the vm_ops pointer of any active vma is safe.
+ let vm_ops = unsafe { (*vma.as_ptr()).vm_ops };
+ if !ptr::eq(vm_ops, &BINDER_VM_OPS) {
+ return None;
+ }
+
+ // SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe.
+ let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data };
+ // The ShrinkablePageRange is only dropped when the Process is dropped, which only happens once
+ // the file's ->release handler is invoked, which means the ShrinkablePageRange outlives any
+ // VMA associated with it, so there can't be any false positives due to pointer reuse here.
+ if !ptr::eq(vm_private_data, owner.cast()) {
+ return None;
+ }
+
+ vma.as_mixedmap_vma()
+}
+
struct Inner {
/// Array of pages.
///
@@ -308,6 +332,18 @@ impl ShrinkablePageRange {
inner.size = num_pages;
inner.vma_addr = vma.start();
+ // This pointer is only used for comparison - it's not dereferenced.
+ //
+ // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+ // `vm_private_data`.
+ unsafe {
+ (*vma.as_ptr()).vm_private_data = ptr::from_ref(self).cast_mut().cast::<c_void>()
+ };
+
+ // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+ // `vm_ops`.
+ unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS };
+
Ok(num_pages)
}
@@ -399,22 +435,25 @@ impl ShrinkablePageRange {
//
// Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
// workqueue.
- MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
- .mmap_read_lock()
- .vma_lookup(vma_addr)
- .ok_or(ESRCH)?
- .as_mixedmap_vma()
- .ok_or(ESRCH)?
- .vm_insert_page(user_page_addr, &new_page)
- .inspect_err(|err| {
- pr_warn!(
- "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
- user_page_addr,
- vma_addr,
- i,
- err
- )
- })?;
+ let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?);
+ {
+ let vma_read;
+ let mmap_read;
+ let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) {
+ vma_read = ret;
+ check_vma(&vma_read, self)
+ } else {
+ mmap_read = mm.mmap_read_lock();
+ mmap_read
+ .vma_lookup(vma_addr)
+ .and_then(|vma| check_vma(vma, self))
+ };
+
+ match vma {
+ Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?,
+ None => return Err(ESRCH),
+ }
+ }
let inner = self.lock.lock();
@@ -667,12 +706,15 @@ unsafe extern "C" fn rust_shrink_free_page(
let mmap_read;
let mm_mutex;
let vma_addr;
+ let range_ptr;
{
// CAST: The `list_head` field is first in `PageInfo`.
let info = item as *mut PageInfo;
// SAFETY: The `range` field of `PageInfo` is immutable.
- let range = unsafe { &*((*info).range) };
+ range_ptr = unsafe { (*info).range };
+ // SAFETY: The `range` outlives its `PageInfo` values.
+ let range = unsafe { &*range_ptr };
mm = match range.mm.mmget_not_zero() {
Some(mm) => MmWithUser::into_mmput_async(mm),
@@ -717,9 +759,11 @@ unsafe extern "C" fn rust_shrink_free_page(
// SAFETY: The lru lock is locked when this method is called.
unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
- if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
- let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
- vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
+ if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
+ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ }
}
drop(mmap_read);
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index 132055b4790f..f06498129aa9 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -28,11 +28,11 @@ use kernel::{
seq_print,
sync::poll::PollTable,
sync::{
+ aref::ARef,
lock::{spinlock::SpinLockBackend, Guard},
Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
},
task::Task,
- types::ARef,
uaccess::{UserSlice, UserSliceReader},
uapi,
workqueue::{self, Work},
@@ -418,6 +418,13 @@ impl ProcessNodeRefs {
}
}
+use core::mem::offset_of;
+use kernel::bindings::rb_process_layout;
+pub(crate) const PROCESS_LAYOUT: rb_process_layout = rb_process_layout {
+ arc_offset: Arc::<Process>::DATA_OFFSET,
+ task: offset_of!(Process, task),
+};
+
/// A process using binder.
///
/// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
@@ -496,7 +503,7 @@ impl workqueue::WorkItem for Process {
impl Process {
fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
let current = kernel::current!();
- let list_process = ListArc::pin_init::<Error>(
+ let process = Arc::pin_init::<Error>(
try_pin_init!(Process {
ctx,
cred,
@@ -512,8 +519,7 @@ impl Process {
GFP_KERNEL,
)?;
- let process = list_process.clone_arc();
- process.ctx.register_process(list_process);
+ process.ctx.register_process(process.clone())?;
Ok(process)
}
@@ -1289,7 +1295,8 @@ impl Process {
}
pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
- if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ let death = self.inner.lock().pull_delivered_death(cookie);
+ if let Some(death) = death {
death.set_notification_done(thread);
}
}
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
index 07e1dec2ce63..ada1d1b4302e 100644
--- a/drivers/android/binder/range_alloc/array.rs
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -118,7 +118,7 @@ impl<T> ArrayRangeAllocator<T> {
size: usize,
is_oneway: bool,
pid: Pid,
- ) -> Result<usize> {
+ ) -> Result<(usize, bool)> {
// Compute new value of free_oneway_space, which is set only on success.
let new_oneway_space = if is_oneway {
match self.free_oneway_space.checked_sub(size) {
@@ -146,7 +146,38 @@ impl<T> ArrayRangeAllocator<T> {
.ok()
.unwrap();
- Ok(insert_at_offset)
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+ Ok((insert_at_offset, oneway_spam_detected))
+ }
+
+ /// Find the amount and size of buffers allocated by the current caller.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+
+ // Warn if this pid has more than 50 transactions, or more than 50% of
+ // async space (which is 25% of total buffer size). Oneway spam is only
+ // detected when the threshold is exceeded.
+ for range in &self.ranges {
+ if range.state.is_oneway() && range.state.pid() == calling_pid {
+ total_alloc_size += range.size;
+ num_buffers += 1;
+ }
+ }
+ num_buffers > 50 || total_alloc_size > self.size / 4
}
pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
index 2301e2bc1a1f..1f4734468ff1 100644
--- a/drivers/android/binder/range_alloc/mod.rs
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -188,11 +188,11 @@ impl<T> RangeAllocator<T> {
self.reserve_new(args)
}
Impl::Array(array) => {
- let offset =
+ let (offset, oneway_spam_detected) =
array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
Ok(ReserveNew::Success(ReserveNewSuccess {
offset,
- oneway_spam_detected: false,
+ oneway_spam_detected,
_empty_array_alloc: args.empty_array_alloc,
_new_tree_alloc: args.new_tree_alloc,
_tree_alloc: args.tree_alloc,
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
index 838fdd2b47ea..48796fcdb362 100644
--- a/drivers/android/binder/range_alloc/tree.rs
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -164,15 +164,6 @@ impl<T> TreeRangeAllocator<T> {
self.free_oneway_space
};
- // Start detecting spammers once we have less than 20%
- // of async space left (which is less than 10% of total
- // buffer size).
- //
- // (This will short-circut, so `low_oneway_space` is
- // only called when necessary.)
- let oneway_spam_detected =
- is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
-
let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
None => {
pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
@@ -203,6 +194,15 @@ impl<T> TreeRangeAllocator<T> {
self.free_tree.insert(free_tree_node);
}
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
Ok((found_off, oneway_spam_detected))
}
diff --git a/drivers/android/binder/rust_binder.h b/drivers/android/binder/rust_binder.h
index 31806890ed1a..d2284726c025 100644
--- a/drivers/android/binder/rust_binder.h
+++ b/drivers/android/binder/rust_binder.h
@@ -20,4 +20,83 @@ struct inode;
struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid);
void rust_binderfs_remove_file(struct dentry *dentry);
+/*
+ * The internal data types in the Rust Binder driver are opaque to C, so we use
+ * void pointer typedefs for these types.
+ */
+
+typedef void *rust_binder_transaction;
+typedef void *rust_binder_process;
+typedef void *rust_binder_node;
+
+struct rb_process_layout {
+ size_t arc_offset;
+ size_t task;
+};
+
+struct rb_transaction_layout {
+ size_t debug_id;
+ size_t code;
+ size_t flags;
+ size_t from_thread;
+ size_t to_proc;
+ size_t target_node;
+};
+
+struct rb_node_layout {
+ size_t arc_offset;
+ size_t debug_id;
+ size_t ptr;
+};
+
+struct rust_binder_layout {
+ struct rb_transaction_layout t;
+ struct rb_process_layout p;
+ struct rb_node_layout n;
+};
+
+extern const struct rust_binder_layout RUST_BINDER_LAYOUT;
+
+static inline size_t rust_binder_transaction_debug_id(rust_binder_transaction t)
+{
+ return *(size_t *) (t + RUST_BINDER_LAYOUT.t.debug_id);
+}
+
+static inline u32 rust_binder_transaction_code(rust_binder_transaction t)
+{
+ return *(u32 *) (t + RUST_BINDER_LAYOUT.t.code);
+}
+
+static inline u32 rust_binder_transaction_flags(rust_binder_transaction t)
+{
+ return *(u32 *) (t + RUST_BINDER_LAYOUT.t.flags);
+}
+
+// Nullable!
+static inline rust_binder_node rust_binder_transaction_target_node(rust_binder_transaction t)
+{
+ void *p = *(void **) (t + RUST_BINDER_LAYOUT.t.target_node);
+
+ if (p)
+ p = p + RUST_BINDER_LAYOUT.n.arc_offset;
+ return p;
+}
+
+static inline rust_binder_process rust_binder_transaction_to_proc(rust_binder_transaction t)
+{
+ void *p = *(void **) (t + RUST_BINDER_LAYOUT.t.to_proc);
+
+ return p + RUST_BINDER_LAYOUT.p.arc_offset;
+}
+
+static inline struct task_struct *rust_binder_process_task(rust_binder_process t)
+{
+ return *(struct task_struct **) (t + RUST_BINDER_LAYOUT.p.task);
+}
+
+static inline size_t rust_binder_node_debug_id(rust_binder_node t)
+{
+ return *(size_t *) (t + RUST_BINDER_LAYOUT.n.debug_id);
+}
+
#endif
diff --git a/drivers/android/binder/rust_binder_events.h b/drivers/android/binder/rust_binder_events.h
index 2f3efbf9dba6..8ad785c6bd0f 100644
--- a/drivers/android/binder/rust_binder_events.h
+++ b/drivers/android/binder/rust_binder_events.h
@@ -30,6 +30,36 @@ TRACE_EVENT(rust_binder_ioctl,
TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
);
+TRACE_EVENT(rust_binder_transaction,
+ TP_PROTO(bool reply, rust_binder_transaction t, struct task_struct *thread),
+ TP_ARGS(reply, t, thread),
+ TP_STRUCT__entry(
+ __field(int, debug_id)
+ __field(int, target_node)
+ __field(int, to_proc)
+ __field(int, to_thread)
+ __field(int, reply)
+ __field(unsigned int, code)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ rust_binder_process to = rust_binder_transaction_to_proc(t);
+ rust_binder_node target_node = rust_binder_transaction_target_node(t);
+
+ __entry->debug_id = rust_binder_transaction_debug_id(t);
+ __entry->target_node = target_node ? rust_binder_node_debug_id(target_node) : 0;
+ __entry->to_proc = rust_binder_process_task(to)->pid;
+ __entry->to_thread = thread ? thread->pid : 0;
+ __entry->reply = reply;
+ __entry->code = rust_binder_transaction_code(t);
+ __entry->flags = rust_binder_transaction_flags(t);
+ ),
+ TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x",
+ __entry->debug_id, __entry->target_node,
+ __entry->to_proc, __entry->to_thread,
+ __entry->reply, __entry->flags, __entry->code)
+);
+
#endif /* _RUST_BINDER_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/android/binder/rust_binder_main.rs b/drivers/android/binder/rust_binder_main.rs
index 47bfb114cabb..aa5f2a75adb4 100644
--- a/drivers/android/binder/rust_binder_main.rs
+++ b/drivers/android/binder/rust_binder_main.rs
@@ -87,6 +87,14 @@ module! {
license: "GPL",
}
+use kernel::bindings::rust_binder_layout;
+#[no_mangle]
+static RUST_BINDER_LAYOUT: rust_binder_layout = rust_binder_layout {
+ t: transaction::TRANSACTION_LAYOUT,
+ p: process::PROCESS_LAYOUT,
+ n: node::NODE_LAYOUT,
+};
+
fn next_debug_id() -> usize {
static NEXT_DEBUG_ID: Atomic<usize> = Atomic::new(0);
@@ -286,7 +294,7 @@ impl kernel::Module for BinderModule {
pr_warn!("Loaded Rust Binder.");
- BINDER_SHRINKER.register(kernel::c_str!("android-binder"))?;
+ BINDER_SHRINKER.register(c"android-binder")?;
// SAFETY: The module is being loaded, so we can initialize binderfs.
unsafe { kernel::error::to_result(binderfs::init_rust_binderfs())? };
@@ -312,7 +320,7 @@ pub static rust_binder_fops: AssertSync<kernel::bindings::file_operations> = {
owner: THIS_MODULE.as_ptr(),
poll: Some(rust_binder_poll),
unlocked_ioctl: Some(rust_binder_ioctl),
- compat_ioctl: Some(bindings::compat_ptr_ioctl),
+ compat_ioctl: bindings::compat_ptr_ioctl,
mmap: Some(rust_binder_mmap),
open: Some(rust_binder_open),
release: Some(rust_binder_release),
diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c
index 5c1319d80036..ade1c4d92499 100644
--- a/drivers/android/binder/rust_binderfs.c
+++ b/drivers/android/binder/rust_binderfs.c
@@ -145,7 +145,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
mutex_unlock(&binderfs_minors_mutex);
ret = -ENOMEM;
- device = kzalloc(sizeof(*device), GFP_KERNEL);
+ device = kzalloc_obj(*device);
if (!device)
goto err;
@@ -387,7 +387,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
bool use_reserve = true;
#endif
- device = kzalloc(sizeof(*device), GFP_KERNEL);
+ device = kzalloc_obj(*device);
if (!device)
return -ENOMEM;
@@ -642,7 +642,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_op = &binderfs_super_ops;
sb->s_time_gran = 1;
- sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ sb->s_fs_info = kzalloc_obj(struct binderfs_info);
if (!sb->s_fs_info)
return -ENOMEM;
info = sb->s_fs_info;
@@ -721,7 +721,7 @@ static int binderfs_init_fs_context(struct fs_context *fc)
{
struct binderfs_mount_opts *ctx;
- ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ ctx = kzalloc_obj(struct binderfs_mount_opts);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
index 1f1709a6a77a..c004214b1662 100644
--- a/drivers/android/binder/thread.rs
+++ b/drivers/android/binder/thread.rs
@@ -17,9 +17,8 @@ use kernel::{
seq_print,
sync::atomic::{ordering::Relaxed, Atomic},
sync::poll::{PollCondVar, PollTable},
- sync::{Arc, SpinLock},
+ sync::{aref::ARef, Arc, SpinLock},
task::Task,
- types::ARef,
uaccess::UserSlice,
uapi,
};
@@ -1016,12 +1015,9 @@ impl Thread {
// Copy offsets if there are any.
if offsets_size > 0 {
- {
- let mut reader =
- UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
- .reader();
- alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
- }
+ let mut offsets_reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+ .reader();
let offsets_start = aligned_data_size;
let offsets_end = aligned_data_size + offsets_size;
@@ -1042,11 +1038,9 @@ impl Thread {
.step_by(size_of::<u64>())
.enumerate()
{
- let offset: usize = view
- .alloc
- .read::<u64>(index_offset)?
- .try_into()
- .map_err(|_| EINVAL)?;
+ let offset = offsets_reader.read::<u64>()?;
+ view.alloc.write(index_offset, &offset)?;
+ let offset: usize = offset.try_into().map_err(|_| EINVAL)?;
if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
pr_warn!("Got transaction with invalid offset.");
@@ -1146,6 +1140,7 @@ impl Thread {
transaction: &DArc<Transaction>,
) -> bool {
if let Ok(transaction) = &reply {
+ crate::trace::trace_transaction(true, transaction, Some(&self.task));
transaction.set_outstanding(&mut self.process.inner.lock());
}
diff --git a/drivers/android/binder/trace.rs b/drivers/android/binder/trace.rs
index af0e4392805e..9839901c7151 100644
--- a/drivers/android/binder/trace.rs
+++ b/drivers/android/binder/trace.rs
@@ -2,11 +2,21 @@
// Copyright (C) 2025 Google LLC.
+use crate::transaction::Transaction;
+
+use kernel::bindings::{rust_binder_transaction, task_struct};
use kernel::ffi::{c_uint, c_ulong};
+use kernel::task::Task;
use kernel::tracepoint::declare_trace;
declare_trace! {
unsafe fn rust_binder_ioctl(cmd: c_uint, arg: c_ulong);
+ unsafe fn rust_binder_transaction(reply: bool, t: rust_binder_transaction, thread: *mut task_struct);
+}
+
+#[inline]
+fn raw_transaction(t: &Transaction) -> rust_binder_transaction {
+ t as *const Transaction as rust_binder_transaction
}
#[inline]
@@ -14,3 +24,14 @@ pub(crate) fn trace_ioctl(cmd: u32, arg: usize) {
// SAFETY: Always safe to call.
unsafe { rust_binder_ioctl(cmd, arg as c_ulong) }
}
+
+#[inline]
+pub(crate) fn trace_transaction(reply: bool, t: &Transaction, thread: Option<&Task>) {
+ let thread = match thread {
+ Some(thread) => thread.as_ptr(),
+ None => core::ptr::null_mut(),
+ };
+ // SAFETY: The raw transaction is valid for the duration of this call. The thread pointer is
+ // valid or null.
+ unsafe { rust_binder_transaction(reply, raw_transaction(t), thread) }
+}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
index 2273a8e9d01c..75e6f5fbaaae 100644
--- a/drivers/android/binder/transaction.rs
+++ b/drivers/android/binder/transaction.rs
@@ -24,6 +24,17 @@ use crate::{
BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
};
+use core::mem::offset_of;
+use kernel::bindings::rb_transaction_layout;
+pub(crate) const TRANSACTION_LAYOUT: rb_transaction_layout = rb_transaction_layout {
+ debug_id: offset_of!(Transaction, debug_id),
+ code: offset_of!(Transaction, code),
+ flags: offset_of!(Transaction, flags),
+ from_thread: offset_of!(Transaction, from),
+ to_proc: offset_of!(Transaction, to),
+ target_node: offset_of!(Transaction, target_node),
+};
+
#[pin_data(PinnedDrop)]
pub(crate) struct Transaction {
pub(crate) debug_id: usize,
@@ -249,6 +260,7 @@ impl Transaction {
if oneway {
if let Some(target_node) = self.target_node.clone() {
+ crate::trace::trace_transaction(false, &self, None);
if process_inner.is_frozen.is_frozen() {
process_inner.async_recv = true;
if self.flags & TF_UPDATE_TXN != 0 {
@@ -286,11 +298,13 @@ impl Transaction {
}
let res = if let Some(thread) = self.find_target_thread() {
+ crate::trace::trace_transaction(false, &self, Some(&thread.task));
match thread.push_work(self) {
PushWorkRes::Ok => Ok(()),
PushWorkRes::FailedDead(me) => Err((BinderError::new_dead(), me)),
}
} else {
+ crate::trace::trace_transaction(false, &self, None);
process_inner.push_work(self)
};
drop(process_inner);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 145ed5f14cdb..241f16a9b63d 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -81,7 +81,7 @@ static void binder_insert_free_buffer(struct binder_alloc *alloc,
new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %pK\n",
+ "%d: add free buffer, size %zd, at %p\n",
alloc->pid, new_buffer_size, new_buffer);
while (*p) {
@@ -289,7 +289,7 @@ static struct page *binder_page_alloc(struct binder_alloc *alloc,
return NULL;
/* allocate and install shrinker metadata under page->private */
- mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ mdata = kzalloc_obj(*mdata);
if (!mdata) {
__free_page(page);
return NULL;
@@ -572,7 +572,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
}
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
alloc->pid, size, buffer, buffer_size);
/*
@@ -672,7 +672,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
}
/* Preallocate the next buffer */
- next = kzalloc(sizeof(*next), GFP_KERNEL);
+ next = kzalloc_obj(*next);
if (!next)
return ERR_PTR(-ENOMEM);
@@ -748,7 +748,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
ALIGN(buffer->extra_buffers_size, sizeof(void *));
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ "%d: binder_free_buf %p size %zd buffer_size %zd\n",
alloc->pid, buffer, size, buffer_size);
BUG_ON(buffer->free);
@@ -916,16 +916,15 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
alloc->vm_start = vma->vm_start;
- alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
- sizeof(alloc->pages[0]),
- GFP_KERNEL);
+ alloc->pages = kvzalloc_objs(alloc->pages[0],
+ alloc->buffer_size / PAGE_SIZE);
if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ buffer = kzalloc_obj(*buffer);
if (!buffer) {
ret = -ENOMEM;
failure_string = "alloc buffer struct";
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 9f8a18c88d66..361d69f756f5 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -145,7 +145,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
mutex_unlock(&binderfs_minors_mutex);
ret = -ENOMEM;
- device = kzalloc(sizeof(*device), GFP_KERNEL);
+ device = kzalloc_obj(*device);
if (!device)
goto err;
@@ -396,7 +396,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
bool use_reserve = true;
#endif
- device = kzalloc(sizeof(*device), GFP_KERNEL);
+ device = kzalloc_obj(*device);
if (!device)
return -ENOMEM;
@@ -638,7 +638,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_op = &binderfs_super_ops;
sb->s_time_gran = 1;
- sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ sb->s_fs_info = kzalloc_obj(struct binderfs_info);
if (!sb->s_fs_info)
return -ENOMEM;
info = sb->s_fs_info;
@@ -717,7 +717,7 @@ static int binderfs_init_fs_context(struct fs_context *fc)
{
struct binderfs_mount_opts *ctx;
- ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ ctx = kzalloc_obj(struct binderfs_mount_opts);
if (!ctx)
return -ENOMEM;