Subject: [PATCH 3/4] binder: add support for PF_LARGE_HANDLES
From: Carlos Llamas <cmllamas@google.com>

Introduce the PF_LARGE_HANDLES flag to enable the use of a per-process,
monotonically increasing counter to generate handles. This improves
transaction performance when a process holds a large number of
references.

The legacy logic performs an in-order traversal of an rbtree to find the
smallest unused handle. This is necessary because userspace uses the
handles as indexes (e.g. into vectors). The new logic scales much better
but requires userspace to support large handle numbers.

The benchmark below with 100,000 references shows the performance gains
in binder_get_ref_for_node_olocked() calls with PF_LARGE_HANDLES.

[ 167.855945] binder_get_ref_for_node_olocked: 17us (flag on)
[ 237.088072] binder_get_ref_for_node_olocked: 18178us (flag off)
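
For illustration only, a userspace process could opt in roughly as
follows. This is a minimal sketch: it assumes the BINDER_SET_PROC_FLAGS
ioctl introduced earlier in this series takes a pointer to a 32-bit
flags bitmask, which may differ from the final uapi.

  #include <fcntl.h>
  #include <stdint.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/android/binder.h>

  /* Open the binder device and request counter-based (large) handles. */
  static int binder_open_large_handles(const char *dev)
  {
          uint32_t flags = PF_LARGE_HANDLES;      /* added by this patch */
          int fd = open(dev, O_RDWR | O_CLOEXEC);

          if (fd < 0)
                  return -1;

          /*
           * Hypothetical usage: BINDER_SET_PROC_FLAGS comes from an
           * earlier patch in this series; the argument type is assumed.
           */
          if (ioctl(fd, BINDER_SET_PROC_FLAGS, &flags) < 0) {
                  close(fd);
                  return -1;
          }

          return fd;
  }

Processes that still rely on handles being small vector indexes would
simply not set the flag and keep the legacy smallest-unused allocation.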

Suggested-by: Tim Murray <timmurray@google.com>
Suggested-by: Alice Ryhl <aliceryhl@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
---
drivers/android/binder.c | 44 ++++++++++++++++++++++-------
drivers/android/binder_internal.h | 3 ++
include/uapi/linux/android/binder.h | 3 +-
3 files changed, 39 insertions(+), 11 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 54d27dbf1de2..f120a24c9ae6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1045,6 +1045,35 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
return NULL;
}

+static u32 next_ref_desc(struct binder_proc *proc, struct binder_node *node)
+{
+ struct binder_ref *ref;
+ struct rb_node *n;
+ u32 desc;
+
+ /* Handle 0 is reserved for context manager refs */
+ if (node == proc->context->binder_context_mgr_node)
+ return 0;
+
+ /* Get the next handle (non-zero) */
+ if (proc->flags & PF_LARGE_HANDLES)
+ return proc->next_ref_desc++ ? : proc->next_ref_desc++;
+
+ /*
+ * Userspace doesn't support large handles. Find the smallest
+ * unused descriptor by doing an in-order traversal (slow).
+ */
+ desc = 1;
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->data.desc > desc)
+ break;
+ desc = ref->data.desc + 1;
+ }
+
+ return desc;
+}
+
/**
* binder_get_ref_for_node_olocked() - get the ref associated with given node
* @proc: binder_proc that owns the ref
@@ -1068,11 +1097,9 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
struct binder_node *node,
struct binder_ref *new_ref)
{
- struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
struct binder_ref *ref;
- struct rb_node *n;

while (*p) {
parent = *p;
@@ -1095,14 +1122,8 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

- new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
- ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->data.desc > new_ref->data.desc)
- break;
- new_ref->data.desc = ref->data.desc + 1;
- }
-
+retry:
+ new_ref->data.desc = next_ref_desc(proc, node);
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
@@ -1112,6 +1133,8 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
p = &(*p)->rb_left;
else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
+ else if (proc->flags & PF_LARGE_HANDLES)
+ goto retry;
else
BUG();
}
@@ -5663,6 +5686,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
proc->cred = get_cred(filp->f_cred);
+ proc->next_ref_desc = 1;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->freeze_wait);
proc->default_priority = task_nice(current);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 3312fe93a306..221ab7a6384a 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -337,6 +337,8 @@ struct binder_ref {
* (protected by @outer_lock)
* @refs_by_node: rbtree of refs ordered by ref->node
* (protected by @outer_lock)
+ * @next_ref_desc: monotonic wrap-around counter to get the next handle
+ * (protected by @outer_lock)
* @waiting_threads: threads currently waiting for proc work
* (protected by @inner_lock)
* @pid PID of group_leader of process
@@ -407,6 +409,7 @@ struct binder_proc {
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
+ u32 next_ref_desc;
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 972f402415c2..d257ab8689ce 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -254,8 +254,9 @@ struct binder_extended_error {
/* Used with BINDER_SET_PROC_FLAGS ioctl */
enum proc_flags {
PF_SPAM_DETECTION = (1 << 0), /* enable oneway spam detection */
+ PF_LARGE_HANDLES = (1 << 1), /* use large reference handles */

- PF_SUPPORTED_FLAGS_MASK = PF_SPAM_DETECTION,
+ PF_SUPPORTED_FLAGS_MASK = (PF_SPAM_DETECTION | PF_LARGE_HANDLES),
};

enum {
--
2.44.0.683.g7961c838ac-goog
