From: Todd Kjos <tkjos@google.com>
Subject: [PATCH 04/37] binder: separate binder allocator structure from binder proc
Date: Thu, 29 Jun 2017
The binder allocator is logically separate from the rest
of the binder driver. Separate the data structures to
prepare for splitting the allocator into its own file
with its own locking.

Signed-off-by: Todd Kjos <tkjos@google.com>
---
drivers/android/binder.c | 212 +++++++++++++++++++++++++----------------
drivers/android/binder_trace.h | 2 +-
2 files changed, 129 insertions(+), 85 deletions(-)
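
As background for review: the allocator keeps a single, fixed offset between
the kernel and userspace mappings of the buffer area, established once in
binder_mmap(). A minimal sketch of the translation this implies (the helper
names below are illustrative only and not part of this patch):

    /*
     * binder_mmap() sets:
     *   alloc->user_buffer_offset = vma->vm_start - (uintptr_t)alloc->buffer;
     * so the two mappings are related by one constant offset.
     */
    static uintptr_t binder_kernel_to_user(struct binder_alloc *alloc,
                                           void *kernel_addr)
    {
            return (uintptr_t)kernel_addr + alloc->user_buffer_offset;
    }

    static void *binder_user_to_kernel(struct binder_alloc *alloc,
                                       uintptr_t user_addr)
    {
            return (void *)(user_addr - alloc->user_buffer_offset);
    }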

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f7665c31feca..1097d056ea6b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -319,6 +319,41 @@ enum binder_deferred_state {
BINDER_DEFERRED_RELEASE = 0x04,
};

+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invariant after mmap)
+ * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of physical page addresses for each page of
+ * mmap'd space
+ * @buffer_size: size of address space (could be less than requested)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_open() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct page **pages;
+ size_t buffer_size;
+};
+
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
@@ -326,23 +361,11 @@ struct binder_proc {
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;

- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
-
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
@@ -353,6 +376,7 @@ struct binder_proc {
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
};

@@ -485,8 +509,10 @@ static void binder_set_nice(long nice)
static size_t binder_buffer_size(struct binder_proc *proc,
struct binder_buffer *buffer)
{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
+ if (list_is_last(&buffer->entry, &proc->alloc.buffers))
+ return proc->alloc.buffer +
+ proc->alloc.buffer_size -
+ (void *)buffer->data;
return (size_t)list_entry(buffer->entry.next,
struct binder_buffer, entry) - (size_t)buffer->data;
}
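
A worked example of the size computation above (addresses illustrative): with
alloc.buffer = 0x1000 and alloc.buffer_size = 0x4000, a buffer that is last in
the list and whose data[] starts at 0x4f00 has size
0x1000 + 0x4000 - 0x4f00 = 0x100 bytes; a non-last buffer's size is simply the
address of the next struct binder_buffer minus the address of its own data[].
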
@@ -494,7 +520,7 @@ static size_t binder_buffer_size(struct binder_proc *proc,
static void binder_insert_free_buffer(struct binder_proc *proc,
struct binder_buffer *new_buffer)
{
- struct rb_node **p = &proc->free_buffers.rb_node;
+ struct rb_node **p = &proc->alloc.free_buffers.rb_node;
struct rb_node *parent = NULL;
struct binder_buffer *buffer;
size_t buffer_size;
@@ -521,13 +547,13 @@ static void binder_insert_free_buffer(struct binder_proc *proc,
p = &parent->rb_right;
}
rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+ rb_insert_color(&new_buffer->rb_node, &proc->alloc.free_buffers);
}

static void binder_insert_allocated_buffer(struct binder_proc *proc,
struct binder_buffer *new_buffer)
{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
+ struct rb_node **p = &proc->alloc.allocated_buffers.rb_node;
struct rb_node *parent = NULL;
struct binder_buffer *buffer;

@@ -546,18 +572,19 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
BUG();
}
rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+ rb_insert_color(&new_buffer->rb_node, &proc->alloc.allocated_buffers);
}

static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
uintptr_t user_ptr)
{
- struct rb_node *n = proc->allocated_buffers.rb_node;
+ struct rb_node *n = proc->alloc.allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;

- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ kern_ptr = (struct binder_buffer *)
+ (user_ptr - proc->alloc.user_buffer_offset -
+ offsetof(struct binder_buffer, data));

while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
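
The pointer arithmetic in binder_buffer_lookup() inverts what the driver hands
to userspace: user buffer pointers refer to buffer->data translated into the
user mapping, so subtracting user_buffer_offset undoes the translation and
subtracting offsetof(struct binder_buffer, data) steps back from the data[]
payload to its enclosing struct binder_buffer, which is then validated against
the allocated_buffers rb tree before use.
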
@@ -598,8 +625,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,

if (mm) {
down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
+ vma = proc->alloc.vma;
+ if (vma && mm != proc->alloc.vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
proc->pid);
vma = NULL;
@@ -618,7 +645,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;

- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ page = &proc->alloc.pages[
+ (page_addr - proc->alloc.buffer) / PAGE_SIZE];

BUG_ON(*page);
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
@@ -637,7 +665,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
goto err_map_kernel_failed;
}
user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
+ (uintptr_t)page_addr + proc->alloc.user_buffer_offset;
ret = vm_insert_page(vma, user_page_addr, page[0]);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
@@ -655,10 +683,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ page = &proc->alloc.pages[
+ (page_addr - proc->alloc.buffer) / PAGE_SIZE];
if (vma)
zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE);
+ proc->alloc.user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
@@ -681,7 +710,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
size_t extra_buffers_size,
int is_async)
{
- struct rb_node *n = proc->free_buffers.rb_node;
+ struct rb_node *n = proc->alloc.free_buffers.rb_node;
struct binder_buffer *buffer;
size_t buffer_size;
struct rb_node *best_fit = NULL;
@@ -689,7 +718,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
void *end_page_addr;
size_t size, data_offsets_size;

- if (proc->vma == NULL) {
+ if (proc->alloc.vma == NULL) {
pr_err("%d: binder_alloc_buf, no vma\n",
proc->pid);
return NULL;
@@ -710,7 +739,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
return NULL;
}
if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
+ proc->alloc.free_async_space <
+ size + sizeof(struct binder_buffer)) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
proc->pid, size);
@@ -762,7 +792,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
return NULL;

- rb_erase(best_fit, &proc->free_buffers);
+ rb_erase(best_fit, &proc->alloc.free_buffers);
buffer->free = 0;
binder_insert_allocated_buffer(proc, buffer);
if (buffer_size != size) {
@@ -780,10 +810,11 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
buffer->extra_buffers_size = extra_buffers_size;
buffer->async_transaction = is_async;
if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
+ proc->alloc.free_async_space -=
+ size + sizeof(struct binder_buffer);
binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ proc->pid, size, proc->alloc.free_async_space);
}

return buffer;
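
Async transactions are charged against alloc.free_async_space, which is seeded
at mmap time with half of the mapped area; each allocation costs its payload
plus one tracking struct. A hedged sketch of the fit test applied above (the
helper name is illustrative, not part of this patch):

    /* true if an async allocation of 'size' payload bytes would fit */
    static bool binder_async_space_ok(struct binder_alloc *alloc, size_t size)
    {
            return alloc->free_async_space >=
                    size + sizeof(struct binder_buffer);
    }
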
@@ -806,7 +837,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
int free_page_end = 1;
int free_page_start = 1;

- BUG_ON(proc->buffers.next == &buffer->entry);
+ BUG_ON(proc->alloc.buffers.next == &buffer->entry);
prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
BUG_ON(!prev->free);
if (buffer_end_page(prev) == buffer_start_page(buffer)) {
@@ -818,7 +849,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
proc->pid, buffer, prev);
}

- if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
if (buffer_start_page(next) == buffer_end_page(buffer)) {
@@ -862,39 +893,40 @@ static void binder_free_buf(struct binder_proc *proc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+ BUG_ON((void *)buffer < proc->alloc.buffer);
+ BUG_ON((void *)buffer > proc->alloc.buffer + proc->alloc.buffer_size);

if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
+ proc->alloc.free_async_space +=
+ size + sizeof(struct binder_buffer);

binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ proc->pid, size, proc->alloc.free_async_space);
}

binder_update_page_range(proc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+ rb_erase(&buffer->rb_node, &proc->alloc.allocated_buffers);
buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
struct binder_buffer *next = list_entry(buffer->entry.next,
struct binder_buffer, entry);

if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
+ rb_erase(&next->rb_node, &proc->alloc.free_buffers);
binder_delete_free_buffer(proc, next);
}
}
- if (proc->buffers.next != &buffer->entry) {
+ if (proc->alloc.buffers.next != &buffer->entry) {
struct binder_buffer *prev = list_entry(buffer->entry.prev,
struct binder_buffer, entry);

if (prev->free) {
binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
+ rb_erase(&prev->rb_node, &proc->alloc.free_buffers);
buffer = prev;
}
}
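
Free-side coalescing, by example: with adjacent buffers A (free), B (being
freed), and C (free) in the address-ordered list, C is first erased from
free_buffers and its list entry deleted, folding it into B; then B's own entry
is deleted, A is erased from the tree, and A stands in for the combined
region. The code following this hunk reinserts the surviving entry into
free_buffers.
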
@@ -1532,7 +1564,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ proc->alloc.user_buffer_offset;

fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1750,7 +1782,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer - target_proc->alloc.user_buffer_offset;
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1818,7 +1850,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ target_proc->alloc.user_buffer_offset);
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

return 0;
@@ -2158,7 +2190,7 @@ static void binder_transaction(struct binder_proc *proc,
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ target_proc->alloc.user_buffer_offset;
sg_bufp += ALIGN(bp->length, sizeof(u64));

ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2920,7 +2952,7 @@ static int binder_thread_read(struct binder_proc *proc,
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ proc->alloc.user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
@@ -3338,8 +3370,8 @@ static void binder_vma_close(struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ proc->alloc.vma = NULL;
+ proc->alloc.vma_vm_mm = NULL;
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

@@ -3382,7 +3414,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
+ if (proc->alloc.buffer) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
@@ -3394,56 +3426,66 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
failure_string = "get_vm_area";
goto err_get_vm_area_failed;
}
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+ proc->alloc.buffer = area->addr;
+ proc->alloc.user_buffer_offset =
+ vma->vm_start - (uintptr_t)proc->alloc.buffer;
mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)proc->alloc.buffer))) {
+ pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+ __func__,
+ proc->pid, vma->vm_start,
+ vma->vm_end, proc->alloc.buffer);
vma->vm_start += PAGE_SIZE;
}
}
#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
+ proc->alloc.pages =
+ kzalloc(sizeof(proc->alloc.pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (proc->alloc.pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- proc->buffer_size = vma->vm_end - vma->vm_start;
+ proc->alloc.buffer_size = vma->vm_end - vma->vm_start;

vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;

- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+ if (binder_update_page_range(proc, 1, proc->alloc.buffer,
+ proc->alloc.buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
+ buffer = proc->alloc.buffer;
+ INIT_LIST_HEAD(&proc->alloc.buffers);
+ list_add(&buffer->entry, &proc->alloc.buffers);
buffer->free = 1;
binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
+ proc->alloc.free_async_space = proc->alloc.buffer_size / 2;
barrier();
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
+ proc->alloc.vma = vma;
+ proc->alloc.vma_vm_mm = vma->vm_mm;

- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+ /*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
+ * proc->pid, vma->vm_start, vma->vm_end, proc->alloc.buffer);
+ */
return 0;

err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
+ kfree(proc->alloc.pages);
+ proc->alloc.pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
+ vfree(proc->alloc.buffer);
+ proc->alloc.buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_mmap_lock);
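
Sizing after this hunk, by way of example: for a 1 MiB mapping on a system
with 4 KiB pages, alloc.buffer_size is 0x100000, the alloc.pages array holds
256 entries, only the first page is populated up front (the "alloc small buf"
call), and alloc.free_async_space starts at 512 KiB.
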
@@ -3595,7 +3637,7 @@ static void binder_deferred_release(struct binder_proc *proc)
int threads, nodes, incoming_refs, outgoing_refs, buffers,
active_transactions, page_count;

- BUG_ON(proc->vma);
+ BUG_ON(proc->alloc.vma);
BUG_ON(proc->files);

hlist_del(&proc->proc_node);
@@ -3642,7 +3684,7 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_release_work(&proc->delivered_death);

buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
+ while ((n = rb_first(&proc->alloc.allocated_buffers))) {
struct binder_buffer *buffer;

buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -3663,25 +3705,25 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_stats_deleted(BINDER_STAT_PROC);

page_count = 0;
- if (proc->pages) {
+ if (proc->alloc.pages) {
int i;

- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+ for (i = 0; i < proc->alloc.buffer_size / PAGE_SIZE; i++) {
void *page_addr;

- if (!proc->pages[i])
+ if (!proc->alloc.pages[i])
continue;

- page_addr = proc->buffer + i * PAGE_SIZE;
+ page_addr = proc->alloc.buffer + i * PAGE_SIZE;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d at %p not freed\n",
__func__, proc->pid, i, page_addr);
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
+ __free_page(proc->alloc.pages[i]);
page_count++;
}
- kfree(proc->pages);
- vfree(proc->buffer);
+ kfree(proc->alloc.pages);
+ vfree(proc->alloc.buffer);
}

put_task_struct(proc->tsk);
@@ -3911,7 +3953,8 @@ static void print_binder_proc(struct seq_file *m,
print_binder_ref(m, rb_entry(n, struct binder_ref,
rb_node_desc));
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->alloc.allocated_buffers);
+ n != NULL; n = rb_next(n))
print_binder_buffer(m, " buffer",
rb_entry(n, struct binder_buffer, rb_node));
list_for_each_entry(w, &proc->todo, entry)
@@ -4028,7 +4071,7 @@ static void print_binder_proc_stats(struct seq_file *m,
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ proc->ready_threads, proc->alloc.free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
@@ -4046,7 +4089,8 @@ static void print_binder_proc_stats(struct seq_file *m,
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);

count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->alloc.allocated_buffers);
+ n != NULL; n = rb_next(n))
count++;
seq_printf(m, " buffers: %d\n", count);

diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc8369..c835f09656c1 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -280,7 +280,7 @@ TRACE_EVENT(binder_update_page_range,
TP_fast_assign(
__entry->proc = proc->pid;
__entry->allocate = allocate;
- __entry->offset = start - proc->buffer;
+ __entry->offset = start - proc->alloc.buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
--
2.13.2.725.g09c95d1e9-goog