Subject: [PATCH 51/53] sched/headers, mm: Move 'struct tlbflush_unmap_batch' from <linux/sched.h> to <linux/mm_types_task.h>
Unclutter <linux/sched.h> some more.

Also move the CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH condition inside the
structure definition itself, which removes a pair of #ifdefs from sched.h.
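
To illustrate the pattern with a minimal standalone sketch (not the
kernel code itself; CONFIG_FOO, struct foo_batch and struct foo_user
are made-up names): guarding the members instead of the whole
definition keeps the type visible unconditionally, so embedding sites
need no guards of their own, and with the option off the structure is
simply empty, which occupies zero bytes under the GNU C dialect the
kernel is built with:

  /* Guard the members, not the type: */
  struct foo_batch {
  #ifdef CONFIG_FOO
  	unsigned long pending_mask;	/* only present when CONFIG_FOO is set */
  #endif
  };

  struct foo_user {
  	struct foo_batch batch;		/* no #ifdef pair needed at this site */
  	int id;
  };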

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/mm_types_task.h | 22 ++++++++++++++++++++++
 include/linux/sched.h         | 21 ---------------------
 2 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 9526d8b9fe0e..136dfdf63ba1 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <linux/threads.h>
 #include <linux/atomic.h>
+#include <linux/cpumask.h>
 
 #include <asm/page.h>
 
@@ -62,4 +63,25 @@ struct page_frag {
 #endif
 };
 
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	/*
+	 * Each bit set is a CPU that potentially has a TLB entry for one of
+	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+	 */
+	struct cpumask cpumask;
+
+	/* True if any bit in cpumask is set */
+	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+	 */
+	bool writable;
+#endif
+};
+
 #endif /* _LINUX_MM_TYPES_TASK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fbd3a2dc35bb..531732bf7b13 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -480,25 +480,6 @@ enum perf_event_task_context {
 	perf_nr_task_contexts,
 };
 
-/* Track pages that require TLB flushes */
-struct tlbflush_unmap_batch {
-	/*
-	 * Each bit set is a CPU that potentially has a TLB entry for one of
-	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
-	 */
-	struct cpumask cpumask;
-
-	/* True if any bit in cpumask is set */
-	bool flush_required;
-
-	/*
-	 * If true then the PTE was dirty when unmapped. The entry must be
-	 * flushed before IO is initiated or a stale TLB entry potentially
-	 * allows an update without redirtying the page.
-	 */
-	bool writable;
-};
-
 struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/*
@@ -891,9 +872,7 @@ struct task_struct {
 	unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	struct tlbflush_unmap_batch tlb_ubc;
-#endif
 
 	struct rcu_head rcu;
 
--
2.7.4