Subject: [PATCH 3/7] x86,tlb: change tlbstate.is_lazy to tlbstate.state
Replace the tlbstate.is_lazy boolean with a tlbstate.state int, so
that more than two states can be expressed. This is preparation for
the next patch in the series.

Signed-off-by: Rik van Riel <riel@surriel.com>
Tested-by: Song Liu <songliubraving@fb.com>
---
arch/x86/include/asm/tlbflush.h | 11 +++++++----
arch/x86/mm/tlb.c | 10 +++++-----
2 files changed, 12 insertions(+), 9 deletions(-)
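
For readers skimming the diff, here is a minimal userspace sketch (not
part of this patch) of the state model the change introduces. The
model_* helpers are illustrative stand-ins for switch_mm_irqs_off(),
enter_lazy_tlb() and the lazy checks in flush_tlb_func_common(); they
are not kernel API.

/* Illustrative userspace model of the tlbstate change; not kernel code. */
#include <assert.h>
#include <stdio.h>

#define TLBSTATE_OK   0  /* CPU is actively using loaded_mm */
#define TLBSTATE_LAZY 1  /* CPU kept loaded_mm but is not actively using it */

struct tlb_state_model {
	int state;  /* replaces the old "bool is_lazy" */
};

/* Stand-in for switch_mm_irqs_off(): switching mm always leaves lazy mode. */
static void model_switch_mm(struct tlb_state_model *ts)
{
	ts->state = TLBSTATE_OK;
}

/* Stand-in for enter_lazy_tlb(): keep CR3 loaded, skip the mm switch. */
static void model_enter_lazy(struct tlb_state_model *ts)
{
	ts->state = TLBSTATE_LAZY;
}

/*
 * Mirrors the checks in flush_tlb_func_common() and
 * tlb_flush_remove_tables_local(): anything other than TLBSTATE_OK
 * means the CPU is not actively using the mm.
 */
static int model_needs_lazy_handling(const struct tlb_state_model *ts)
{
	return ts->state != TLBSTATE_OK;
}

int main(void)
{
	struct tlb_state_model ts = { .state = TLBSTATE_OK };

	model_enter_lazy(&ts);
	assert(model_needs_lazy_handling(&ts));

	model_switch_mm(&ts);
	assert(!model_needs_lazy_handling(&ts));

	puts("state model ok");
	return 0;
}

Note that, as in the diff below, the call sites test
state != TLBSTATE_OK rather than state == TLBSTATE_LAZY, so any
additional non-OK state introduced by a later patch is conservatively
treated as "not actively using the mm" without touching those checks.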

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 3aa3204b5dc0..88a4d6b87ff7 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -169,6 +169,9 @@ struct tlb_context {
u64 tlb_gen;
};

+#define TLBSTATE_OK 0
+#define TLBSTATE_LAZY 1
+
struct tlb_state {
/*
* cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
@@ -186,18 +189,18 @@ struct tlb_state {
* We can be in one of several states:
*
* - Actively using an mm. Our CPU's bit will be set in
- * mm_cpumask(loaded_mm) and is_lazy == false;
+ * mm_cpumask(loaded_mm) and state == TLBSTATE_OK;
*
* - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
- * will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+ * will not be set in mm_cpumask(&init_mm) and state == TLBSTATE_OK.
*
* - Lazily using a real mm. loaded_mm != &init_mm, our bit
- * is set in mm_cpumask(loaded_mm), but is_lazy == true.
+ * is set in mm_cpumask(loaded_mm), but state == TLBSTATE_LAZY.
* We're heuristically guessing that the CR3 load we
* skipped more than makes up for the overhead added by
* lazy mode.
*/
- bool is_lazy;
+ int state;

/*
* If set we changed the page tables in such a way that we
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 61773b07ed54..e063e623e52c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -136,7 +136,7 @@ void leave_mm(int cpu)
return;

/* Warn if we're not lazy. */
- WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
+ WARN_ON(this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK);

switch_mm(NULL, &init_mm, NULL);
}
@@ -227,7 +227,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
__flush_tlb_all();
}
#endif
- this_cpu_write(cpu_tlbstate.is_lazy, false);
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);

/*
* The membarrier system call requires a full memory barrier and
@@ -364,7 +364,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
* old mm loaded and only switch to init_mm when
* tlb_remove_page() happens.
*/
- this_cpu_write(cpu_tlbstate.is_lazy, true);
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
} else {
switch_mm(NULL, &init_mm, NULL);
}
@@ -448,7 +448,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);

- if (this_cpu_read(cpu_tlbstate.is_lazy)) {
+ if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
/*
* We're in lazy mode. We need to at least flush our
* paging-structure cache to avoid speculatively reading
@@ -651,7 +651,7 @@ void tlb_flush_remove_tables_local(void *arg)
struct mm_struct *mm = arg;

if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm &&
- this_cpu_read(cpu_tlbstate.is_lazy))
+ this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
/*
* We're in lazy mode. We need to at least flush our
* paging-structure cache to avoid speculatively reading
--
2.14.4