Subject: [PATCH v9 3/8] mm/rmap: recognize read-only tlb entries during batched tlb flush
Functionally, no change.  This is preparation for the migrc mechanism,
which needs to recognize read-only TLB entries and handle them
differently.  The newly introduced API, fold_ubc(), will be used by the
migrc mechanism.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
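Not part of the patch: below is a minimal, standalone userspace sketch of
the idea, for illustration only.  The names tlb_batch, fold_batch and
queue_pending are hypothetical stand-ins for struct tlbflush_unmap_batch,
fold_ubc() and set_tlb_ubc_flush_pending(), and a plain bitmask stands in
for the arch-specific batch that arch_tlbbatch_fold()/arch_tlbbatch_clear()
operate on.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct tlbflush_unmap_batch; 'cpus' models the arch batch. */
struct tlb_batch {
	uint64_t cpus;
	bool flush_required;
	bool writable;
};

/* Same semantics as fold_ubc(): merge src into dst, then reset src. */
static void fold_batch(struct tlb_batch *dst, struct tlb_batch *src)
{
	if (!src->flush_required)
		return;

	dst->cpus |= src->cpus;			/* models arch_tlbbatch_fold() */
	dst->writable = dst->writable || src->writable;
	dst->flush_required = true;

	src->cpus = 0;				/* models arch_tlbbatch_clear() */
	src->flush_required = false;
	src->writable = false;
}

/* Models the routing added to set_tlb_ubc_flush_pending(). */
static void queue_pending(struct tlb_batch *rw, struct tlb_batch *ro,
			  bool write, bool dirty, uint64_t cpu_mask)
{
	struct tlb_batch *b = (write || dirty) ? rw : ro;

	b->cpus |= cpu_mask;
	b->flush_required = true;
	if (dirty)
		b->writable = true;
}

int main(void)
{
	struct tlb_batch rw = { 0 }, ro = { 0 };

	queue_pending(&rw, &ro, true, true, 0x1);	/* writable pte */
	queue_pending(&rw, &ro, false, false, 0x2);	/* read-only pte */

	/* Like try_to_unmap_flush(): fold ro into rw before flushing. */
	fold_batch(&rw, &ro);
	printf("flush cpus 0x%llx writable=%d ro pending=%d\n",
	       (unsigned long long)rw.cpus, rw.writable, ro.flush_required);
	return 0;
}

On the normal path nothing changes: try_to_unmap_flush() folds the
read-only batch back into the main one before flushing, which is exactly
what the model above does.
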
 include/linux/sched.h |  1 +
 mm/internal.h         |  4 ++++
 mm/rmap.c             | 31 ++++++++++++++++++++++++++++++-
 3 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3c2abbc587b4..823d83b24364 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1334,6 +1334,7 @@ struct task_struct {
 #endif
 
 	struct tlbflush_unmap_batch	tlb_ubc;
+	struct tlbflush_unmap_batch	tlb_ubc_ro;
 
 	/* Cache last used pipe for splice(): */
 	struct pipe_inode_info		*splice_pipe;
diff --git a/mm/internal.h b/mm/internal.h
index 7e486f2c502c..bbaf1fd97b1c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1019,6 +1019,7 @@ extern struct workqueue_struct *mm_percpu_wq;
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
 void flush_tlb_batched_pending(struct mm_struct *mm);
+void fold_ubc(struct tlbflush_unmap_batch *dst, struct tlbflush_unmap_batch *src);
 #else
 static inline void try_to_unmap_flush(void)
 {
@@ -1029,6 +1030,9 @@ static inline void try_to_unmap_flush_dirty(void)
 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
 {
 }
+static inline void fold_ubc(struct tlbflush_unmap_batch *dst, struct tlbflush_unmap_batch *src)
+{
+}
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 
 extern const struct trace_print_flags pageflag_names[];
diff --git a/mm/rmap.c b/mm/rmap.c
index 3746a5531018..2542bfe1a947 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -635,6 +635,28 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+
+void fold_ubc(struct tlbflush_unmap_batch *dst,
+	      struct tlbflush_unmap_batch *src)
+{
+	if (!src->flush_required)
+		return;
+
+	/*
+	 * Fold src to dst.
+	 */
+	arch_tlbbatch_fold(&dst->arch, &src->arch);
+	dst->writable = dst->writable || src->writable;
+	dst->flush_required = true;
+
+	/*
+	 * Reset src.
+	 */
+	arch_tlbbatch_clear(&src->arch);
+	src->flush_required = false;
+	src->writable = false;
+}
+
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -644,7 +666,9 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
 void try_to_unmap_flush(void)
 {
 	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	struct tlbflush_unmap_batch *tlb_ubc_ro = &current->tlb_ubc_ro;
 
+	fold_ubc(tlb_ubc, tlb_ubc_ro);
 	if (!tlb_ubc->flush_required)
 		return;
 
@@ -675,13 +699,18 @@ void try_to_unmap_flush_dirty(void)
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				       unsigned long uaddr)
 {
-	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	struct tlbflush_unmap_batch *tlb_ubc;
 	int batch;
 	bool writable = pte_dirty(pteval);
 
 	if (!pte_accessible(mm, pteval))
 		return;
 
+	if (pte_write(pteval) || writable)
+		tlb_ubc = &current->tlb_ubc;
+	else
+		tlb_ubc = &current->tlb_ubc_ro;
+
 	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
 	tlb_ubc->flush_required = true;
 
--
2.17.1
