Subject: [RFC v1 1/4] arm64, mm: identity mapped page table
Create an identity-mapped page table that maps virtual to physical
addresses 1:1.

As on x86, this table can be used by kasan, hibernate, and kexec.
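
A minimal usage sketch (illustration only, not part of this patch): a
caller such as kexec could feed ident_map_pgd_populate() a trivial page
allocator and the block attributes it wants. The helper names and the
page_flags value below are assumptions chosen for the example:

#include <linux/gfp.h>
#include <asm/ident_map.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Hypothetical allocator callback: back intermediate table levels with
 * freshly allocated pages (ident_map_pgd_populate() clears each new page).
 */
static void *example_alloc_pgt_page(void *arg)
{
	return (void *)__get_free_page(GFP_KERNEL);
}

/* Build a 1:1 map of [start, end) into a zeroed, preallocated PGD page. */
static int example_build_ident_map(phys_addr_t pgd_page, phys_addr_t start,
				   phys_addr_t end)
{
	struct ident_map_info info = {
		.alloc_pgt_page	= example_alloc_pgt_page,
		.alloc_arg	= NULL,
		/* Example attributes; real callers pick what they need. */
		.page_flags	= PMD_TYPE_SECT | PMD_SECT_AF |
				  PMD_ATTRINDX(MT_NORMAL),
		.offset		= 0,		/* identity: VA == PA */
		.pud_pages	= false,	/* use PMD-size block entries */
	};

	return ident_map_pgd_populate(&info, pgd_page, start, end);
}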

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/ident_map.h | 26 ++++++++
 arch/arm64/mm/Makefile             |  1 +
 arch/arm64/mm/ident_map.c          | 99 ++++++++++++++++++++++++++++++
 3 files changed, 126 insertions(+)
create mode 100644 arch/arm64/include/asm/ident_map.h
create mode 100644 arch/arm64/mm/ident_map.c

diff --git a/arch/arm64/include/asm/ident_map.h b/arch/arm64/include/asm/ident_map.h
new file mode 100644
index 000000000000..1bb9fcd27368
--- /dev/null
+++ b/arch/arm64/include/asm/ident_map.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ * Pavel Tatashin <patatash@linux.microsoft.com>
+ */
+
+#ifndef _ASM_IDENT_MAP_H
+#define _ASM_IDENT_MAP_H
+
+#include <linux/types.h>
+#include <asm/pgtable.h>
+
+struct ident_map_info {
+	void * (*alloc_pgt_page)(void *);	/* allocate a page */
+	void *alloc_arg;			/* arg. for alloc_pgt_page */
+	unsigned long page_flags;		/* PMD or PUD flags */
+	unsigned long offset;			/* ident mapping offset */
+	bool pud_pages;				/* PUD level huge pages */
+};
+
+int ident_map_pgd_populate(struct ident_map_info *info,
+			   phys_addr_t pgd_page,
+			   phys_addr_t addr,
+			   phys_addr_t end);
+
+#endif /* _ASM_IDENT_MAP_H */
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 849c1df3d214..dfa5a074a360 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -5,6 +5,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 				   context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM64_PTDUMP_CORE) += dump.o
+obj-$(CONFIG_KEXEC_CORE) += ident_map.o
obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS) += ptdump_debugfs.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
diff --git a/arch/arm64/mm/ident_map.c b/arch/arm64/mm/ident_map.c
new file mode 100644
index 000000000000..bcfff5e2573b
--- /dev/null
+++ b/arch/arm64/mm/ident_map.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Microsoft Corporation.
+ * Pavel Tatashin <patatash@linux.microsoft.com>
+ */
+
+#include <asm/ident_map.h>
+#include <asm/pgalloc.h>
+
+/* Initialize PMD size huge entries in page table */
+static void ident_map_pmd_init(struct ident_map_info *info,
+			       phys_addr_t pmd_page, phys_addr_t addr,
+			       phys_addr_t end)
+{
+	const unsigned long flags = info->page_flags;
+	const unsigned long offset = info->offset;
+	pmd_t *pmdp = (pmd_t *)__va(pmd_page) + pmd_index(addr);
+
+	addr &= PMD_MASK;
+	for (; addr < end; addr += PMD_SIZE, pmdp++) {
+		set_pmd(pmdp, __pmd(__phys_to_pmd_val(addr - offset) | flags));
+	}
+}
+
+/* Initialize PUD size huge entries in page table */
+static void ident_map_pud_init(struct ident_map_info *info,
+			       phys_addr_t pud_page, phys_addr_t addr,
+			       phys_addr_t end)
+{
+	const unsigned long flags = info->page_flags;
+	const unsigned long offset = info->offset;
+	pud_t *pudp = (pud_t *)__va(pud_page) + pud_index(addr);
+
+	addr &= PUD_MASK;
+	for (; addr < end; addr += PUD_SIZE, pudp++) {
+		set_pud(pudp, __pud(__phys_to_pud_val(addr - offset) | flags));
+	}
+}
+
+/* Populate PUD level with PMD entries */
+static int ident_map_pud_populate(struct ident_map_info *info,
+				  phys_addr_t pud_page, phys_addr_t addr,
+				  phys_addr_t end)
+{
+	pud_t *pudp = (pud_t *)__va(pud_page) + pud_index(addr);
+	phys_addr_t pmd_page, next;
+
+	for (; addr < end; addr = next, pudp++) {
+		next = pud_addr_end(addr, end);
+		if (pud_none(*pudp)) {
+			void *pmd = info->alloc_pgt_page(info->alloc_arg);
+
+			if (!pmd)
+				return -ENOMEM;
+
+			clear_page(pmd);
+			__pud_populate(pudp, __pa(pmd), PUD_TYPE_TABLE);
+		}
+		pmd_page = __pud_to_phys(*pudp);
+		ident_map_pmd_init(info, pmd_page, addr, next);
+	}
+
+	return 0;
+}
+
+/* Populate identity mapped page table with physical range [addr, end) */
+int ident_map_pgd_populate(struct ident_map_info *info,
+			   phys_addr_t pgd_page, phys_addr_t addr,
+			   phys_addr_t end)
+{
+	const bool pud_pages = info->pud_pages;
+	pgd_t *pgdp = (pgd_t *)__va(pgd_page) + pgd_index(addr);
+	phys_addr_t pud_page, next;
+
+	for (; addr < end; addr = next, pgdp++) {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(*pgdp)) {
+			void *pud = info->alloc_pgt_page(info->alloc_arg);
+
+			if (!pud)
+				return -ENOMEM;
+
+			clear_page(pud);
+			__pgd_populate(pgdp, __pa(pud), PUD_TYPE_TABLE);
+		}
+		pud_page = __pgd_to_phys(*pgdp);
+		if (pud_pages) {
+			ident_map_pud_init(info, pud_page, addr, next);
+		} else {
+			int rv = ident_map_pud_populate(info, pud_page, addr,
+							next);
+
+			if (rv)
+				return rv;
+		}
+	}
+
+	return 0;
+}
--
2.22.0