Subject: Re: [PATCH 25/43] x86/mm/kaiser: Unmap kernel from userspace page tables (core patch)

On Fri, Nov 24, 2017 at 06:23:53PM +0100, Ingo Molnar wrote:
> diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
> index e1650da01323..d087c3aa0514 100644
> --- a/arch/x86/entry/calling.h
> +++ b/arch/x86/entry/calling.h
> @@ -2,6 +2,7 @@
> #include <linux/jump_label.h>
> #include <asm/unwind_hints.h>
> #include <asm/cpufeatures.h>
> +#include <asm/page_types.h>
>
> /*
>
> diff --git a/arch/x86/include/asm/kaiser.h b/arch/x86/include/asm/kaiser.h
> new file mode 100644
> index 000000000000..3c2cc71b4058
> --- /dev/null
> +++ b/arch/x86/include/asm/kaiser.h
> @@ -0,0 +1,57 @@
> +#ifndef _ASM_X86_KAISER_H
> +#define _ASM_X86_KAISER_H
> +/*
> + * Copyright(c) 2017 Intel Corporation. All rights reserved.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of version 2 of the GNU General Public License as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but
> + * WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * General Public License for more details.
> + *
> + * Based on work published here: https://github.com/IAIK/KAISER
> + * Modified by Dave Hansen <dave.hansen@intel.com> to actually work.
> + */
> +#ifndef __ASSEMBLY__
> +
> +#ifdef CONFIG_KAISER
> +/**
> + * kaiser_add_mapping - map a kernel range into the user page tables
> + * @addr: the start address of the range
> + * @size: the size of the range
> + * @flags: The mapping flags of the pages
> + *
> + * Use this on all data and code that need to be mapped into both
> + * copies of the page tables. This includes the code that switches
> + * to/from userspace and all of the hardware structures that are
> + * virtually-addressed and needed in userspace like the interrupt
> + * table.
> + */
> +extern int kaiser_add_mapping(unsigned long addr, unsigned long size,
> + unsigned long flags);
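
Just to make the interface concrete (hypothetical call, not from this
patch): mapping a per-cpu hardware structure into both page-table
copies would look something like this:

	/* the entry code needs the TSS with the user CR3, too: */
	ret = kaiser_add_mapping((unsigned long)&per_cpu(cpu_tss, cpu),
				 sizeof(struct tss_struct), __PAGE_KERNEL);
	if (ret)
		return ret;
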
> +
> +/**
> + * kaiser_remove_mapping - remove a kernel mapping from the user page tables
> + * @start: the start address of the range
> + * @size: the size of the range
> + */
> +extern void kaiser_remove_mapping(unsigned long start, unsigned long size);
> +
> +/**
> + * kaiser_init - Initialize the shadow mapping
> + *
> + * Most parts of the shadow mapping can be mapped at boot
> + * time. Only per-process things like the thread stacks
> + * or a new LDT have to be mapped at runtime. These boot-
> + * time mappings are permanent and never unmapped.
> + */
> +extern void kaiser_init(void);

Those externs are not needed.

> +
> +#endif
> +
> +#endif /* __ASSEMBLY__ */
> +
> +#endif /* _ASM_X86_KAISER_H */
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index f735c3016325..d3901124143f 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -1106,6 +1106,11 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
> static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
> {
> memcpy(dst, src, count * sizeof(pgd_t));
> +#ifdef CONFIG_KAISER
> + /* Clone the shadow pgd part as well */
> + memcpy(kernel_to_shadow_pgdp(dst), kernel_to_shadow_pgdp(src),
> + count * sizeof(pgd_t));
> +#endif
> }
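
FWIW, the main caller here is pgd_ctor(), IIRC, which clones the
kernel half of swapper_pg_dir into each new mm:

	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

so with this hunk the shadow halves get cloned in lockstep, which is
what we want.
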
>
> #define PTE_SHIFT ilog2(PTRS_PER_PTE)
> diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
> index e9f05331e732..c239839e92bd 100644
> --- a/arch/x86/include/asm/pgtable_64.h
> +++ b/arch/x86/include/asm/pgtable_64.h
> @@ -131,9 +131,137 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp)
> #endif
> }
>
> +#ifdef CONFIG_KAISER
> +/*
> + * All top-level KAISER page tables are order-1 pages (8k-aligned
> + * and 8k in size). The kernel one occupies the first 4k and
> + * the user (shadow) one the last 4k. To switch between them,
> + * you just need to flip bit 12 (PAGE_SHIFT) in their addresses.
> + */
> +#define KAISER_PGTABLE_SWITCH_BIT PAGE_SHIFT
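
Perhaps make that concrete with an example: the pgd allocation is
8k-aligned, so bit 12 is clear in the kernel copy's address and the
user copy is simply:

	kernel_pgd = 0xffff88007c3ee000;
	user_pgd   = kernel_pgd | (1UL << PAGE_SHIFT); /* 0xffff88007c3ef000 */

(addresses made up, obviously).
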
> +
> +/*
> + * This generates better code than the inline assembly in
> + * __set_bit().
> + */
> +static inline void *ptr_set_bit(void *ptr, int bit)
> +{
> + unsigned long __ptr = (unsigned long)ptr;
> +
> + __ptr |= (1<<bit);

__ptr |= BIT(bit);

Ditto below.
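
IOW, something like this (untested):

static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

which also gets rid of the signed constant: 1<<bit is an int and would
be problematic for bit > 30, while BIT() is 1UL << bit.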

> + return (void *)__ptr;
> +}
> +static inline void *ptr_clear_bit(void *ptr, int bit)
> +{
> + unsigned long __ptr = (unsigned long)ptr;
> +
> + __ptr &= ~(1<<bit);
> + return (void *)__ptr;
> +}
> +
> +static inline pgd_t *kernel_to_shadow_pgdp(pgd_t *pgdp)
> +{
> + return ptr_set_bit(pgdp, KAISER_PGTABLE_SWITCH_BIT);
> +}
> +static inline pgd_t *shadow_to_kernel_pgdp(pgd_t *pgdp)
> +{
> + return ptr_clear_bit(pgdp, KAISER_PGTABLE_SWITCH_BIT);
> +}
> +static inline p4d_t *kernel_to_shadow_p4dp(p4d_t *p4dp)
> +{
> + return ptr_set_bit(p4dp, KAISER_PGTABLE_SWITCH_BIT);
> +}
> +static inline p4d_t *shadow_to_kernel_p4dp(p4d_t *p4dp)
> +{
> + return ptr_clear_bit(p4dp, KAISER_PGTABLE_SWITCH_BIT);
> +}
> +#endif /* CONFIG_KAISER */
> +
> +/*
> + * Page table pages are page-aligned. The lower half of the top
> + * level is used for userspace and the top half for the kernel.
> + *
> + * Returns true for parts of the PGD that map userspace and
> + * false for the parts that map the kernel.
> + */
> +static inline bool pgdp_maps_userspace(void *__ptr)
> +{
> + unsigned long ptr = (unsigned long)__ptr;
> +
> + return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);

This generates

return (ptr & ~(~(((1UL) << 12)-1))) < (((1UL) << 12) / 2);

and if you turn that ~PAGE_MASK into PAGE_SIZE-1 you get

return (ptr & (((1UL) << 12)-1)) < (((1UL) << 12) / 2);

which removes the self-cancelling negation:

return (ptr & (PAGE_SIZE-1)) < (PAGE_SIZE / 2);

The final asm is the same, though.
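
IOW, the whole helper becomes:

static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (ptr & (PAGE_SIZE - 1)) < (PAGE_SIZE / 2);
}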

> +
> +/*
> + * Does this PGD allow access from userspace?
> + */
> +static inline bool pgd_userspace_access(pgd_t pgd)
> +{
> + return pgd.pgd & _PAGE_USER;
> +}
> +
> +/*
> + * Take a PGD location (pgdp) and a pgd value that needs
> + * to be set there. Populates the shadow and returns
> + * the resulting PGD that must be set in the kernel copy
> + * of the page tables.
> + */
> +static inline pgd_t kaiser_set_shadow_pgd(pgd_t *pgdp, pgd_t pgd)
> +{
> +#ifdef CONFIG_KAISER
> + if (pgd_userspace_access(pgd)) {
> + if (pgdp_maps_userspace(pgdp)) {
> + /*
> + * The user/shadow page tables get the full
> + * PGD, accessible from userspace:
> + */
> + kernel_to_shadow_pgdp(pgdp)->pgd = pgd.pgd;
> + /*
> + * For the copy of the pgd that the kernel
> + * uses, make it unusable to userspace. This
> + * ensures if we get out to userspace with the
> + * wrong CR3 value, userspace will crash
> + * instead of running.
> + */
> + pgd.pgd |= _PAGE_NX;
> + }
> + } else if (pgd_userspace_access(*pgdp)) {
> + /*
> + * We are clearing a _PAGE_USER PGD for which we
> + * presumably populated the shadow. We must now
> + * clear the shadow PGD entry.
> + */
> + if (pgdp_maps_userspace(pgdp)) {
> + kernel_to_shadow_pgdp(pgdp)->pgd = pgd.pgd;
> + } else {
> + /*
> + * Attempted to clear a _PAGE_USER PGD which
> + * is in the kernel portion of the address
> + * space. PGDs are pre-populated and we
> + * never clear them.
> + */
> + WARN_ON_ONCE(1);
> + }
> + } else {
> + /*
> + * _PAGE_USER was not set in either the PGD being set
> + * or cleared. All kernel PGDs should be
> + * pre-populated so this should never happen after
> + * boot.
> + */

So maybe do:

WARN_ON_ONCE(system_state == SYSTEM_RUNNING);
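
... so that we still warn if a kernel PGD gets established or cleared
after init, but stay quiet about the expected boot-time
pre-population, i.e.:

	} else {
		/* All kernel PGDs are pre-populated during boot: */
		WARN_ON_ONCE(system_state == SYSTEM_RUNNING);
	}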

> + }
> +#endif
> + /* return the copy of the PGD we want the kernel to use: */
> + return pgd;
> +}
> +
> +
> static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
> {
> +#if defined(CONFIG_KAISER) && !defined(CONFIG_X86_5LEVEL)
> + p4dp->pgd = kaiser_set_shadow_pgd(&p4dp->pgd, p4d.pgd);
> +#else /* CONFIG_KAISER */

No need for that comment.

> *p4dp = p4d;
> +#endif
> }
>
> static inline void native_p4d_clear(p4d_t *p4d)
> @@ -147,7 +275,11 @@ static inline void native_p4d_clear(p4d_t *p4d)
>
> static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
> {
> +#ifdef CONFIG_KAISER
> + *pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
> +#else /* CONFIG_KAISER */

No need for that comment.
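
I.e., simply:

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
#ifdef CONFIG_KAISER
	*pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
#else
	*pgdp = pgd;
#endif
}

Ditto for native_set_p4d() above.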

> *pgdp = pgd;
> +#endif
> }
>
> static inline void native_pgd_clear(pgd_t *pgd)
> diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
> index 7d7715dde901..4780dba2cc59 100644
> --- a/arch/x86/kernel/espfix_64.c
> +++ b/arch/x86/kernel/espfix_64.c
> @@ -41,6 +41,7 @@
> #include <asm/pgalloc.h>
> #include <asm/setup.h>
> #include <asm/espfix.h>
> +#include <asm/kaiser.h>
>
> /*
> * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
> @@ -128,6 +129,22 @@ void __init init_espfix_bsp(void)
> pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
> p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
> p4d_populate(&init_mm, p4d, espfix_pud_page);

<---- newline here.

> + /*
> + * Just copy the top-level PGD that is mapping the espfix
> + * area to ensure it is mapped into the shadow user page
> + * tables.
> + *
> + * For 5-level paging, the espfix pgd was populated when
> + * kaiser_init() pre-populated all the pgd entries. The above
> + * p4d_alloc() would never do anything and the p4d_populate()
> + * would be done to a p4d already mapped in the userspace pgd.
> + */
> +#ifdef CONFIG_KAISER
> + if (CONFIG_PGTABLE_LEVELS <= 4) {
> + set_pgd(kernel_to_shadow_pgdp(pgd),
> + __pgd(_KERNPG_TABLE | (p4d_pfn(*p4d) << PAGE_SHIFT)));
> + }
> +#endif
>
> /* Randomize the locations */
> init_espfix_random();

End of part I of the review of this biggy :)

--
Regards/Gruss,
Boris.
Good mailing practices for 400: avoid top-posting and trim the reply.
