    Subject: [PATCH, RFC 53/62] x86/mm: Use common code for DMA memory encryption
    Date: 8 May 2019
    From: Jacob Pan <jacob.jun.pan@linux.intel.com>

    Replace the sme_*() helpers with the x86 memory encryption common code so
    that Intel MKTME can be supported underneath the generic DMA code. The
    results of dma_to_phys() and phys_to_dma() are now modified at runtime by
    the memory encryption code.

    Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
    Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    ---
    arch/x86/include/asm/mem_encrypt.h | 29 +++++++++++++++++++++++++++++
    arch/x86/mm/mem_encrypt_common.c   |  2 +-
    include/linux/dma-direct.h         |  4 ++--
    include/linux/mem_encrypt.h        | 23 ++++++++++-------------
    4 files changed, 42 insertions(+), 16 deletions(-)
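
    The definitions of __mem_encrypt_dma_set() and __mem_encrypt_dma_clear()
    live in arch/x86/mm/mem_encrypt_common.c and are not visible in this
    patch's mem_encrypt_common.c hunk, which only swaps an include. As a rough
    sketch of how such common helpers could look (assuming MKTME encodes a
    per-page KeyID in the upper physical address bits; page_keyid(),
    mktme_keyid_mask and mktme_keyid_shift are assumed names, not taken from
    this mail):

	dma_addr_t __mem_encrypt_dma_set(dma_addr_t daddr, phys_addr_t paddr)
	{
		unsigned long keyid;

		/* AMD SME/SEV: set the encryption bit in the DMA address. */
		if (sme_active())
			return __sme_set(daddr);

		/* Intel MKTME: encode the page's KeyID in the upper address bits. */
		keyid = page_keyid(pfn_to_page(__phys_to_pfn(paddr)));
		return (daddr & ~mktme_keyid_mask) | (keyid << mktme_keyid_shift);
	}

	phys_addr_t __mem_encrypt_dma_clear(phys_addr_t paddr)
	{
		/* Strip whichever encryption metadata is present. */
		if (sme_active())
			return __sme_clr(paddr);

		return paddr & ~mktme_keyid_mask;
	}

    With helpers along these lines, phys_to_dma() and dma_to_phys() below no
    longer need to know which encryption scheme is active.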

    diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
    index 616f8e637bc3..a2b69cbb0e41 100644
    --- a/arch/x86/include/asm/mem_encrypt.h
    +++ b/arch/x86/include/asm/mem_encrypt.h
    @@ -55,8 +55,19 @@ bool sev_active(void);

    #define __bss_decrypted __attribute__((__section__(".bss..decrypted")))

    +/*
    + * The __sme_set() and __sme_clr() macros are useful for adding or removing
    + * the encryption mask from a value (e.g. when dealing with pagetable
    + * entries).
    + */
    +#define __sme_set(x) ((x) | sme_me_mask)
    +#define __sme_clr(x) ((x) & ~sme_me_mask)
    +
    #else /* !CONFIG_AMD_MEM_ENCRYPT */

    +#define __sme_set(x) (x)
    +#define __sme_clr(x) (x)
    +
    #define sme_me_mask 0ULL

    static inline void __init sme_early_encrypt(resource_size_t paddr,
    @@ -97,4 +108,22 @@ extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];

    #endif /* __ASSEMBLY__ */

    +#ifdef CONFIG_X86_MEM_ENCRYPT_COMMON
    +
    +extern dma_addr_t __mem_encrypt_dma_set(dma_addr_t daddr, phys_addr_t paddr);
    +extern phys_addr_t __mem_encrypt_dma_clear(phys_addr_t paddr);
    +
    +#else
    +static inline dma_addr_t __mem_encrypt_dma_set(dma_addr_t daddr, phys_addr_t paddr)
    +{
    +	return daddr;
    +}
    +
    +static inline phys_addr_t __mem_encrypt_dma_clear(phys_addr_t paddr)
    +{
    +	return paddr;
    +}
    +#endif /* CONFIG_X86_MEM_ENCRYPT_COMMON */
    +
    +
    #endif /* __X86_MEM_ENCRYPT_H__ */
    diff --git a/arch/x86/mm/mem_encrypt_common.c b/arch/x86/mm/mem_encrypt_common.c
    index 2adee65eec46..dcc5c710a235 100644
    --- a/arch/x86/mm/mem_encrypt_common.c
    +++ b/arch/x86/mm/mem_encrypt_common.c
    @@ -1,5 +1,5 @@
    #include <linux/mm.h>
    -#include <linux/mem_encrypt.h>
    +#include <asm/mem_encrypt.h>
    #include <asm/mktme.h>

    /*
    diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
    index b7338702592a..a949adeb6558 100644
    --- a/include/linux/dma-direct.h
    +++ b/include/linux/dma-direct.h
    @@ -40,12 +40,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
    */
    static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
    {
    - return __sme_set(__phys_to_dma(dev, paddr));
    + return __mem_encrypt_dma_set(__phys_to_dma(dev, paddr), paddr);
    }

    static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
    {
    - return __sme_clr(__dma_to_phys(dev, daddr));
    + return __mem_encrypt_dma_clear(__dma_to_phys(dev, daddr));
    }

    u64 dma_direct_get_required_mask(struct device *dev);
    diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
    index b310a9c18113..ce8ff0ead16c 100644
    --- a/include/linux/mem_encrypt.h
    +++ b/include/linux/mem_encrypt.h
    @@ -26,6 +26,16 @@
    static inline bool sme_active(void) { return false; }
    static inline bool sev_active(void) { return false; }

    +static inline dma_addr_t __mem_encrypt_dma_set(dma_addr_t daddr, phys_addr_t paddr)
    +{
    +	return daddr;
    +}
    +
    +static inline phys_addr_t __mem_encrypt_dma_clear(phys_addr_t paddr)
    +{
    +	return paddr;
    +}
    +
    #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */

    static inline bool mem_encrypt_active(void)
    @@ -38,19 +48,6 @@ static inline u64 sme_get_me_mask(void)
    return sme_me_mask;
    }

    -#ifdef CONFIG_AMD_MEM_ENCRYPT
    -/*
    - * The __sme_set() and __sme_clr() macros are useful for adding or removing
    - * the encryption mask from a value (e.g. when dealing with pagetable
    - * entries).
    - */
    -#define __sme_set(x) ((x) | sme_me_mask)
    -#define __sme_clr(x) ((x) & ~sme_me_mask)
    -#else
    -#define __sme_set(x) (x)
    -#define __sme_clr(x) (x)
    -#endif
    -
    #endif /* __ASSEMBLY__ */

    #endif /* __MEM_ENCRYPT_H__ */
    --
    2.20.1