    Subject: [PATCH v3 12/14] iommu/amd: Introduce iommu_v1_map_page and iommu_v1_unmap_page
    These implement the map and unmap callbacks for the AMD IOMMU v1 pagetable,
    which will be used by the IO pagetable framework.

    Also clean up unused extern function declarations.

    Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
    ---
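    [ Editor's note, not part of the patch: the hunks below turn
      iommu_map_page()/iommu_unmap_page() into callbacks that v1_alloc_pgtable()
      installs in pgtable->iop.ops and that amd_iommu_map()/amd_iommu_unmap()
      reach through ops->map()/ops->unmap(). The standalone sketch that follows
      only illustrates that register-and-dispatch pattern: the struct is a
      simplified stand-in for struct io_pgtable_ops, the signatures are trimmed
      (no gfp flags or iotlb gather), and every example_* name is hypothetical. ]

    /* Simplified stand-in for struct io_pgtable_ops (illustration only). */
    #include <stddef.h>
    #include <stdio.h>

    struct example_pgtable_ops {
            /* Filled in by the page-table implementation, cf. v1_alloc_pgtable(). */
            int (*map)(struct example_pgtable_ops *ops, unsigned long iova,
                       unsigned long long paddr, size_t size, int prot);
            unsigned long (*unmap)(struct example_pgtable_ops *ops,
                                   unsigned long iova, size_t size);
    };

    /* Stand-ins for iommu_v1_map_page()/iommu_v1_unmap_page(). */
    static int example_v1_map(struct example_pgtable_ops *ops, unsigned long iova,
                              unsigned long long paddr, size_t size, int prot)
    {
            /* A real implementation would walk and update the v1 page table. */
            (void)ops; (void)prot;
            printf("map   iova=0x%lx -> paddr=0x%llx size=%zu\n", iova, paddr, size);
            return 0;
    }

    static unsigned long example_v1_unmap(struct example_pgtable_ops *ops,
                                          unsigned long iova, size_t size)
    {
            /* A real implementation would clear PTEs and return the unmapped size. */
            (void)ops;
            printf("unmap iova=0x%lx size=%zu\n", iova, size);
            return size;
    }

    int main(void)
    {
            /* Registration: the page-table allocator installs the callbacks... */
            struct example_pgtable_ops ops = {
                    .map   = example_v1_map,
                    .unmap = example_v1_unmap,
            };

            /* ...and callers such as amd_iommu_map()/amd_iommu_unmap() then
             * dispatch through the ops pointer instead of calling the old
             * exported helpers directly. */
            ops.map(&ops, 0x1000, 0x2000, 4096, 0);
            ops.unmap(&ops, 0x1000, 4096);
            return 0;
    }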
     drivers/iommu/amd/amd_iommu.h  | 13 -------------
     drivers/iommu/amd/io_pgtable.c | 25 ++++++++++++-------------
     drivers/iommu/amd/iommu.c      |  7 ++++---
     3 files changed, 16 insertions(+), 29 deletions(-)

    diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
    index 69996e57fae2..2e8dc2a1ec0f 100644
    --- a/drivers/iommu/amd/amd_iommu.h
    +++ b/drivers/iommu/amd/amd_iommu.h
    @@ -124,19 +124,6 @@ void amd_iommu_apply_ivrs_quirks(void);
     static inline void amd_iommu_apply_ivrs_quirks(void) { }
     #endif

    -/* TODO: These are temporary and will be removed once fully transition */
    -extern int iommu_map_page(struct protection_domain *dom,
    -                          unsigned long bus_addr,
    -                          unsigned long phys_addr,
    -                          unsigned long page_size,
    -                          int prot,
    -                          gfp_t gfp);
    -extern unsigned long iommu_unmap_page(struct protection_domain *dom,
    -                                      unsigned long bus_addr,
    -                                      unsigned long page_size);
    -extern u64 *fetch_pte(struct amd_io_pgtable *pgtable,
    -                      unsigned long address,
    -                      unsigned long *page_size);
     extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
                                              u64 *root, int mode);
     extern void amd_iommu_free_pgtable(struct amd_io_pgtable *pgtable);
    diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
    index 7841e5e1e563..d8b329aa0bb2 100644
    --- a/drivers/iommu/amd/io_pgtable.c
    +++ b/drivers/iommu/amd/io_pgtable.c
    @@ -317,9 +317,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
      * This function checks if there is a PTE for a given dma address. If
      * there is one, it returns the pointer to it.
      */
    -u64 *fetch_pte(struct amd_io_pgtable *pgtable,
    -               unsigned long address,
    -               unsigned long *page_size)
    +static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
    +                      unsigned long address,
    +                      unsigned long *page_size)
     {
             int level;
             u64 *pte;
    @@ -392,13 +392,10 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
      * supporting all features of AMD IOMMU page tables like level skipping
      * and full 64 bit address spaces.
      */
    -int iommu_map_page(struct protection_domain *dom,
    -                   unsigned long iova,
    -                   unsigned long paddr,
    -                   unsigned long size,
    -                   int prot,
    -                   gfp_t gfp)
    +static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
    +                             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
     {
    +        struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
             struct page *freelist = NULL;
             bool updated = false;
             u64 __pte, *pte;
    @@ -461,11 +458,11 @@ int iommu_map_page(struct protection_domain *dom,
             return ret;
     }

    -unsigned long iommu_unmap_page(struct protection_domain *dom,
    -                               unsigned long iova,
    -                               unsigned long size)
    +static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
    +                                         unsigned long iova,
    +                                         size_t size,
    +                                         struct iommu_iotlb_gather *gather)
     {
    -        struct io_pgtable_ops *ops = &dom->iop.iop.ops;
             struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
             unsigned long long unmapped;
             unsigned long unmap_size;
    @@ -525,6 +522,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
     {
             struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

    +        pgtable->iop.ops.map          = iommu_v1_map_page;
    +        pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
             pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;

             return &pgtable->iop;
    diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
    index 9a1a16031e00..77f44b927ae7 100644
    --- a/drivers/iommu/amd/iommu.c
    +++ b/drivers/iommu/amd/iommu.c
    @@ -2044,6 +2044,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
                              gfp_t gfp)
     {
             struct protection_domain *domain = to_pdomain(dom);
    +        struct io_pgtable_ops *ops = &domain->iop.iop.ops;
             int prot = 0;
             int ret;

    @@ -2055,8 +2056,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
             if (iommu_prot & IOMMU_WRITE)
                     prot |= IOMMU_PROT_IW;

    -        ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
    -
    +        ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
             domain_flush_np_cache(domain, iova, page_size);

             return ret;
    @@ -2067,11 +2067,12 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
                                   struct iommu_iotlb_gather *gather)
     {
             struct protection_domain *domain = to_pdomain(dom);
    +        struct io_pgtable_ops *ops = &domain->iop.iop.ops;

             if (domain->iop.mode == PAGE_MODE_NONE)
                     return 0;

    -        return iommu_unmap_page(domain, iova, page_size);
    +        return ops->unmap(ops, iova, page_size, gather);
     }

     static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
    --
    2.17.1
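    [ Editor's note: iommu_v1_map_page() above recovers its protection_domain
      from the ops pointer via io_pgtable_ops_to_domain(). The standalone sketch
      below shows the container_of-style embedding that makes such a lookup work;
      the structs are simplified stand-ins and all example_* names are
      hypothetical, not kernel definitions. ]

    #include <stddef.h>
    #include <stdio.h>

    struct example_ops {
            int dummy;                      /* would hold the map/unmap callbacks */
    };

    struct example_domain {
            unsigned long id;
            struct example_ops ops;         /* embedded, like domain->iop.iop.ops */
    };

    /* container_of in miniature: subtract the member offset from its address. */
    #define example_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct example_domain *example_ops_to_domain(struct example_ops *ops)
    {
            return example_container_of(ops, struct example_domain, ops);
    }

    int main(void)
    {
            struct example_domain dom = { .id = 42 };
            struct example_ops *ops = &dom.ops;

            /* A callback that is handed only 'ops' can still reach its domain. */
            printf("domain id = %lu\n", example_ops_to_domain(ops)->id);
            return 0;
    }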