Subject: Re: [PATCH v2 1/3] iommu/io-pgtable-arm: Add way to debug pgtable walk
On Tue, Oct 05, 2021 at 08:16:25AM -0700, Rob Clark wrote:
> From: Rob Clark <robdclark@chromium.org>
>
> Add an io-pgtable method to retrieve the raw PTEs that would be
> traversed for a given iova access.
>
> Signed-off-by: Rob Clark <robdclark@chromium.org>
> ---
>  drivers/iommu/io-pgtable-arm.c | 40 +++++++++++++++++++++++++++-------
>  include/linux/io-pgtable.h     |  9 ++++++++
>  2 files changed, 41 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
> index dd9e47189d0d..c470fc0b3c2b 100644
> --- a/drivers/iommu/io-pgtable-arm.c
> +++ b/drivers/iommu/io-pgtable-arm.c
> @@ -700,38 +700,61 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
>  	return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
>  }
>
> -static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
> -					 unsigned long iova)
> +static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
> +				 void *_ptes, int *num_ptes)
>  {
>  	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
>  	arm_lpae_iopte pte, *ptep = data->pgd;
> +	arm_lpae_iopte *ptes = _ptes;
> +	int max_ptes = *num_ptes;
>  	int lvl = data->start_level;
>
> +	*num_ptes = 0;
> +
>  	do {
> +		if (*num_ptes >= max_ptes)
> +			return -ENOSPC;
> +
>  		/* Valid IOPTE pointer? */
>  		if (!ptep)
> -			return 0;
> +			return -EFAULT;
>
>  		/* Grab the IOPTE we're interested in */
>  		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
>  		pte = READ_ONCE(*ptep);
>
> +		ptes[(*num_ptes)++] = pte;
> +
>  		/* Valid entry? */
>  		if (!pte)
> -			return 0;
> +			return -EFAULT;
>
>  		/* Leaf entry? */
>  		if (iopte_leaf(pte, lvl, data->iop.fmt))
> -			goto found_translation;
> +			return 0;
>
>  		/* Take it to the next level */
>  		ptep = iopte_deref(pte, data);
>  	} while (++lvl < ARM_LPAE_MAX_LEVELS);
>
> -	/* Ran out of page tables to walk */
> -	return 0;
> +	return -EFAULT;
> +}
> +
> +static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
> +					 unsigned long iova)
> +{
> +	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
> +	arm_lpae_iopte pte, ptes[ARM_LPAE_MAX_LEVELS];
> +	int lvl, num_ptes = ARM_LPAE_MAX_LEVELS;
> +	int ret;
> +
> +	ret = arm_lpae_pgtable_walk(ops, iova, ptes, &num_ptes);
> +	if (ret)
> +		return 0;
> +
> +	pte = ptes[num_ptes - 1];
> +	lvl = num_ptes - 1 + data->start_level;
>
> -found_translation:
>  	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
>  	return iopte_to_paddr(pte, data) | iova;
>  }
> @@ -816,6 +839,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
>  		.unmap		= arm_lpae_unmap,
>  		.unmap_pages	= arm_lpae_unmap_pages,
>  		.iova_to_phys	= arm_lpae_iova_to_phys,
> +		.pgtable_walk	= arm_lpae_pgtable_walk,
>  	};
>
>  	return data;
> diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
> index 86af6f0a00a2..501f362a929c 100644
> --- a/include/linux/io-pgtable.h
> +++ b/include/linux/io-pgtable.h
> @@ -148,6 +148,13 @@ struct io_pgtable_cfg {
>   * @unmap:        Unmap a physically contiguous memory region.
>   * @unmap_pages:  Unmap a range of virtually contiguous pages of the same size.
>   * @iova_to_phys: Translate iova to physical address.
> + * @pgtable_walk: Return details of a page table walk for a given iova.
> + *                This returns the array of PTEs in a format that is
> + *                specific to the page table format. The number of
> + *                PTEs can be format specific. The num_ptes parameter
> + *                on input specifies the size of the ptes array, and
> + *                on output the number of PTEs filled in (which depends
> + *                on the number of PTEs walked to resolve the iova)

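For reference, a caller would presumably use this op along the lines of the
sketch below (names and buffer size here are made up, and this assumes the
io_pgtable_ops member ends up with the same signature as
arm_lpae_pgtable_walk()):

#include <linux/io-pgtable.h>
#include <linux/printk.h>

/* Hypothetical debug helper, not part of the patch. */
static void debug_dump_walk(struct io_pgtable_ops *ops, unsigned long iova)
{
	u64 ptes[4];			/* 4 levels covers the LPAE formats */
	int num_ptes = ARRAY_SIZE(ptes);
	int ret, i;

	ret = ops->pgtable_walk(ops, iova, ptes, &num_ptes);
	if (ret)
		pr_info("walk for iova %#lx stopped with %d after %d level(s)\n",
			iova, ret, num_ptes);

	/* Whatever was gathered before the walk stopped is still printed. */
	for (i = 0; i < num_ptes; i++)
		pr_info("  lvl %d: pte %#llx\n", i, ptes[i]);
}
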
I think this would be a fair bit cleaner if the interface instead took a
callback function to invoke at each page-table level. The callback would be
invoked with the pte value and the level, and depending on its return value
the walk could be terminated early. That would also potentially scale to
walking ranges of iovas if we ever need it, and it may be more readily
implementable by other page-table formats too.
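
Roughly something like this (just a sketch; the callback type and the names
below are invented for illustration, not an existing io-pgtable API):

/*
 * Per-level visitor: called once for each PTE traversed, together with the
 * level it was found at. A non-zero return value terminates the walk early
 * and is propagated back to the caller of pgtable_walk().
 */
typedef int (*io_pgtable_visit_fn)(void *cookie, int lvl, u64 pte);

/* The op would then take the callback plus an opaque cookie: */
int (*pgtable_walk)(struct io_pgtable_ops *ops, unsigned long iova,
		    io_pgtable_visit_fn fn, void *cookie);

arm_lpae_pgtable_walk() would then just call fn(cookie, lvl, pte) at each
level instead of filling a caller-supplied array, and a future range-based
walk could reuse the same callback.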

>   *
>   * These functions map directly onto the iommu_ops member functions with
>   * the same names.

This bit of the comment is no longer true with your change.

Will
