Subject: [PATCH v3 01/10] fsdax: Factor helpers to simplify dax fault code
The dax page fault code is too long and a bit difficult to read, and it is
hard to follow when adding new features. Several of the PTE and PMD paths
share similar logic, so factor that logic out into helper functions to
simplify the code.

Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 fs/dax.c | 152 ++++++++++++++++++++++++++++++-------------------------
 1 file changed, 84 insertions(+), 68 deletions(-)

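Not part of the patch itself, but for readers who want to exercise the two
fault flavours these helpers serve, here is a minimal userspace sketch. It
assumes a file on a filesystem mounted with -o dax; the path below is only a
placeholder. The MAP_SYNC mapping makes write faults take the synchronous
path that dax_fault_synchronous_pfnp() now handles, while the MAP_PRIVATE
mapping makes the first write fault go through the copy-on-write path that
dax_fault_cow_page() now handles:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SYNC
#define MAP_SYNC		0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE	0x03
#endif

int main(int argc, char **argv)
{
	/* Path is only a placeholder; use any file on a dax-mounted fs. */
	const char *path = argc > 1 ? argv[1] : "/mnt/pmem/testfile";
	char *shared, *priv;
	int fd = open(path, O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror(path);
		return 1;
	}

	/*
	 * MAP_SYNC write fault: the PTE may only be installed once the
	 * block allocation is durable, i.e. the VM_FAULT_NEEDDSYNC path
	 * handled by dax_fault_synchronous_pfnp().
	 */
	shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		      MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (shared == MAP_FAILED)
		perror("mmap MAP_SYNC");	/* e.g. not a DAX mapping */
	else
		shared[0] = 'x';		/* synchronous fault path */

	/*
	 * Private mapping: the first write fault copies the on-media block
	 * into an anonymous page, i.e. the path factored out into
	 * dax_fault_cow_page().
	 */
	priv = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (priv == MAP_FAILED) {
		perror("mmap MAP_PRIVATE");
		return 1;
	}
	priv[0] = 'y';				/* COW fault path */

	if (shared != MAP_FAILED)
		munmap(shared, 4096);
	munmap(priv, 4096);
	close(fd);
	return 0;
}
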
diff --git a/fs/dax.c b/fs/dax.c
index 26d5dcd2d69e..7031e4302b13 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1243,6 +1243,52 @@ static bool dax_fault_is_synchronous(unsigned long flags,
 		&& (iomap->flags & IOMAP_F_DIRTY);
 }
 
+/*
+ * If we are doing synchronous page fault and inode needs fsync, we can insert
+ * PTE/PMD into page tables only after that happens. Skip insertion for now and
+ * return the pfn so that caller can insert it after fsync is done.
+ */
+static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
+{
+	if (WARN_ON_ONCE(!pfnp))
+		return VM_FAULT_SIGBUS;
+
+	*pfnp = pfn;
+	return VM_FAULT_NEEDDSYNC;
+}
+
+static int dax_fault_cow_page(struct vm_fault *vmf, struct iomap *iomap,
+		loff_t pos, vm_fault_t *ret)
+{
+	int error = 0;
+	unsigned long vaddr = vmf->address;
+	sector_t sector = dax_iomap_sector(iomap, pos);
+
+	switch (iomap->type) {
+	case IOMAP_HOLE:
+	case IOMAP_UNWRITTEN:
+		clear_user_highpage(vmf->cow_page, vaddr);
+		break;
+	case IOMAP_MAPPED:
+		error = copy_cow_page_dax(iomap->bdev, iomap->dax_dev,
+					  sector, vmf->cow_page, vaddr);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		error = -EIO;
+		break;
+	}
+
+	if (error)
+		return error;
+
+	__SetPageUptodate(vmf->cow_page);
+	*ret = finish_fault(vmf);
+	if (!*ret)
+		*ret = VM_FAULT_DONE_COW;
+	return 0;
+}
+
 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       int *iomap_errp, const struct iomap_ops *ops)
 {
@@ -1311,30 +1357,9 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	}
 
 	if (vmf->cow_page) {
-		sector_t sector = dax_iomap_sector(&iomap, pos);
-
-		switch (iomap.type) {
-		case IOMAP_HOLE:
-		case IOMAP_UNWRITTEN:
-			clear_user_highpage(vmf->cow_page, vaddr);
-			break;
-		case IOMAP_MAPPED:
-			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
-						  sector, vmf->cow_page, vaddr);
-			break;
-		default:
-			WARN_ON_ONCE(1);
-			error = -EIO;
-			break;
-		}
-
+		error = dax_fault_cow_page(vmf, &iomap, pos, &ret);
 		if (error)
-			goto error_finish_iomap;
-
-		__SetPageUptodate(vmf->cow_page);
-		ret = finish_fault(vmf);
-		if (!ret)
-			ret = VM_FAULT_DONE_COW;
+			ret = dax_fault_return(error);
 		goto finish_iomap;
 	}
 
@@ -1354,19 +1379,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
 						 0, write && !sync);
 
-		/*
-		 * If we are doing synchronous page fault and inode needs fsync,
-		 * we can insert PTE into page tables only after that happens.
-		 * Skip insertion for now and return the pfn so that caller can
-		 * insert it after fsync is done.
-		 */
 		if (sync) {
-			if (WARN_ON_ONCE(!pfnp)) {
-				error = -EIO;
-				goto error_finish_iomap;
-			}
-			*pfnp = pfn;
-			ret = VM_FAULT_NEEDDSYNC | major;
+			ret = dax_fault_synchronous_pfnp(pfnp, pfn);
 			goto finish_iomap;
 		}
 		trace_dax_insert_mapping(inode, vmf, entry);
@@ -1465,13 +1479,45 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	return VM_FAULT_FALLBACK;
 }
 
+static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
+		pgoff_t max_pgoff)
+{
+	unsigned long pmd_addr = vmf->address & PMD_MASK;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+	/*
+	 * Make sure that the faulting address's PMD offset (color) matches
+	 * the PMD offset from the start of the file. This is necessary so
+	 * that a PMD range in the page table overlaps exactly with a PMD
+	 * range in the page cache.
+	 */
+	if ((vmf->pgoff & PG_PMD_COLOUR) !=
+	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
+		return true;
+
+	/* Fall back to PTEs if we're going to COW */
+	if (write && !(vmf->vma->vm_flags & VM_SHARED))
+		return true;
+
+	/* If the PMD would extend outside the VMA */
+	if (pmd_addr < vmf->vma->vm_start)
+		return true;
+	if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+		return true;
+
+	/* If the PMD would extend beyond the file size */
+	if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
+		return true;
+
+	return false;
+}
+
 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       const struct iomap_ops *ops)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
-	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	bool sync;
 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
@@ -1494,33 +1540,12 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
 
-	/*
-	 * Make sure that the faulting address's PMD offset (color) matches
-	 * the PMD offset from the start of the file. This is necessary so
-	 * that a PMD range in the page table overlaps exactly with a PMD
-	 * range in the page cache.
-	 */
-	if ((vmf->pgoff & PG_PMD_COLOUR) !=
-	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
-		goto fallback;
-
-	/* Fall back to PTEs if we're going to COW */
-	if (write && !(vma->vm_flags & VM_SHARED))
-		goto fallback;
-
-	/* If the PMD would extend outside the VMA */
-	if (pmd_addr < vma->vm_start)
-		goto fallback;
-	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
-		goto fallback;
-
 	if (xas.xa_index >= max_pgoff) {
 		result = VM_FAULT_SIGBUS;
 		goto out;
 	}
 
-	/* If the PMD would extend beyond the file size */
-	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
+	if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
 		goto fallback;
 
 	/*
@@ -1572,17 +1597,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
 						DAX_PMD, write && !sync);
 
-		/*
-		 * If we are doing synchronous page fault and inode needs fsync,
-		 * we can insert PMD into page tables only after that happens.
-		 * Skip insertion for now and return the pfn so that caller can
-		 * insert it after fsync is done.
-		 */
 		if (sync) {
-			if (WARN_ON_ONCE(!pfnp))
-				goto finish_iomap;
-			*pfnp = pfn;
-			result = VM_FAULT_NEEDDSYNC;
+			result = dax_fault_synchronous_pfnp(pfnp, pfn);
 			goto finish_iomap;
 		}
 
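A side note on dax_fault_check_fallback(): its first test only allows a PMD
fault when the faulting virtual address and the file offset share the same
alignment ("colour") within a PMD, so that one 2M page-table entry can map a
naturally aligned 2M range of the file. Below is a standalone sketch of that
arithmetic, assuming x86-64 constants (4K pages, 2M PMDs); the EX_* names are
made up for the example and are not kernel symbols:

#include <stdio.h>

#define EX_PAGE_SHIFT		12
#define EX_PMD_SHIFT		21
/* Number of 4K pages per 2M PMD, minus one: 511. */
#define EX_PG_PMD_COLOUR	((1UL << (EX_PMD_SHIFT - EX_PAGE_SHIFT)) - 1)

static int needs_fallback(unsigned long address, unsigned long pgoff)
{
	/* Same comparison as the colour check in dax_fault_check_fallback(). */
	return (pgoff & EX_PG_PMD_COLOUR) !=
	       ((address >> EX_PAGE_SHIFT) & EX_PG_PMD_COLOUR);
}

int main(void)
{
	/* 2M-aligned vaddr, file offset 0: colours match, PMD fault allowed. */
	printf("aligned:    fallback=%d\n",
	       needs_fallback(0x7f0000200000UL, 0));
	/* 2M-aligned vaddr, file offset 4K (pgoff 1): colours differ, fall back. */
	printf("misaligned: fallback=%d\n",
	       needs_fallback(0x7f0000200000UL, 1));
	return 0;
}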
--
2.30.1

