Subject: [RFC PATCH 3/8] fs/dax: Introduce dax_copy_edges() for COW
Add an address output parameter to dax_iomap_pfn() so that a memcpy()
can be performed in the CoW case. Since the function now outputs both
the address and the pfn, rename it to dax_iomap_direct_access().
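
For clarity, the renamed helper's prototype and its two call shapes,
excerpted from the hunks below (illustrative only, not additional code):

static int dax_iomap_direct_access(struct iomap *iomap, loff_t pos,
				   size_t size, pfn_t *pfnp, void **addr);

	/* fault paths still only need the pfn */
	error = dax_iomap_direct_access(&iomap, pos, PAGE_SIZE, &pfn, NULL);

	/* dax_copy_edges() only needs the kernel address of the source */
	ret = dax_iomap_direct_access(srcmap, pos, size, NULL, &saddr);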

dax_copy_edges() is a helper function that copies, from one part of
the device to another, the parts of a page not covered by the write
but required for CoW because the range is not page aligned.
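
As a rough userspace sketch of the alignment arithmetic (plain memcpy()
standing in for memcpy_mcsafe(); the buffer names, 4K page size and
example offsets are assumptions for illustration, not kernel code):

#include <stdio.h>
#include <string.h>

#define PG_SIZE 4096UL

int main(void)
{
	/* stand-ins for the destination/source extents, both mapped at
	 * the page-aligned base of the write position */
	static char src[2 * PG_SIZE], dst[2 * PG_SIZE];

	unsigned long pos = 100;	/* write offset, not page aligned */
	unsigned long len = 5000;	/* write length, not page aligned */

	unsigned long offset = pos & (PG_SIZE - 1);
	unsigned long end    = pos + len;
	unsigned long pg_end = (end + PG_SIZE - 1) & ~(PG_SIZE - 1);

	memset(src, 'S', sizeof(src));

	/* head: old data in front of the write, within the first page */
	if (offset)
		memcpy(dst, src, offset);

	/* tail: old data behind the write, up to the next page boundary */
	if (end < pg_end)
		memcpy(dst + offset + len, src + offset + len, pg_end - end);

	printf("head: %lu bytes, tail: %lu bytes\n", offset, pg_end - end);
	return 0;
}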

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Signed-off-by: Shiyang Ruan <ruansy.fnst@cn.fujitsu.com>
---
fs/dax.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 58 insertions(+), 6 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 2f996c566103..8107ed10c851 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1111,8 +1111,8 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}

-static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
- pfn_t *pfnp)
+static int dax_iomap_direct_access(struct iomap *iomap, loff_t pos, size_t size,
+ pfn_t *pfnp, void **addr)
{
const sector_t sector = dax_iomap_sector(iomap, pos);
pgoff_t pgoff;
@@ -1123,12 +1123,14 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
if (rc)
return rc;
id = dax_read_lock();
- length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
- NULL, pfnp);
+ length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), addr,
+ pfnp);
if (length < 0) {
rc = length;
goto out;
}
+ if (!pfnp)
+ goto out_check_addr;
rc = -EINVAL;
if (PFN_PHYS(length) < size)
goto out;
@@ -1138,11 +1140,59 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
if (length > 1 && !pfn_t_devmap(*pfnp))
goto out;
rc = 0;
+
+out_check_addr:
+ if (!addr)
+ goto out;
+ if (!*addr)
+ rc = -EFAULT;
out:
dax_read_unlock(id);
return rc;
}

+/*
+ * dax_copy_edges - Copies the part of the pages not included in
+ * the write, but required for CoW because
+ * offset/offset+length are not page aligned.
+ */
+static int dax_copy_edges(loff_t pos, loff_t length, struct iomap *srcmap,
+ void *daddr, bool pmd)
+{
+ size_t page_size = pmd ? PMD_SIZE : PAGE_SIZE;
+ loff_t offset = pos & (page_size - 1);
+ size_t size = ALIGN(offset + length, page_size);
+ loff_t end = pos + length;
+ loff_t pg_end = round_up(end, page_size);
+ void *saddr = 0;
+ int ret = 0;
+
+ ret = dax_iomap_direct_access(srcmap, pos, size, NULL, &saddr);
+ if (ret)
+ return ret;
+ /*
+ * Copy the first part of the page
+ * Note: we pass offset as length
+ */
+ if (offset) {
+ if (saddr)
+ ret = memcpy_mcsafe(daddr, saddr, offset);
+ else
+ memset(daddr, 0, offset);
+ }
+
+ /* Copy the last part of the range */
+ if (end < pg_end) {
+ if (saddr)
+ ret = memcpy_mcsafe(daddr + offset + length,
+ saddr + offset + length, pg_end - end);
+ else
+ memset(daddr + offset + length, 0,
+ pg_end - end);
+ }
+ return ret;
+}
+
/*
* The user has performed a load from a hole in the file. Allocating a new
* page in the file would cause excessive storage usage for workloads with
@@ -1462,7 +1512,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
major = VM_FAULT_MAJOR;
}
- error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
+ error = dax_iomap_direct_access(&iomap, pos, PAGE_SIZE, &pfn,
+ NULL);
if (error < 0)
goto error_finish_iomap;

@@ -1680,7 +1731,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,

switch (iomap.type) {
case IOMAP_MAPPED:
- error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
+ error = dax_iomap_direct_access(&iomap, pos, PMD_SIZE, &pfn,
+ NULL);
if (error < 0)
goto finish_iomap;

--
2.26.2

