From: John Hubbard <jhubbard@nvidia.com>
Subject: [PATCH] powerpc: convert put_page() to put_user_page*()

For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
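
The conversion itself is mechanical. Roughly (a simplified sketch of the
call sites touched below, with hypothetical locals ua/n/npages, not a copy
of any one hunk):

	npages = get_user_pages_fast(ua, n, FOLL_WRITE, pages);
	...

	/* before: drop each gup reference individually */
	for (i = 0; i < npages; i++)
		put_page(pages[i]);

	/* after: use the dedicated release helper */
	put_user_pages(pages, npages);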

This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").

Note that this effectively changes the code's behavior in
mm_iommu_unpin(): it now ultimately calls set_page_dirty_lock(),
instead of set_page_dirty(). This is probably more accurate.

As Christoph Hellwig put it, "set_page_dirty() is only safe if we are
dealing with a file backed page where we have reference on the inode it
hangs off." [1]

[1] https://lore.kernel.org/r/20190723153640.GB720@lst.de
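
For the dirty-page case, the shape of the change is roughly (a sketch, not
a copy of the mm_iommu_unpin() hunk):

	/* before: mark dirty by hand (SetPageDirty()/set_page_dirty()),
	 * then drop the reference
	 */
	if (dirty)
		set_page_dirty(page);
	put_page(page);

	/* after: one call, which uses set_page_dirty_lock() when dirty is set */
	put_user_pages_dirty_lock(&page, 1, dirty);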

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
arch/powerpc/kvm/book3s_64_mmu_hv.c | 4 ++--
arch/powerpc/kvm/book3s_64_mmu_radix.c | 19 ++++++++++++++-----
arch/powerpc/kvm/e500_mmu.c | 3 +--
arch/powerpc/mm/book3s64/iommu_api.c | 11 +++++------
4 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 9a75f0e1933b..18646b738ce1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -731,7 +731,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
* we have to drop the reference on the correct tail
* page to match the get inside gup()
*/
- put_page(pages[0]);
+ put_user_page(pages[0]);
}
return ret;

@@ -1206,7 +1206,7 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
unsigned long gfn;
int srcu_idx;

- put_page(page);
+ put_user_page(page);

if (!dirty)
return;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 2d415c36a61d..f53273fbfa2d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -821,8 +821,12 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
*/
if (!ptep) {
local_irq_enable();
- if (page)
- put_page(page);
+ if (page) {
+ if (upgrade_write)
+ put_user_page(page);
+ else
+ put_page(page);
+ }
return RESUME_GUEST;
}
pte = *ptep;
@@ -870,9 +874,14 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
*levelp = level;

if (page) {
- if (!ret && (pte_val(pte) & _PAGE_WRITE))
- set_page_dirty_lock(page);
- put_page(page);
+ bool dirty = !ret && (pte_val(pte) & _PAGE_WRITE);
+ if (upgrade_write)
+ put_user_pages_dirty_lock(&page, 1, dirty);
+ else {
+ if (dirty)
+ set_page_dirty_lock(page);
+ put_page(page);
+ }
}

/* Increment number of large pages if we (successfully) inserted one */
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 2d910b87e441..67bb8d59d4b1 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -850,8 +850,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
free_privs_first:
kfree(privs[0]);
put_pages:
- for (i = 0; i < num_pages; i++)
- put_page(pages[i]);
+ put_user_pages(pages, num_pages);
free_pages:
kfree(pages);
return ret;
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index b056cae3388b..e126193ba295 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -170,9 +170,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
return 0;

free_exit:
- /* free the reference taken */
- for (i = 0; i < pinned; i++)
- put_page(mem->hpages[i]);
+ /* free the references taken */
+ put_user_pages(mem->hpages, pinned);

vfree(mem->hpas);
kfree(mem);
@@ -203,6 +202,7 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
long i;
struct page *page = NULL;
+ bool dirty = false;

if (!mem->hpas)
return;
@@ -215,10 +215,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
if (!page)
continue;

- if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
- SetPageDirty(page);
+ dirty = mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;

- put_page(page);
+ put_user_pages_dirty_lock(&page, 1, dirty);
mem->hpas[i] = 0;
}
}
--
2.22.0