Subject: [PATCH v2 4/7] mm, devm_memremap_pages: Add MEMORY_DEVICE_PRIVATE support
From: Dan Williams <dan.j.williams@intel.com>
Date: Wed, 23 May 2018
In preparation for consolidating all ZONE_DEVICE enabling via
devm_memremap_pages(), teach it how to handle the constraints of
MEMORY_DEVICE_PRIVATE ranges.
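
For illustration, a minimal sketch of how a driver might hand a
device-private range to devm_memremap_pages() once this support is in
place. Field names follow the current struct dev_pagemap; the additional
devm_memremap_pages() arguments touched elsewhere in this series are
elided, and example_add_private_memory() is a hypothetical helper, not
part of this patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>

static int example_add_private_memory(struct device *dev,
		struct dev_pagemap *pgmap, struct resource *res)
{
	void *addr;

	pgmap->res = *res;			/* device-owned physical range */
	pgmap->type = MEMORY_DEVICE_PRIVATE;	/* no kernel linear mapping */
	pgmap->altmap_valid = false;		/* struct pages come from system RAM */

	addr = devm_memremap_pages(dev, pgmap /* , ... elided */);
	return IS_ERR(addr) ? PTR_ERR(addr) : 0;
}

The only thing that changes for such a caller relative to other
ZONE_DEVICE users is pgmap->type; devm_memremap_pages() now picks the
add_pages() vs arch_add_memory() path internally.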

Cc: Christoph Hellwig <hch@lst.de>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Reported-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
kernel/memremap.c | 38 ++++++++++++++++++++++++++++++++------
1 file changed, 32 insertions(+), 6 deletions(-)

diff --git a/kernel/memremap.c b/kernel/memremap.c
index dfec0801f652..89eed25e2a80 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -303,8 +303,13 @@ static void devm_memremap_pages_release(void *data)
 		- align_start;
 
 	mem_hotplug_begin();
-	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
-			&pgmap->altmap : NULL);
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		pfn = align_start >> PAGE_SHIFT;
+		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+				align_size >> PAGE_SHIFT, NULL);
+	} else
+		arch_remove_memory(align_start, align_size,
+				pgmap->altmap_valid ? &pgmap->altmap : NULL);
 	mem_hotplug_done();
 
 	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
@@ -388,11 +393,32 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap,
 		goto err_pfn_remap;
 
 	mem_hotplug_begin();
-	error = arch_add_memory(nid, align_start, align_size, altmap, false);
-	if (!error)
-		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-					align_start >> PAGE_SHIFT,
+
+	/*
+	 * For device private memory we call add_pages() as we only need to
+	 * allocate and initialize struct page for the device memory. More-
+	 * over the device memory is un-accessible thus we do not want to
+	 * create a linear mapping for the memory like arch_add_memory()
+	 * would do.
+	 *
+	 * For all other device memory types, which are accessible by
+	 * the CPU, we do want the linear mapping and thus use
+	 * arch_add_memory().
+	 */
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		error = add_pages(nid, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, NULL, false);
+	} else {
+		struct zone *zone;
+
+		error = arch_add_memory(nid, align_start, align_size, altmap,
+				false);
+		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+		if (!error)
+			move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
 					align_size >> PAGE_SHIFT, altmap);
+	}
+
 	mem_hotplug_done();
 	if (error)
 		goto err_add_memory;
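
The comment in the hunk above spells out the key constraint: a
MEMORY_DEVICE_PRIVATE range gets struct pages via add_pages() but no
kernel linear mapping, so the CPU must never dereference those pages
directly. A hypothetical helper (not part of this patch) showing how a
caller can detect such pages with the existing is_device_private_page():

#include <linux/mm.h>

/*
 * Hypothetical check, not from this series: a device-private page has no
 * kernel linear mapping, so its contents must be migrated back to system
 * RAM (e.g. via the HMM migration helpers) before the CPU may access them.
 */
static bool example_cpu_may_access(struct page *page)
{
	return !is_device_private_page(page);
}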