From: "Mike Rapoport (IBM)" <rppt@kernel.org>
Subject: [PATCH 02/15] mm/cma: move init_cma_reserved_pageblock() to cma.c and make it static

init_cma_reserved_pageblock() is only used in cma.c, so there is no
point in having it in page_alloc.c.

Move init_cma_reserved_pageblock() to cma.c and make it static.
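
For reference, the only caller is cma_activate_area() in cma.c, which
(roughly sketched here, error handling elided; not part of this patch)
walks the reserved range one pageblock at a time:

	for (pfn = cma->base_pfn; pfn < cma->base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

so nothing outside cma.c needs to see the declaration.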

Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
---
 include/linux/gfp.h |  5 -----
 mm/cma.c            | 21 +++++++++++++++++++++
 mm/page_alloc.c     | 21 ---------------------
 3 files changed, 21 insertions(+), 26 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 65a78773dcca..7c554e4bd49f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -361,9 +361,4 @@ extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);

-#ifdef CONFIG_CMA
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
-#endif
-
#endif /* __LINUX_GFP_H */
diff --git a/mm/cma.c b/mm/cma.c
index a7263aa02c92..ce08fb9825b4 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -31,8 +31,10 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
+#include <linux/page-isolation.h>
#include <trace/events/cma.h>

+#include "internal.h"
#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
@@ -93,6 +95,25 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
spin_unlock_irqrestore(&cma->lock, flags);
}

+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+static void init_cma_reserved_pageblock(struct page *page)
+{
+	unsigned i = pageblock_nr_pages;
+	struct page *p = page;
+
+	do {
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	} while (++p, --i);
+
+	set_pageblock_migratetype(page, MIGRATE_CMA);
+	set_page_refcounted(page);
+	__free_pages(page, pageblock_order);
+
+	adjust_managed_page_count(page, pageblock_nr_pages);
+	page_zone(page)->cma_pages += pageblock_nr_pages;
+}
+
static void __init cma_activate_area(struct cma *cma)
{
unsigned long base_pfn = cma->base_pfn, pfn;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 87d760236dba..22e3da842e3f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2280,27 +2280,6 @@ void __init page_alloc_init_late(void)
set_zone_contiguous(zone);
}

-#ifdef CONFIG_CMA
-/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
-void __init init_cma_reserved_pageblock(struct page *page)
-{
-	unsigned i = pageblock_nr_pages;
-	struct page *p = page;
-
-	do {
-		__ClearPageReserved(p);
-		set_page_count(p, 0);
-	} while (++p, --i);
-
-	set_pageblock_migratetype(page, MIGRATE_CMA);
-	set_page_refcounted(page);
-	__free_pages(page, pageblock_order);
-
-	adjust_managed_page_count(page, pageblock_nr_pages);
-	page_zone(page)->cma_pages += pageblock_nr_pages;
-}
-#endif
-
/*
* The order of subdivision here is critical for the IO subsystem.
* Please do not alter this order without good reasons and regression
--
2.35.1