Subject: Re: [PATCH v6 04/12] mm/hotplug: Prepare shrink_{zone, pgdat}_span for sub-section removal
I just noticed this by inspection; I can't say I'm very familiar with the code.

On 4/17/19 11:39 AM, Dan Williams wrote:
> Sub-section hotplug support reduces the unit of operation of hotplug
> from section-sized-units (PAGES_PER_SECTION) to sub-section-sized units
> (PAGES_PER_SUBSECTION). Teach shrink_{zone,pgdat}_span() to consider
> PAGES_PER_SUBSECTION boundaries as the points where pfn_valid(), not
> valid_section(), can toggle.
>
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Logan Gunthorpe <logang@deltatee.com>
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  include/linux/mmzone.h |    2 ++
>  mm/memory_hotplug.c    |   16 ++++++++--------
>  2 files changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index cffde898e345..b13f0cddf75e 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1164,6 +1164,8 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
>
>  #define SECTION_ACTIVE_SIZE ((1UL << SECTION_SIZE_BITS) / BITS_PER_LONG)
>  #define SECTION_ACTIVE_MASK (~(SECTION_ACTIVE_SIZE - 1))
> +#define PAGES_PER_SUB_SECTION (SECTION_ACTIVE_SIZE / PAGE_SIZE)
> +#define PAGE_SUB_SECTION_MASK (~(PAGES_PER_SUB_SECTION-1))
>
>  struct mem_section_usage {
>  	/*
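
For reference (assuming the x86_64 defaults of SECTION_SIZE_BITS = 27,
BITS_PER_LONG = 64, and a 4K PAGE_SIZE), SECTION_ACTIVE_SIZE works out to
128M / 64 = 2M, so PAGES_PER_SUB_SECTION would be 2M / 4K = 512 pages per
sub-section.
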
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 8b7415736d21..d5874f9d4043 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -327,10 +327,10 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
>  {
>  	struct mem_section *ms;
>
> -	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
> +	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUB_SECTION) {
>  		ms = __pfn_to_section(start_pfn);
>
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(start_pfn)))
>  			continue;

Note that "ms" is now set but never used in this function.
You can remove the "struct mem_section *ms;" declaration and the
"ms = __pfn_to_section(start_pfn);" assignment; see the sketch below.
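
For example, a rough (untested) sketch of what this first loop could look
like with "ms" gone, based only on the hunk above:

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUB_SECTION) {
		/* pfn_valid() now replaces the old valid_section(ms) check */
		if (unlikely(!pfn_valid(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;
		...
	}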

>  		if (unlikely(pfn_to_nid(start_pfn) != nid))
> @@ -355,10 +355,10 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
>
>  	/* pfn is the end pfn of a memory section. */
>  	pfn = end_pfn - 1;
> -	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
> +	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUB_SECTION) {
>  		ms = __pfn_to_section(pfn);
>
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(pfn)))
>  			continue;

Ditto about "ms".

>  		if (unlikely(pfn_to_nid(pfn) != nid))
> @@ -417,10 +417,10 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
>  	 * it check the zone has only hole or not.
>  	 */
>  	pfn = zone_start_pfn;
> -	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
> +	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
>  		ms = __pfn_to_section(pfn);
>
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(pfn)))
>  			continue;

Ditto about "ms".

>  		if (page_zone(pfn_to_page(pfn)) != zone)
> @@ -485,10 +485,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
>  	 * has only hole or not.
>  	 */
>  	pfn = pgdat_start_pfn;
> -	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
> +	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUB_SECTION) {
>  		ms = __pfn_to_section(pfn);
>
> -		if (unlikely(!valid_section(ms)))
> +		if (unlikely(!pfn_valid(pfn)))
>  			continue;

Ditto about "ms".

>  		if (pfn_to_nid(pfn) != nid)
>
