Date: Thu, 9 Mar 2023 14:33:05 +0800
Subject: Re: [PATCH v4 1/8] mm: vmscan: add a map_nr_max field to shrinker_info
From: Qi Zheng <>
On 2023/3/9 06:13, Kirill Tkhai wrote:
> Hi,
> 
> On 07.03.2023 09:55, Qi Zheng wrote:
>> To prepare for the subsequent lockless memcg slab shrink,
>> add a map_nr_max field to struct shrinker_info to records
>> its own real shrinker_nr_max.
>>
>> Suggested-by: Kirill Tkhai <tkhai@ya.ru>
>> Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
>> ---
>>  include/linux/memcontrol.h |  1 +
>>  mm/vmscan.c                | 41 ++++++++++++++++++++++----------------
>>  2 files changed, 25 insertions(+), 17 deletions(-)
>>
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index b6eda2ab205d..aa69ea98e2d8 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -97,6 +97,7 @@ struct shrinker_info {
>>  	struct rcu_head rcu;
>>  	atomic_long_t *nr_deferred;
>>  	unsigned long *map;
>> +	int map_nr_max;
>>  };
>>  
>>  struct lruvec_stats_percpu {
>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>> index 9414226218f0..2dcc01682026 100644
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -224,9 +224,16 @@ static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
>>  					 lockdep_is_held(&shrinker_rwsem));
>>  }
>>  
>> +static inline bool need_expand(int new_nr_max, int old_nr_max)
>> +{
>> +	return round_up(new_nr_max, BITS_PER_LONG) >
>> +	       round_up(old_nr_max, BITS_PER_LONG);
>> +}
>> +
>>  static int expand_one_shrinker_info(struct mem_cgroup *memcg,
>>  				    int map_size, int defer_size,
>> -				    int old_map_size, int old_defer_size)
>> +				    int old_map_size, int old_defer_size,
>> +				    int new_nr_max)
>>  {
>>  	struct shrinker_info *new, *old;
>>  	struct mem_cgroup_per_node *pn;
>> @@ -240,12 +247,17 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
>>  		if (!old)
>>  			return 0;
>>  
>> +		/* Already expanded this shrinker_info */
>> +		if (!need_expand(new_nr_max, old->map_nr_max))
> 
> need_expand() looks confusing here. It's strange that we round_up(old->map_nr_max),
> despite old->map never may exceed old->map_nr_max.
> 
> Won't plain
> 
> 	if (new_nr_max <= old->map_nr_max)
> 
> look clearer here?

Yeah, will change it to that.
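
I.e. something like this in expand_one_shrinker_info() (just an untested
sketch of what I have in mind for the next version):

	/* Already expanded this shrinker_info */
	if (new_nr_max <= old->map_nr_max)
		continue;

And since expand_shrinker_info() already rounds new_nr_max up to
BITS_PER_LONG before calling it, I think the need_expand() helper can
then be dropped entirely.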
> 
> The rest in patch looks OK for me.
> 
>> +			continue;
>> +
>>  		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
>>  		if (!new)
>>  			return -ENOMEM;
>>  
>>  		new->nr_deferred = (atomic_long_t *)(new + 1);
>>  		new->map = (void *)new->nr_deferred + defer_size;
>> +		new->map_nr_max = new_nr_max;
>>  
>>  		/* map: set all old bits, clear all new bits */
>>  		memset(new->map, (int)0xff, old_map_size);
>> @@ -295,6 +307,7 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
>>  		}
>>  		info->nr_deferred = (atomic_long_t *)(info + 1);
>>  		info->map = (void *)info->nr_deferred + defer_size;
>> +		info->map_nr_max = shrinker_nr_max;
>>  		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
>>  	}
>>  	up_write(&shrinker_rwsem);
>> @@ -302,23 +315,14 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
>>  	return ret;
>>  }
>>  
>> -static inline bool need_expand(int nr_max)
>> -{
>> -	return round_up(nr_max, BITS_PER_LONG) >
>> -	       round_up(shrinker_nr_max, BITS_PER_LONG);
>> -}
>> -
>>  static int expand_shrinker_info(int new_id)
>>  {
>>  	int ret = 0;
>> -	int new_nr_max = new_id + 1;
>> +	int new_nr_max = round_up(new_id + 1, BITS_PER_LONG);
>>  	int map_size, defer_size = 0;
>>  	int old_map_size, old_defer_size = 0;
>>  	struct mem_cgroup *memcg;
>>  
>> -	if (!need_expand(new_nr_max))
>> -		goto out;
>> -
>>  	if (!root_mem_cgroup)
>>  		goto out;
>>  
>> @@ -332,7 +336,8 @@ static int expand_shrinker_info(int new_id)
>>  	memcg = mem_cgroup_iter(NULL, NULL, NULL);
>>  	do {
>>  		ret = expand_one_shrinker_info(memcg, map_size, defer_size,
>> -					       old_map_size, old_defer_size);
>> +					       old_map_size, old_defer_size,
>> +					       new_nr_max);
>>  		if (ret) {
>>  			mem_cgroup_iter_break(NULL, memcg);
>>  			goto out;
>> @@ -352,9 +357,11 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
>>  
>>  		rcu_read_lock();
>>  		info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
>> -		/* Pairs with smp mb in shrink_slab() */
>> -		smp_mb__before_atomic();
>> -		set_bit(shrinker_id, info->map);
>> +		if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
>> +			/* Pairs with smp mb in shrink_slab() */
>> +			smp_mb__before_atomic();
>> +			set_bit(shrinker_id, info->map);
>> +		}
>>  		rcu_read_unlock();
>>  	}
>>  }
>> @@ -432,7 +439,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
>>  	for_each_node(nid) {
>>  		child_info = shrinker_info_protected(memcg, nid);
>>  		parent_info = shrinker_info_protected(parent, nid);
>> -		for (i = 0; i < shrinker_nr_max; i++) {
>> +		for (i = 0; i < child_info->map_nr_max; i++) {
>>  			nr = atomic_long_read(&child_info->nr_deferred[i]);
>>  			atomic_long_add(nr, &parent_info->nr_deferred[i]);
>>  		}
>> @@ -899,7 +906,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
>>  		if (unlikely(!info))
>>  			goto unlock;
>>  
>> -		for_each_set_bit(i, info->map, shrinker_nr_max) {
>> +		for_each_set_bit(i, info->map, info->map_nr_max) {
>>  			struct shrink_control sc = {
>>  				.gfp_mask = gfp_mask,
>>  				.nid = nid,
> 

-- 
Thanks,
Qi