From: Lincheng Yang <>
Subject: [RFC PATCH 1/5] mm/swap_slots: cleanup swap slot cache
Date: Sun, 8 Oct 2023 17:59:20 +0800
Clean up the swap slot cache functions to prepare for subsequent modifications: each operation is split into a wrapper that resolves the per-CPU cache and a double-underscore helper that takes the cache as an explicit parameter.
Signed-off-by: Lincheng Yang <lincheng.yang@transsion.com>
---
 mm/swap_slots.c | 111 ++++++++++++++++++++++++++++--------------------
 1 file changed, 66 insertions(+), 45 deletions(-)
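[Editor's note: the shape of the cleanup is the same for every function the diff touches: the existing entry point keeps its signature and becomes a thin wrapper that resolves the per-CPU cache, while the body moves into a double-underscore helper that takes the cache as an explicit parameter. A minimal user-space sketch of that pattern follows; it is an illustration only, and my_cache, do_drain and friends are invented names, not from the patch.]

#include <stdio.h>

/* Stand-in for struct swap_slots_cache. */
struct my_cache {
	int nr;
};

/* Stand-in for the per-CPU swp_slots variable. */
static struct my_cache caches[4];

/* Helper: operates on whatever cache it is handed. */
static void __do_drain(struct my_cache *cache)
{
	cache->nr = 0;
}

/* Wrapper: keeps the old signature, only resolves cpu -> cache. */
static void do_drain(unsigned int cpu)
{
	__do_drain(&caches[cpu]);
}

int main(void)
{
	caches[1].nr = 42;
	caches[2].nr = 7;

	do_drain(1);            /* existing call sites are unchanged */
	__do_drain(&caches[2]); /* new code may pass any cache */

	printf("%d %d\n", caches[1].nr, caches[2].nr); /* prints "0 0" */
	return 0;
}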
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 0bec1f705f8e..bb41c8460b62 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -110,11 +110,13 @@ static bool check_cache_active(void)
 	return swap_slot_cache_active;
 }
 
-static int alloc_swap_slot_cache(unsigned int cpu)
+static int __alloc_swap_slot_cache(struct swap_slots_cache *cache)
 {
-	struct swap_slots_cache *cache;
 	swp_entry_t *slots, *slots_ret;
 
+	if (!cache)
+		return 0;
+
 	/*
 	 * Do allocation outside swap_slots_cache_mutex
 	 * as kvzalloc could trigger reclaim and folio_alloc_swap,
@@ -133,17 +135,6 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 	}
 
 	mutex_lock(&swap_slots_cache_mutex);
-	cache = &per_cpu(swp_slots, cpu);
-	if (cache->slots || cache->slots_ret) {
-		/* cache already allocated */
-		mutex_unlock(&swap_slots_cache_mutex);
-
-		kvfree(slots);
-		kvfree(slots_ret);
-
-		return 0;
-	}
-
 	if (!cache->lock_initialized) {
 		mutex_init(&cache->alloc_lock);
 		spin_lock_init(&cache->free_lock);
@@ -165,13 +156,26 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 	return 0;
 }
 
-static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
-				  bool free_slots)
+static int alloc_swap_slot_cache(unsigned int cpu)
 {
 	struct swap_slots_cache *cache;
-	swp_entry_t *slots = NULL;
 
+	mutex_lock(&swap_slots_cache_mutex);
 	cache = &per_cpu(swp_slots, cpu);
+	if (cache->slots || cache->slots_ret) /* cache already allocated */
+		cache = NULL;
+	mutex_unlock(&swap_slots_cache_mutex);
+
+	__alloc_swap_slot_cache(cache);
+
+	return 0;
+}
+
+static void __drain_slots_cache_cpu(struct swap_slots_cache *cache,
+				    unsigned int type, bool free_slots)
+{
+	swp_entry_t *slots = NULL;
+
 	if ((type & SLOTS_CACHE) && cache->slots) {
 		mutex_lock(&cache->alloc_lock);
 		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
@@ -196,6 +200,15 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
 	}
 }
 
+static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
+				  bool free_slots)
+{
+	struct swap_slots_cache *cache;
+
+	cache = &per_cpu(swp_slots, cpu);
+	__drain_slots_cache_cpu(cache, type, free_slots);
+}
+
 static void __drain_swap_slots_cache(unsigned int type)
 {
 	unsigned int cpu;
@@ -269,11 +282,8 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache)
 	return cache->nr;
 }
 
-void free_swap_slot(swp_entry_t entry)
+static void __free_swap_slot(struct swap_slots_cache *cache, swp_entry_t entry)
 {
-	struct swap_slots_cache *cache;
-
-	cache = raw_cpu_ptr(&swp_slots);
 	if (likely(use_swap_slot_cache && cache->slots_ret)) {
 		spin_lock_irq(&cache->free_lock);
 		/* Swap slots cache may be deactivated before acquiring lock */
@@ -299,18 +309,18 @@ void free_swap_slot(swp_entry_t entry)
 	}
 }
 
-swp_entry_t folio_alloc_swap(struct folio *folio)
+void free_swap_slot(swp_entry_t entry)
 {
-	swp_entry_t entry;
 	struct swap_slots_cache *cache;
 
-	entry.val = 0;
+	cache = raw_cpu_ptr(&swp_slots);
+	__free_swap_slot(cache, entry);
+}
 
-	if (folio_test_large(folio)) {
-		if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported())
-			get_swap_pages(1, &entry, folio_nr_pages(folio));
-		goto out;
-	}
+static int __folio_alloc_swap(struct swap_slots_cache *cache, swp_entry_t *entry)
+{
+	if (unlikely(!check_cache_active() || !cache->slots))
+		return -EINVAL;
 
 	/*
 	 * Preemption is allowed here, because we may sleep
@@ -321,26 +331,37 @@ swp_entry_t folio_alloc_swap(struct folio *folio)
 	 * The alloc path here does not touch cache->slots_ret
 	 * so cache->free_lock is not taken.
 	 */
-	cache = raw_cpu_ptr(&swp_slots);
-
-	if (likely(check_cache_active() && cache->slots)) {
-		mutex_lock(&cache->alloc_lock);
-		if (cache->slots) {
+	mutex_lock(&cache->alloc_lock);
 repeat:
-			if (cache->nr) {
-				entry = cache->slots[cache->cur];
-				cache->slots[cache->cur++].val = 0;
-				cache->nr--;
-			} else if (refill_swap_slots_cache(cache)) {
-				goto repeat;
-			}
-		}
-		mutex_unlock(&cache->alloc_lock);
-		if (entry.val)
-			goto out;
+	if (cache->nr) {
+		*entry = cache->slots[cache->cur];
+		cache->slots[cache->cur++].val = 0;
+		cache->nr--;
+	} else if (refill_swap_slots_cache(cache)) {
+		goto repeat;
 	}
+	mutex_unlock(&cache->alloc_lock);
+
+	return !!entry->val;
+}
+
+swp_entry_t folio_alloc_swap(struct folio *folio)
+{
+	swp_entry_t entry;
+	struct swap_slots_cache *cache;
+
+	entry.val = 0;
+
+	if (folio_test_large(folio)) {
+		if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported())
+			get_swap_pages(1, &entry, folio_nr_pages(folio));
+		goto out;
+	}
+
+	cache = raw_cpu_ptr(&swp_slots);
+	if (__folio_alloc_swap(cache, &entry))
+		get_swap_pages(1, &entry, 1);
 
-	get_swap_pages(1, &entry, 1);
 out:
	if (mem_cgroup_try_charge_swap(folio, entry)) {
		put_swap_folio(folio, entry);
-- 
2.34.1
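[Editor's note: each helper now operates on whichever struct swap_slots_cache it is handed instead of hard-coding the per-CPU swp_slots lookup, which is presumably the preparation the commit message alludes to. A hedged sketch of what that enables; the second cache swp_hot_slots and the function free_hot_swap_slot are hypothetical names invented here, not defined by this patch or series as shown.]

/* Hypothetical second per-CPU cache -- NOT defined by this patch. */
static DEFINE_PER_CPU(struct swap_slots_cache, swp_hot_slots);

/* Free an entry through the hypothetical cache via the new helper. */
static void free_hot_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_hot_slots);
	__free_swap_slot(cache, entry);	/* same helper free_swap_slot() uses */
}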