// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * it into local per cpu caches. This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also opportunity to simply return the slot
 * to local caches without needing to acquire swap_info
 * lock. We do not reuse the returned slots directly but
 * move them back to the global pool in a batch. This
 * allows the slots to coalesce and reduce fragmentation.
 *
 * The swap entry allocated is marked with SWAP_HAS_CACHE
 * flag in map_count that prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock as when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

/* Per-cpu pair of caches: slots handed out, and slots returned for batch free. */
static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
/* Runtime switch: flipped by check_cache_active() based on free swap pages. */
static bool	swap_slot_cache_active;
/* Administrative switch: toggled around swapon/swapoff via the functions below. */
bool	swap_slot_cache_enabled;
/* Set once the cpu hotplug callbacks have been registered successfully. */
static bool	swap_slot_cache_initialized;
/* Protects per-cpu cache setup/teardown and the active flag. */
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

/* Cache is usable only when both the runtime and the admin switch are on. */
#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
/* Bits selecting which half of each per-cpu cache to drain. */
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

/*
 * Turn the runtime switch off and return all cached slots to the global
 * pool.  Invoked from check_cache_active() when free swap runs low.
 */
static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Turn the runtime switch back on; counterpart of deactivate above. */
static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
/*
 * Disable the cache and drain every cpu's slots.  Leaves
 * swap_slots_cache_enable_mutex held; the caller must later release it
 * via reenable_swap_slots_cache_unlock().
 */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

/* Re-enable the cache iff there is still usable swap configured. */
static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

/*
 * Pairs with disable_swap_slots_cache_lock(): re-enable the cache and
 * drop the enable mutex taken there.
 */
void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/*
 * Flip the runtime switch based on remaining free swap, with hysteresis:
 * activate above THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE pages per online
 * cpu, deactivate below THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE.
 * Returns the resulting active state.
 */
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if global pool of slot caches too low, deactivate cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

/*
 * cpu hotplug "online" callback: allocate the two slot arrays for @cpu's
 * cache and publish them.  Returns 0 on success (including when the
 * cache already exists) or -ENOMEM.
 */
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvzalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);

		kvfree(slots);
		kvfree(slots_ret);

		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier.  We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	 * the corresponding lock and use the cache.  Memory barrier below
	 * ensures the assumption.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

/*
 * Return @cpu's cached slots to the global pool.  @type selects the
 * allocation cache (SLOTS_CACHE), the return cache (SLOTS_CACHE_RET), or
 * both; when @free_slots is true the backing arrays are freed as well.
 */
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		/* kvfree() may sleep, so defer it until after the spinlock. */
		if (slots)
			kvfree(slots);
	}
}

/* Drain the selected cache halves on every online cpu (arrays kept). */
static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *      2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over current online cpu below could miss cpu that
	 * is being brought online but not yet marked as online.
	 * That is okay as we do not schedule and run anything on a
	 * cpu before it has been marked online. Hence, we will not
	 * fill any swap slots in slots cache of such cpu.
	 * There are no slots on such cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

/* cpu hotplug "offline" callback: drain and free @cpu's cache arrays. */
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

/*
 * Register the cpu hotplug callbacks on first use, then (re)enable the
 * cache if any usable swap exists.  On registration failure the kernel
 * warns once and runs without the slots cache.
 */
void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
				       "without swap slots cache.\n", __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with swap slot cache's alloc lock held */
/*
 * Refill an empty allocation cache from the global pool.  Returns the
 * number of slots obtained (0 when the cache is disabled or still full).
 */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}

/*
 * Free one swap slot: stash it in this cpu's return cache when possible,
 * flushing the whole batch to the global pool once the cache is full;
 * otherwise free it directly.  Always returns 0.
 */
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			/* NB: jumps into the else branch below */
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

/*
 * Allocate a swap slot for @page.  THP pages bypass the per-cpu cache
 * and allocate HPAGE_PMD_NR contiguous slots directly; order-0 pages are
 * served from this cpu's cache, refilling or falling back to the global
 * pool as needed.  Returns entry.val == 0 on failure (including memcg
 * swap charge failure).
 */
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}