mm/swap_slots.c: excerpts matching "cache"
/*
 * Manage cache of swap slots to be used for and returned from swap.
 *
 * The swap slots cache is protected by a mutex instead of a spin
 * lock, as the slot search (scan_swap_map) can possibly sleep.
 */
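For reference, the per-cpu structure these routines manipulate is declared in include/linux/swap_slots.h; the field annotations below are my reading of how this file uses them:

struct swap_slots_cache {
        bool            lock_initialized;
        struct mutex    alloc_lock;     /* protects slots, nr, cur */
        swp_entry_t     *slots;         /* slots cached for allocation */
        int             nr;             /* number of cached slots left */
        int             cur;            /* next slot to hand out */
        spinlock_t      free_lock;      /* protects slots_ret, n_ret */
        swp_entry_t     *slots_ret;     /* slots returned by callers */
        int             n_ret;          /* number of returned slots held */
};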
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);
/* In check_cache_active(), with pages = get_nr_swap_pages(): */
        /* if global pool of slot caches too low, deactivate cache */
        if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
                deactivate_swap_slots_cache();
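The fast paths gate on the enabled/active flag pair through a helper macro, and the threshold check above drives a small deactivate/reactivate pair. A sketch of those definitions, assuming their mainline shape (the drain flags are the ones used throughout this file):

#define SLOTS_CACHE             0x1
#define SLOTS_CACHE_RET         0x2
#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)

static void deactivate_swap_slots_cache(void)
{
        mutex_lock(&swap_slots_cache_mutex);
        swap_slot_cache_active = false;
        /* push everything cached on all cpus back to the global pool */
        __drain_swap_slots_cache(SLOTS_CACHE | SLOTS_CACHE_RET);
        mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
        mutex_lock(&swap_slots_cache_mutex);
        swap_slot_cache_active = true;
        mutex_unlock(&swap_slots_cache_mutex);
}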
static int alloc_swap_slot_cache(unsigned int cpu)
{
        struct swap_slots_cache *cache;
        swp_entry_t *slots, *slots_ret;

        /* allocate outside swap_slots_cache_mutex: kvcalloc can trigger
           reclaim and get_swap_page, which takes that mutex */
        slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t), GFP_KERNEL);
        if (!slots)
                return -ENOMEM;
        slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t), GFP_KERNEL);
        if (!slots_ret) {
                kvfree(slots);
                return -ENOMEM;
        }

        mutex_lock(&swap_slots_cache_mutex);
        cache = &per_cpu(swp_slots, cpu);
        if (cache->slots || cache->slots_ret) {
                /* cache already allocated */
                mutex_unlock(&swap_slots_cache_mutex);
                kvfree(slots);
                kvfree(slots_ret);
                return 0;
        }

        if (!cache->lock_initialized) {
                mutex_init(&cache->alloc_lock);
                spin_lock_init(&cache->free_lock);
                cache->lock_initialized = true;
        }
        cache->nr = 0;
        cache->cur = 0;
        cache->n_ret = 0;
        /*
         * We initialized alloc_lock and free_lock earlier.  We use
         * !cache->slots or !cache->slots_ret to know if it is safe to acquire
         * the corresponding lock and use the cache.  Memory barrier below
         * ensures the assumption.
         */
        mb();
        cache->slots = slots;
        cache->slots_ret = slots_ret;
        mutex_unlock(&swap_slots_cache_mutex);
        return 0;
}
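The matching CPU-hotplug teardown hands a dying cpu's cached slots back and frees both arrays through the drain helper that follows; a sketch assuming the mainline shape of free_slot_cache():

static int free_slot_cache(unsigned int cpu)
{
        mutex_lock(&swap_slots_cache_mutex);
        /* true: free the slots and slots_ret arrays as well */
        drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
        mutex_unlock(&swap_slots_cache_mutex);
        return 0;
}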
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
                                  bool free_slots)
{
        struct swap_slots_cache *cache;
        swp_entry_t *slots = NULL;

        cache = &per_cpu(swp_slots, cpu);
        if ((type & SLOTS_CACHE) && cache->slots) {
                mutex_lock(&cache->alloc_lock);
                swapcache_free_entries(cache->slots + cache->cur, cache->nr);
                cache->cur = 0;
                cache->nr = 0;
                if (free_slots && cache->slots) {
                        kvfree(cache->slots);
                        cache->slots = NULL;
                }
                mutex_unlock(&cache->alloc_lock);
        }
        if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
                spin_lock_irq(&cache->free_lock);
                swapcache_free_entries(cache->slots_ret, cache->n_ret);
                cache->n_ret = 0;
                if (free_slots && cache->slots_ret) {
                        slots = cache->slots_ret;
                        cache->slots_ret = NULL;
                }
                spin_unlock_irq(&cache->free_lock);
                /* free outside the irq-off spinlock */
                kvfree(slots);
        }
}
/*
 * Called during 1) swapoff, when we have to make sure no left over
 * slots are in cache when we remove a swap device; and 2) disabling
 * of swap slot cache, when we run low on swap slots and need to
 * return them to the global pool.
 *
 * The cpu hotplug lock cannot be taken here, so the drain covers all
 * possible cpus: a cpu coming online but not yet in cpu_online_mask
 * could otherwise be missed while it starts to fill any swap slots in
 * slots cache of such cpu.
 */
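That comment sits at the top of __drain_swap_slots_cache(); the body itself is tiny. A sketch, assuming the mainline choice of for_each_possible_cpu for the reason the comment gives:

static void __drain_swap_slots_cache(unsigned int type)
{
        unsigned int cpu;

        /* drain, but keep the arrays allocated (free_slots == false) */
        for_each_possible_cpu(cpu)
                drain_slots_cache_cpu(cpu, type, false);
}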
249 "without swap slots cache.\n", __func__))
/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
        if (!use_swap_slot_cache || cache->nr)
                return 0;

        cache->cur = 0;
        if (swap_slot_cache_active)
                cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
                                           cache->slots, 1);

        return cache->nr;
}
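SWAP_SLOTS_CACHE_SIZE and the (de)activation thresholds used with it come from include/linux/swap_slots.h; the mainline definitions, for reference (worth re-checking against your tree):

#define SWAP_SLOTS_CACHE_SIZE                   SWAP_BATCH
#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE     (5 * SWAP_SLOTS_CACHE_SIZE)
#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE   (2 * SWAP_SLOTS_CACHE_SIZE)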
void free_swap_slot(swp_entry_t entry)
{
        struct swap_slots_cache *cache;

        cache = raw_cpu_ptr(&swp_slots);
        if (likely(use_swap_slot_cache && cache->slots_ret)) {
                spin_lock_irq(&cache->free_lock);
                /* Swap slots cache may be deactivated before acquiring lock */
                if (!use_swap_slot_cache || !cache->slots_ret) {
                        spin_unlock_irq(&cache->free_lock);
                        goto direct_free;
                }
                if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
                        /*
                         * Return slots to global pool.  The current
                         * swap_map value is SWAP_HAS_CACHE; set it to 0
                         * to mark the slot available for allocation again.
                         */
                        swapcache_free_entries(cache->slots_ret, cache->n_ret);
                        cache->n_ret = 0;
                }
                cache->slots_ret[cache->n_ret++] = entry;
                spin_unlock_irq(&cache->free_lock);
        } else {
direct_free:
                swapcache_free_entries(&entry, 1);
        }
}
swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry;
        struct swap_slots_cache *cache;

        entry.val = 0;

        /*
         * Preemption is allowed here, because we may sleep in
         * refill_swap_slots_cache().  But it is safe, because accesses
         * to the per-CPU data structure are protected by the
         * mutex cache->alloc_lock.
         *
         * The alloc path here does not touch cache->slots_ret,
         * so cache->free_lock is not taken.
         */
        cache = raw_cpu_ptr(&swp_slots);

        if (likely(check_cache_active() && cache->slots)) {
                mutex_lock(&cache->alloc_lock);
                if (cache->slots) {
repeat:
                        if (cache->nr) {
                                entry = cache->slots[cache->cur];
                                cache->slots[cache->cur++].val = 0;
                                cache->nr--;
                        } else if (refill_swap_slots_cache(cache)) {
                                goto repeat;
                        }
                }
                mutex_unlock(&cache->alloc_lock);
                if (entry.val)
                        goto out;
        }
        /* cache inactive or empty: allocate straight from the global pool */
        get_swap_pages(1, &entry, 1);
out:
        return entry;
}
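In mainline the same function also short-circuits for transparent huge pages and unwinds on a failed memcg swap charge; a condensed sketch of those two pieces, assuming 5.x-era placement and names (verify against your tree):

        /* near the top: THP wants HPAGE_PMD_NR contiguous slots, no cache */
        if (PageTransHuge(page)) {
                if (IS_ENABLED(CONFIG_THP_SWAP))
                        get_swap_pages(1, &entry, HPAGE_PMD_NR);
                goto out;
        }

        /* at "out:": give the slot back if the memcg swap charge fails */
        if (mem_cgroup_try_charge_swap(page, entry)) {
                put_swap_page(page, entry);
                entry.val = 0;
        }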