// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is
 * a smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one bucket from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
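/*
 * Illustrative sketch (not part of the driver): the gen comparison described
 * above is what ptr_stale() in bcache.h boils down to - it effectively
 * computes the wrapping difference between the bucket's current gen and the
 * gen baked into the pointer:
 *
 *	uint8_t stale = PTR_BUCKET(c, k, i)->gen - PTR_GEN(k, i);
 *
 *	if (stale)
 *		...	// bucket was reused; treat the pointer as invalid
 *
 * Incrementing b->gen is therefore enough to atomically invalidate every
 * existing pointer into that bucket.
 */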
#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	ca = c->cache;
	for_each_bucket(b, ca)
		if (b->prio &&
		    b->prio != BTREE_PRIO &&
		    !atomic_read(&b->pin)) {
			b->prio--;
			c->min_prio = min(c->min_prio, b->prio);
		}

	mutex_unlock(&c->bucket_lock);
}
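/*
 * Worked example (illustrative, assuming 512-byte sectors): "next" above is
 * nbuckets * bucket_size / 1024 sectors, i.e. 1/1024th of the cache's
 * capacity. On a cache with 100000 buckets of 1024 sectors, every ~50 MiB of
 * IO trips the cmpxchg loop once and every unpinned, non-btree bucket's prio
 * decays by one - cheap, coarse-grained LRU aging.
 */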
/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}
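/*
 * Illustrative: bucket_gc_gen() is how far a bucket's gen has advanced since
 * the last garbage collection pass; can_inc_bucket_gen() above refuses once
 * that distance reaches BUCKET_GC_GEN_MAX (96 at the time of writing). This
 * is the "skip invalidating and collect sooner" guard against gen wraparound
 * described at the top of this file.
 */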
/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}
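/*
 * Worked example for bucket_prio() above (illustrative; INITIAL_PRIO is
 * 32768 in bcache.h): with min_prio decayed to 0 the floor term is
 * (32768 - 0) / 8 = 4096, so a fully aged bucket still weighs 4096 per live
 * sector while a freshly used one weighs 32768 + 4096 - the "smallest prio
 * is worth 1/8th" scaling described above. The lru variant keeps the
 * smallest such products in its heap and reuses those buckets first.
 */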
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}
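/*
 * Illustrative: given the enum alloc_reserve layout in bcache.h
 * (RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC, RESERVE_NONE), the loop
 * above tops up the btree-node reserve next once the prio reserve is
 * satisfied, and RESERVE_NONE - the general-purpose pool that ordinary
 * writes draw from - fills last.
 */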
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if (!fifo_pop(&ca->free_inc, bucket))
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			if (bch_prio_write(ca, false) < 0) {
				ca->invalidate_needs_gc = 1;
				wake_up_gc(ca->set);
			}
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}
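/*
 * Illustrative: one full pass of the loop above, in order - drain free_inc
 * into the per-reserve freelists (issuing discards on the way if enabled),
 * refill free_inc via invalidate_buckets(), then persist the new gens with
 * bch_prio_write() so the invalidated buckets are safe to reuse after a
 * crash.
 */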
/* Allocation */

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
		return -1;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(c->cache, PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait)
{
	struct cache *ca;
	long b;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;

	lockdep_assert_held(&c->bucket_lock);

	bkey_init(k);

	ca = c->cache;
	b = bch_bucket_alloc(ca, reserve, wait);
	if (b == -1)
		goto err;

	k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
			     bucket_to_sector(c, b),
			     ca->sb.nr_this_dev);

	SET_KEY_PTRS(k, 1);

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}
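/*
 * Usage sketch (illustrative, error handling elided): allocating a bucket
 * for metadata and releasing it again might look like:
 *
 *	BKEY_PADDED(key) tmp;
 *
 *	if (!bch_bucket_alloc_set(c, RESERVE_BTREE, &tmp.key, true))
 *		bch_bucket_free(c, &tmp.key);
 *
 * With wait == true the call may sleep in bch_bucket_alloc() until the
 * allocator thread refills the freelists; with wait == false it fails fast
 * with -1.
 */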
/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable; if
 * they share a bucket with dirty sectors of a cached device, the bucket stays
 * marked dirty and won't be reclaimed even after the cached device's dirty
 * data has been written back to the backing device.
 *
 * And say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
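/*
 * Illustrative summary of the policy pick_data_bucket() implements below:
 * skip open buckets whose flash-only/cached-device class differs from the
 * key being written, prefer an exact key match (a sequential append), fall
 * back to a last_write_point match (same task), and finally evict the least
 * recently used open bucket.
 */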
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->cache->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}
/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&c->cache->sectors_written);
	}

	if (b->sectors_free < c->cache->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the open bucket's refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}
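/*
 * Usage sketch (illustrative): the caller primes k with the device inode and
 * the starting sector, then lets bch_alloc_sectors() turn it into an extent;
 * the key source shown is hypothetical:
 *
 *	struct bkey *k = op->keys.top;		// hypothetical key source
 *
 *	bkey_init(k);
 *	SET_KEY_INODE(k, inode);
 *	SET_KEY_OFFSET(k, start_sector);
 *
 *	if (!bch_alloc_sectors(c, k, sectors, write_point, write_prio, wait))
 *		...	// no space and wait was false
 *
 * On success KEY_OFFSET(k) is the end of the allocation and KEY_SIZE(k) is
 * how much was actually allocated (possibly less than asked for).
 */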
/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}
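/*
 * Illustrative lifecycle note: super.c pairs these - the open bucket list is
 * allocated while the cache_set is being assembled, the allocator thread is
 * started once the cache is registered and ready to serve bch_bucket_alloc(),
 * and bch_open_buckets_free() tears the list down again on shutdown.
 */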