Lines matching refs: ca

76 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
80 ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
81 WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
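
The matches at 76-81 are from bch_inc_gen(): bump a bucket's generation and record the largest gc_gen seen so far, so the cache set knows when garbage collection is overdue. A minimal userspace sketch of that bookkeeping, assuming gc_gen is the distance between the current generation and the generation at the last GC pass; the struct names and the cap value below are illustrative stand-ins, not the kernel definitions.

#include <stdint.h>
#include <assert.h>

#define GC_GEN_CAP 96   /* illustrative; the kernel's cap is BUCKET_GC_GEN_MAX */

struct bucket_sketch { uint8_t gen, last_gc; };
struct set_sketch    { uint8_t need_gc; };

/* gc_gen: how far gen has advanced since GC last marked the bucket */
static uint8_t gc_gen(const struct bucket_sketch *b)
{
        return (uint8_t)(b->gen - b->last_gc);
}

static uint8_t inc_gen(struct set_sketch *s, struct bucket_sketch *b)
{
        uint8_t ret = ++b->gen;

        if (gc_gen(b) > s->need_gc)
                s->need_gc = gc_gen(b);     /* lines 80-81: track the worst case */
        assert(s->need_gc <= GC_GEN_CAP);   /* the kernel uses WARN_ON_ONCE here */
        return ret;
}
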
88 struct cache *ca;
106 ca = c->cache;
107 for_each_bucket(b, ca)
130 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
132 BUG_ON(!ca->set->gc_mark_valid);
140 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
142 lockdep_assert_held(&ca->set->bucket_lock);
146 trace_bcache_invalidate(ca, b - ca->buckets);
148 bch_inc_gen(ca, b);
153 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
155 __bch_invalidate_one_bucket(ca, b);
157 fifo_push(&ca->free_inc, b - ca->buckets);
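
Lines 146 and 157 both convert a bucket pointer back into its index with plain pointer subtraction (b - ca->buckets) before tracing it or pushing it onto free_inc. A trivial standalone illustration of that idiom, with a hypothetical bucket type rather than the kernel's:

#include <stddef.h>
#include <assert.h>

struct bkt { int gen; };

int main(void)
{
        struct bkt buckets[16];
        struct bkt *b = &buckets[5];

        /* same idiom as "b - ca->buckets" at lines 146 and 157 */
        ptrdiff_t idx = b - buckets;

        assert(idx == 5);
        return 0;
}
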
171 unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
173 (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
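
Lines 171-173 are the body of the bucket_prio() macro used by the LRU policy: a bucket's score combines how recently it was used (its decaying prio, measured against the set-wide minimum) with how many sectors it still holds, so a low score means cold and nearly empty. A rough standalone rendering of that arithmetic; the struct, field names, and the INITIAL_PRIO value here are stand-ins for the sketch.

#include <stdint.h>
#include <stdio.h>

#define INITIAL_PRIO 32768U   /* illustrative; the real constant is in bcache.h */

struct bucket_sk { uint16_t prio, sectors_used; };

/* Lower result = colder and emptier = better candidate to invalidate. */
static uint64_t bucket_prio_sk(const struct bucket_sk *b, uint16_t set_min_prio)
{
        unsigned int min_prio = (INITIAL_PRIO - set_min_prio) / 8;

        return (uint64_t)(b->prio - set_min_prio + min_prio) * b->sectors_used;
}

int main(void)
{
        struct bucket_sk cold = { .prio = 200,   .sectors_used = 8 };
        struct bucket_sk hot  = { .prio = 30000, .sectors_used = 8 };

        printf("cold=%llu hot=%llu\n",
               (unsigned long long)bucket_prio_sk(&cold, 200),
               (unsigned long long)bucket_prio_sk(&hot, 200));
        return 0;
}
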
179 static void invalidate_buckets_lru(struct cache *ca)
184 ca->heap.used = 0;
186 for_each_bucket(b, ca) {
187 if (!bch_can_invalidate_bucket(ca, b))
190 if (!heap_full(&ca->heap))
191 heap_add(&ca->heap, b, bucket_max_cmp);
192 else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
193 ca->heap.data[0] = b;
194 heap_sift(&ca->heap, 0, bucket_max_cmp);
198 for (i = ca->heap.used / 2 - 1; i >= 0; --i)
199 heap_sift(&ca->heap, i, bucket_min_cmp);
201 while (!fifo_full(&ca->free_inc)) {
202 if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
207 ca->invalidate_needs_gc = 1;
208 wake_up_gc(ca->set);
212 bch_invalidate_one_bucket(ca, b);
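
Lines 179-212 (invalidate_buckets_lru) scan every bucket, keep the best candidates in a bounded heap ordered by bucket_prio(), re-sift, then pop the lowest-scoring buckets into free_inc until it fills; if the heap runs dry first, the code sets invalidate_needs_gc and wakes the GC thread (207-208). The sketch below keeps the same "bounded set of best candidates" shape but uses a linear scan where the kernel uses heap_add()/heap_sift(); all names are invented for the sketch.

#include <stddef.h>

struct cand_bucket { unsigned prio, sectors; int can_invalidate; };

static unsigned long score(const struct cand_bucket *b)  /* lower = evict sooner */
{
        return (unsigned long)b->prio * (b->sectors ? b->sectors : 1);
}

#define NCAND 4   /* stands in for the bounded ca->heap */

static size_t pick_candidates(struct cand_bucket *all, size_t n,
                              struct cand_bucket **out)
{
        size_t used = 0;

        for (size_t i = 0; i < n; i++) {
                if (!all[i].can_invalidate)
                        continue;
                if (used < NCAND) {              /* heap_add() at line 191 */
                        out[used++] = &all[i];
                        continue;
                }
                /* Full: replace the worst kept candidate if this bucket
                 * scores lower (lines 192-194), found here by linear scan. */
                size_t worst = 0;
                for (size_t j = 1; j < NCAND; j++)
                        if (score(out[j]) > score(out[worst]))
                                worst = j;
                if (score(&all[i]) < score(out[worst]))
                        out[worst] = &all[i];
        }
        return used;   /* caller then drains these lowest-score buckets first */
}
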
216 static void invalidate_buckets_fifo(struct cache *ca)
221 while (!fifo_full(&ca->free_inc)) {
222 if (ca->fifo_last_bucket < ca->sb.first_bucket ||
223 ca->fifo_last_bucket >= ca->sb.nbuckets)
224 ca->fifo_last_bucket = ca->sb.first_bucket;
226 b = ca->buckets + ca->fifo_last_bucket++;
228 if (bch_can_invalidate_bucket(ca, b))
229 bch_invalidate_one_bucket(ca, b);
231 if (++checked >= ca->sb.nbuckets) {
232 ca->invalidate_needs_gc = 1;
233 wake_up_gc(ca->set);
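
Lines 216-233 (invalidate_buckets_fifo) walk the bucket array with a persistent cursor, ca->fifo_last_bucket, resetting it to first_bucket whenever it falls out of range, and bail out to GC once nbuckets candidates have been checked without filling free_inc. The wraparound cursor on its own, with stand-in names:

#include <stddef.h>
#include <stdio.h>

/* Stands in for ca->fifo_last_bucket / ca->sb.first_bucket / ca->sb.nbuckets. */
struct fifo_cursor { size_t first, nbuckets, last; };

/* Next bucket index, wrapping exactly like lines 222-226. */
static size_t fifo_next(struct fifo_cursor *c)
{
        if (c->last < c->first || c->last >= c->nbuckets)
                c->last = c->first;
        return c->last++;
}

int main(void)
{
        struct fifo_cursor cur = { .first = 1, .nbuckets = 4, .last = 0 };

        for (int i = 0; i < 6; i++)
                printf("%zu ", fifo_next(&cur));   /* prints: 1 2 3 1 2 3 */
        printf("\n");
        return 0;
}
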
239 static void invalidate_buckets_random(struct cache *ca)
244 while (!fifo_full(&ca->free_inc)) {
249 n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
250 n += ca->sb.first_bucket;
252 b = ca->buckets + n;
254 if (bch_can_invalidate_bucket(ca, b))
255 bch_invalidate_one_bucket(ca, b);
257 if (++checked >= ca->sb.nbuckets / 2) {
258 ca->invalidate_needs_gc = 1;
259 wake_up_gc(ca->set);
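
Lines 239-259 (invalidate_buckets_random) pick bucket indexes at random from [first_bucket, nbuckets) and give up, waking GC, after checking nbuckets/2 candidates. The range reduction at 249-250 on its own, with rand() standing in for the kernel's random source:

#include <stdlib.h>
#include <stddef.h>

/* Map a raw random value into the valid bucket range, as lines 249-250 do. */
static size_t random_bucket(size_t first_bucket, size_t nbuckets)
{
        size_t n = (size_t)rand();

        n %= nbuckets - first_bucket;
        return n + first_bucket;
}
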
265 static void invalidate_buckets(struct cache *ca)
267 BUG_ON(ca->invalidate_needs_gc);
269 switch (CACHE_REPLACEMENT(&ca->sb)) {
271 invalidate_buckets_lru(ca);
274 invalidate_buckets_fifo(ca);
277 invalidate_buckets_random(ca);
282 #define allocator_wait(ca, cond) \
289 mutex_unlock(&(ca)->set->bucket_lock); \
291 test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \
297 mutex_lock(&(ca)->set->bucket_lock); \
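
Lines 282-297 belong to the allocator_wait() macro: re-check a condition in a loop, dropping bucket_lock before sleeping and retaking it afterwards, and return from the thread altogether if it is being stopped or the cache set's I/O has been disabled. A pthread-flavoured approximation of that shape; sched_yield() stands in for schedule(), the stop flag stands in for kthread_should_stop() plus the CACHE_SET_IO_DISABLE test, and a real version would need proper memory ordering.

#include <pthread.h>
#include <stdbool.h>
#include <sched.h>

#define allocator_wait_sk(lock, stop, cond)                             \
do {                                                                    \
        while (!(cond)) {                                               \
                pthread_mutex_unlock(lock);                             \
                if (*(stop))                                            \
                        return NULL;   /* pthread body returns void * */\
                sched_yield();                                          \
                pthread_mutex_lock(lock);                               \
        }                                                               \
} while (0)

/* Example thread body: holding `lock` on entry, wait until ready is set. */
static void *waiter_sk(void *arg)
{
        struct { pthread_mutex_t lock; bool stop, ready; } *s = arg;

        pthread_mutex_lock(&s->lock);
        allocator_wait_sk(&s->lock, &s->stop, s->ready);
        pthread_mutex_unlock(&s->lock);
        return NULL;
}
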
302 static int bch_allocator_push(struct cache *ca, long bucket)
307 if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
311 if (fifo_push(&ca->free[i], bucket))
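
Lines 302-311 (bch_allocator_push) show the refill order for the per-reserve freelists: the prio/gen reserve is topped up first, then whichever other reserve has room. A self-contained sketch with invented reserve names and a trivial bounded list in place of the kernel's fifo:

#include <stdbool.h>
#include <stddef.h>

/* Tiny bounded list standing in for fifo_push() on ca->free[]. */
struct flist { long slot[8]; size_t n; };

static bool flist_push(struct flist *f, long v)
{
        if (f->n == sizeof(f->slot) / sizeof(f->slot[0]))
                return false;
        f->slot[f->n++] = v;
        return true;
}

enum { RES_BTREE, RES_PRIO, RES_MOVINGGC, RES_NONE, RES_NR };

/* Same ordering as lines 307-311: refill the prio/gen reserve first,
 * then whichever other reserve still has room. */
static bool allocator_push_sk(struct flist free_lists[RES_NR], long bucket)
{
        if (flist_push(&free_lists[RES_PRIO], bucket))
                return true;

        for (int i = 0; i < RES_NR; i++)
                if (flist_push(&free_lists[i], bucket))
                        return true;

        return false;
}

Topping up the prio/gen reserve first is presumably what keeps priority and generation writes from ever starving behind ordinary allocations.
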
319 struct cache *ca = arg;
321 mutex_lock(&ca->set->bucket_lock);
332 if (!fifo_pop(&ca->free_inc, bucket))
335 if (ca->discard) {
336 mutex_unlock(&ca->set->bucket_lock);
337 blkdev_issue_discard(ca->bdev,
338 bucket_to_sector(ca->set, bucket),
339 ca->sb.bucket_size, GFP_KERNEL, 0);
340 mutex_lock(&ca->set->bucket_lock);
343 allocator_wait(ca, bch_allocator_push(ca, bucket));
344 wake_up(&ca->set->btree_cache_wait);
345 wake_up(&ca->set->bucket_wait);
355 allocator_wait(ca, ca->set->gc_mark_valid &&
356 !ca->invalidate_needs_gc);
357 invalidate_buckets(ca);
363 allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
364 if (CACHE_SYNC(&ca->sb)) {
376 if (!fifo_full(&ca->free_inc))
379 if (bch_prio_write(ca, false) < 0) {
380 ca->invalidate_needs_gc = 1;
381 wake_up_gc(ca->set);
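
Lines 319-381 cover the allocator thread's main loop: pop buckets off free_inc, discard each one with bucket_lock dropped (335-340), wait until bch_allocator_push() finds a freelist with room, and wake the btree-cache and bucket waiters (343-345); then wait for valid GC marks and run invalidate_buckets() (355-357); finally, once prio_blocked drains, write out prios for a CACHE_SYNC cache and fall back to waking GC if that write fails (363-381). The lock-dropping pattern around the blocking discard, in pthread form with placeholder names:

#include <pthread.h>

static pthread_mutex_t bucket_lock_sk = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for blkdev_issue_discard(): anything slow and blocking. */
static void issue_discard_sk(long bucket) { (void)bucket; }

/* Pattern from lines 335-340: never hold bucket_lock across the blocking
 * discard; drop it, issue the discard, retake it before touching shared
 * allocator state again. */
static void discard_bucket_sk(long bucket)
{
        pthread_mutex_unlock(&bucket_lock_sk);
        issue_discard_sk(bucket);
        pthread_mutex_lock(&bucket_lock_sk);
}
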
392 long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
400 if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
404 if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
405 fifo_pop(&ca->free[reserve], r))
409 trace_bcache_alloc_fail(ca, reserve);
414 prepare_to_wait(&ca->set->bucket_wait, &w,
417 mutex_unlock(&ca->set->bucket_lock);
419 mutex_lock(&ca->set->bucket_lock);
420 } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
421 !fifo_pop(&ca->free[reserve], r));
423 finish_wait(&ca->set->bucket_wait, &w);
425 if (ca->alloc_thread)
426 wake_up_process(ca->alloc_thread);
428 trace_bcache_alloc(ca, reserve);
430 if (expensive_debug_checks(ca->set)) {
435 for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
436 BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
439 fifo_for_each(i, &ca->free[j], iter)
441 fifo_for_each(i, &ca->free_inc, iter)
445 b = ca->buckets + r;
449 SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
461 if (ca->set->avail_nbuckets > 0) {
462 ca->set->avail_nbuckets--;
463 bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
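
Lines 392-463 (bch_bucket_alloc) try the common RESERVE_NONE freelist first, then the caller's reserve; only if both are empty and the caller asked to wait does it sleep on bucket_wait (414-421). It then wakes the allocator thread, optionally runs expensive debug checks, initializes the bucket, and decrements avail_nbuckets. A runnable sketch of just the pop order; the waiting, debug, and accounting paths are left out and all names are stand-ins.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Tiny pop-only freelists standing in for fifo_pop() on ca->free[]. */
struct flist { long slot[4]; size_t n; };

static bool flist_pop(struct flist *f, long *r)
{
        if (!f->n)
                return false;
        *r = f->slot[--f->n];
        return true;
}

enum { RES_BTREE, RES_PRIO, RES_MOVINGGC, RES_NONE, RES_NR };

/* Allocation order from lines 404-421: take from the common pool first,
 * then from the caller's reserve; give up if the caller cannot wait. */
static long bucket_alloc_sk(struct flist free_lists[RES_NR],
                            int reserve, bool wait)
{
        long r;

        if (flist_pop(&free_lists[RES_NONE], &r) ||
            flist_pop(&free_lists[reserve], &r))
                return r;

        (void)wait;   /* the kernel's blocking path is omitted here */
        return -1;
}

int main(void)
{
        struct flist free_lists[RES_NR] = {
                [RES_PRIO] = { .slot = { 7 }, .n = 1 },
        };

        printf("got bucket %ld\n", bucket_alloc_sk(free_lists, RES_PRIO, false));
        return 0;
}

In the kernel, the blocking path at 414-421 is a prepare_to_wait()/schedule() loop on bucket_wait, with bucket_lock released across each sleep and finish_wait() once a bucket has been popped.
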
469 void __bch_bucket_free(struct cache *ca, struct bucket *b)
474 if (ca->set->avail_nbuckets < ca->set->nbuckets) {
475 ca->set->avail_nbuckets++;
476 bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
492 struct cache *ca;
503 ca = c->cache;
504 b = bch_bucket_alloc(ca, reserve, wait);
508 k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
510 ca->sb.nr_this_dev);
728 int bch_cache_allocator_start(struct cache *ca)
731 ca, "bcache_allocator");
735 ca->alloc_thread = k;
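
Lines 728-735 (bch_cache_allocator_start) start the allocator kthread and stash the task pointer in ca->alloc_thread so bch_bucket_alloc() can wake it later (425-426). The same start-and-remember-the-handle step with pthreads instead of kthread_run(), using stand-in types:

#include <pthread.h>

/* Stand-in for bch_allocator_thread(). */
static void *allocator_thread_sk(void *arg) { (void)arg; return NULL; }

struct cache_sk { pthread_t alloc_thread; };

/* Mirrors bch_cache_allocator_start(): create the thread and remember its
 * handle so allocation paths can poke it (wake_up_process in the kernel). */
static int allocator_start_sk(struct cache_sk *ca)
{
        return pthread_create(&ca->alloc_thread, NULL,
                              allocator_thread_sk, ca) ? -1 : 0;
}
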