Lines matching refs: dc

All matches below are from the bcache writeback path (drivers/md/bcache/writeback.c in the Linux kernel); the leading number on each match is its line number in that file.

30 static uint64_t __calc_target_rate(struct cached_dev *dc)
32 struct cache_set *c = dc->disk.c;
48 div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
52 div_u64(cache_sectors * dc->writeback_percent, 100);
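
The lines at 30-52 are from __calc_target_rate(): each backing device gets a slice of the cache set's dirty budget in proportion to its size. A minimal userspace sketch of that arithmetic, not the kernel function itself (the share shift of 3 matches writeback.h if memory serves, and calc_target is an illustrative name):

	#include <stdint.h>
	#include <stdio.h>

	#define WRITEBACK_SHARE_SHIFT 3	/* fixed-point: shares in 1/8ths */

	static uint64_t calc_target(uint64_t bdev_sectors,	 /* this device */
				    uint64_t cached_dev_sectors, /* all devices */
				    uint64_t cache_sectors,	 /* cache size */
				    unsigned int writeback_percent)
	{
		/* This device's proportional share of all attached devices. */
		uint64_t bdev_share = (bdev_sectors << WRITEBACK_SHARE_SHIFT) /
				      cached_dev_sectors;
		if (bdev_share < 1)
			bdev_share = 1;

		/* Dirty data the whole cache may hold, then this device's cut. */
		uint64_t cache_dirty_target =
			cache_sectors * writeback_percent / 100;

		return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
	}

	int main(void)
	{
		/* 512 GiB backing dev, 1 TiB total backing, 64 GiB cache, 10% */
		printf("target sectors: %llu\n", (unsigned long long)
		       calc_target(1ULL << 30, 1ULL << 31, 1ULL << 27, 10));
		return 0;
	}
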
61 static void __update_writeback_rate(struct cached_dev *dc)
83 int64_t target = __calc_target_rate(dc);
84 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
87 div_s64(error, dc->writeback_rate_p_term_inverse);
91 if ((error < 0 && dc->writeback_rate_integral > 0) ||
93 dc->writeback_rate.next + NSEC_PER_MSEC))) {
104 dc->writeback_rate_integral += error *
105 dc->writeback_rate_update_seconds;
108 integral_scaled = div_s64(dc->writeback_rate_integral,
109 dc->writeback_rate_i_term_inverse);
112 dc->writeback_rate_minimum, NSEC_PER_SEC);
114 dc->writeback_rate_proportional = proportional_scaled;
115 dc->writeback_rate_integral_scaled = integral_scaled;
116 dc->writeback_rate_change = new_rate -
117 atomic_long_read(&dc->writeback_rate.rate);
118 atomic_long_set(&dc->writeback_rate.rate, new_rate);
119 dc->writeback_rate_target = target;
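
Lines 61-119 are the heart of the rate logic: a PI controller. The error is dirty minus target; the proportional term divides the error by writeback_rate_p_term_inverse; the integral accumulates error * update_seconds (guarded against windup by the condition at lines 91-93) and is divided by writeback_rate_i_term_inverse; the sum is clamped between writeback_rate_minimum and NSEC_PER_SEC. A plain-C model of one step (pi_update and keeping_up are illustrative names; the defaults come from bch_cached_dev_writeback_init further down):

	#include <stdint.h>
	#include <stdbool.h>

	#define NSEC_PER_SEC 1000000000LL

	struct pi_state {
		int64_t integral;	/* accumulated error * seconds */
	};

	static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	/*
	 * keeping_up models the anti-windup test above: don't integrate
	 * while the device is ahead of target or still consuming the
	 * currently configured rate.
	 */
	static int64_t pi_update(struct pi_state *st, int64_t dirty,
				 int64_t target, int64_t p_inverse,
				 int64_t i_inverse, int64_t update_seconds,
				 int64_t rate_min, bool keeping_up)
	{
		int64_t error = dirty - target;
		int64_t proportional = error / p_inverse;

		if (!keeping_up)
			st->integral += error * update_seconds;

		return clamp_s64(proportional + st->integral / i_inverse,
				 rate_min, NSEC_PER_SEC);
	}
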
159 * identical dc->writeback_rate_update_seconds values, it is about 6
162 * to each dc->writeback_rate.rate.
170 struct cached_dev *dc)
186 atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
189 dc->writeback_rate_proportional = 0;
190 dc->writeback_rate_integral_scaled = 0;
191 dc->writeback_rate_change = 0;
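
The fragments at 159-191 belong to set_at_max_writeback_rate(): once every attached device has been idle for enough rate-update rounds (about six, when all devices share the same writeback_rate_update_seconds, per the comment), the rate is pinned at INT_MAX and the PI bookkeeping zeroed, so a fully idle cache drains dirty data as fast as the hardware allows. A sketch of that escape hatch; the idle counter and threshold are modeled loosely and the kernel's field names differ:

	#include <limits.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct wb {
		long rate;			/* sectors/sec */
		int64_t rate_proportional;
		int64_t rate_integral_scaled;
		int64_t rate_change;
	};

	static bool set_at_max_rate(unsigned int *idle_counter,
				    unsigned int idle_threshold, struct wb *wb)
	{
		if (++*idle_counter < idle_threshold)
			return false;	/* not idle long enough yet */

		wb->rate = INT_MAX;		/* drain as fast as possible */
		wb->rate_proportional = 0;	/* zero the PI terms so any */
		wb->rate_integral_scaled = 0;	/* reporting shows the override */
		wb->rate_change = 0;
		return true;
	}
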
206 struct cached_dev *dc = container_of(to_delayed_work(work),
209 struct cache_set *c = dc->disk.c;
215 set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
223 if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
225 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
231 if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
238 if (!set_at_max_writeback_rate(c, dc)) {
239 down_read(&dc->writeback_lock);
240 __update_writeback_rate(dc);
242 up_read(&dc->writeback_lock);
251 if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
253 schedule_delayed_work(&dc->writeback_rate_update,
254 dc->writeback_rate_update_seconds * HZ);
261 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
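
Lines 206-261 are update_writeback_rate(), a self-rearming delayed work item. It raises BCACHE_DEV_RATE_DW_RUNNING so teardown can tell a pass is in flight, bails out if BCACHE_DEV_WB_RUNNING has been cleared, recomputes under the read side of writeback_lock, and re-queues itself every writeback_rate_update_seconds. The flag handshake, modeled with C11 atomics (the kernel uses set_bit()/clear_bit() on dc->disk.flags):

	#include <stdatomic.h>

	#define DEV_WB_RUNNING      (1u << 0)	/* writeback enabled */
	#define DEV_RATE_DW_RUNNING (1u << 1)	/* update pass executing */

	static _Atomic unsigned int flags;

	static void update_writeback_rate_step(void)
	{
		/* Advertise the pass before touching shared state. */
		atomic_fetch_or(&flags, DEV_RATE_DW_RUNNING);

		if (!(atomic_load(&flags) & DEV_WB_RUNNING)) {
			/* Teardown already started: don't re-arm. */
			atomic_fetch_and(&flags, ~DEV_RATE_DW_RUNNING);
			return;
		}

		/* ... down_read(writeback_lock); PI update; up_read ... */

		if (atomic_load(&flags) & DEV_WB_RUNNING) {
			/* re-queue ourselves; in the kernel this is
			 * schedule_delayed_work(...,
			 *	writeback_rate_update_seconds * HZ) */
		}

		atomic_fetch_and(&flags, ~DEV_RATE_DW_RUNNING);
	}
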
266 static unsigned int writeback_delay(struct cached_dev *dc,
269 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
270 !dc->writeback_percent)
273 return bch_next_delay(&dc->writeback_rate, sectors);
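
writeback_delay() (lines 266-273) turns the configured rate into a sleep: zero when detaching or writeback_percent is unset, otherwise bch_next_delay() on the ratelimit. A sketch of roughly what that conversion does (next_delay_ns is an illustrative name; the real bch_next_delay() lives in util.c and similarly bounds how far the limiter may run ahead or behind):

	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	struct ratelimit {
		uint64_t next;	/* ns timestamp when the next IO is "due" */
		uint64_t rate;	/* sectors per second */
	};

	/* now_ns: caller-supplied monotonic clock, in nanoseconds. */
	static uint64_t next_delay_ns(struct ratelimit *d,
				      uint64_t done_sectors, uint64_t now_ns)
	{
		/* Charge the work just issued against the allowed rate. */
		d->next += done_sectors * NSEC_PER_SEC / d->rate;

		/* Don't bank more than 1 s of sleep or 2 s of backlog. */
		if (d->next > now_ns + NSEC_PER_SEC)
			d->next = now_ns + NSEC_PER_SEC;
		if (d->next + 2 * NSEC_PER_SEC < now_ns)
			d->next = now_ns - 2 * NSEC_PER_SEC;

		return d->next > now_ns ? d->next - now_ns : 0;
	}
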
278 struct cached_dev *dc;
290 if (!io->dc->writeback_percent)
309 struct cached_dev *dc = io->dc;
326 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
328 ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
334 ? &dc->disk.c->writeback_keys_failed
335 : &dc->disk.c->writeback_keys_done);
338 bch_keybuf_del(&dc->writeback_keys, w);
339 up(&dc->in_flight);
351 bch_count_backing_io_errors(io->dc, bio);
361 struct cached_dev *dc = io->dc;
365 if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
367 closure_wait(&dc->writeback_ordering_wait, cl);
369 if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
374 closure_wake_up(&dc->writeback_ordering_wait);
377 continue_at(cl, write_dirty, io->dc->writeback_write_wq);
393 bio_set_dev(&io->bio, io->dc->bdev);
397 closure_bio_submit(io->dc->disk.c, &io->bio, cl);
400 atomic_set(&dc->writeback_sequence_next, next_sequence);
401 closure_wake_up(&dc->writeback_ordering_wait);
403 continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
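
write_dirty() (lines 361-403) enforces issue-order completion: an IO whose sequence number is not writeback_sequence_next parks on writeback_ordering_wait, and each completed write advances the counter and wakes the waiters, so dirty data hits the backing device in key order. The same ticket discipline, modeled with a mutex and condition variable instead of closures:

	#include <pthread.h>
	#include <stdint.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t turn = PTHREAD_COND_INITIALIZER;
	static uint64_t sequence_next;	/* ticket currently allowed to run */

	/* Block until it is this ticket's turn to write. */
	static void wait_for_turn(uint64_t my_sequence)
	{
		pthread_mutex_lock(&lock);
		while (sequence_next != my_sequence)
			pthread_cond_wait(&turn, &lock);
		pthread_mutex_unlock(&lock);
	}

	/* After submitting, pass the turn to the next ticket. */
	static void pass_turn(void)
	{
		pthread_mutex_lock(&lock);
		sequence_next++;
		pthread_cond_broadcast(&turn);
		pthread_mutex_unlock(&lock);
	}
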
412 bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
423 closure_bio_submit(io->dc->disk.c, &io->bio, cl);
425 continue_at(cl, write_dirty, io->dc->writeback_write_wq);
428 static void read_dirty(struct cached_dev *dc)
438 BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
439 atomic_set(&dc->writeback_sequence_next, sequence);
447 next = bch_keybuf_next(&dc->writeback_keys);
450 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
456 BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
487 } while ((next = bch_keybuf_next(&dc->writeback_keys)));
500 io->dc = dc;
507 PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
515 down(&dc->in_flight);
525 delay = writeback_delay(dc, size);
528 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
531 delay = writeback_delay(dc, 0);
539 bch_keybuf_del(&dc->writeback_keys, w);
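
read_dirty() (lines 428-539) walks the refilled keybuf, reads each dirty extent from the cache, and dispatches the write-back, throttled two ways: writeback_delay() paces issue, and the in_flight semaphore (initialized to 64 below, released by write_dirty_finish() at line 339) bounds outstanding IOs. The back-pressure half as a POSIX sketch; the function names are illustrative:

	#include <semaphore.h>

	static sem_t in_flight;

	static void writeback_setup(void)
	{
		sem_init(&in_flight, 0, 64);	/* same limit as sema_init() below */
	}

	static void dispatch_one_extent(void)
	{
		sem_wait(&in_flight);	/* blocks once 64 IOs are outstanding */
		/* ... read the dirty extent from the cache, submit the write ... */
	}

	static void on_extent_written(void)
	{
		sem_post(&in_flight);	/* completion path frees the slot */
	}
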
595 struct cached_dev *dc = container_of(buf,
599 BUG_ON(KEY_INODE(k) != dc->disk.id);
604 static void refill_full_stripes(struct cached_dev *dc)
606 struct keybuf *buf = &dc->writeback_keys;
611 stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
618 stripe = find_next_bit(dc->disk.full_dirty_stripes,
619 dc->disk.nr_stripes, stripe);
621 if (stripe == dc->disk.nr_stripes)
624 next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
625 dc->disk.nr_stripes, stripe);
627 buf->last_scanned = KEY(dc->disk.id,
628 stripe * dc->disk.stripe_size, 0);
630 bch_refill_keybuf(dc->disk.c, buf,
631 &KEY(dc->disk.id,
632 next_stripe * dc->disk.stripe_size, 0),
643 if (stripe == dc->disk.nr_stripes) {
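
refill_full_stripes() (lines 604-643) scans the full_dirty_stripes bitmap with find_next_bit()/find_next_zero_bit() to extract runs of completely dirty stripes, then refills the keybuf one [stripe, next_stripe) range at a time. A self-contained model of the run extraction (test_bit_n and find_next are simplified stand-ins for the kernel bitmap helpers):

	#include <stdio.h>
	#include <limits.h>

	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

	static int test_bit_n(const unsigned long *map, unsigned int n)
	{
		return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
	}

	static unsigned int find_next(const unsigned long *map,
				      unsigned int size, unsigned int from,
				      int want)
	{
		while (from < size && test_bit_n(map, from) != want)
			from++;
		return from;	/* == size when no such bit exists */
	}

	int main(void)
	{
		unsigned long full_dirty_stripes[1] = { 0x1CC };
		unsigned int nr_stripes = 10, stripe = 0;

		for (;;) {
			stripe = find_next(full_dirty_stripes, nr_stripes,
					   stripe, 1);
			if (stripe == nr_stripes)
				break;
			unsigned int next_stripe =
				find_next(full_dirty_stripes, nr_stripes,
					  stripe, 0);
			/* the kernel refills writeback_keys for this range */
			printf("dirty stripes [%u, %u)\n", stripe, next_stripe);
			stripe = next_stripe;
		}
		return 0;
	}
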
653 static bool refill_dirty(struct cached_dev *dc)
655 struct keybuf *buf = &dc->writeback_keys;
656 struct bkey start = KEY(dc->disk.id, 0, 0);
657 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
669 if (dc->partial_stripes_expensive) {
670 refill_full_stripes(dc);
676 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
686 bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
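
refill_dirty() (lines 653-686) resumes scanning at buf->last_scanned, runs to the end of the device's key range, and on hitting the end wraps around and scans up to the original position; the return value tells the writeback thread whether the whole index was searched. A cursor-level model under those assumptions (refill_dirty_model is illustrative; the refill callback stands in for bch_refill_keybuf(), which stops early once the buffer fills):

	#include <stdint.h>

	struct scan {
		uint64_t last_scanned;	/* resume point, like buf->last_scanned */
	};

	/*
	 * refill(from, to) scans keys in [from, to), stops early once the
	 * keybuf is full, and returns where it stopped.
	 */
	static int refill_dirty_model(struct scan *s, uint64_t max,
				      uint64_t (*refill)(uint64_t from,
							 uint64_t to))
	{
		uint64_t start_pos;

		if (s->last_scanned >= max)
			s->last_scanned = 0;	/* cursor out of range: reset */

		start_pos = s->last_scanned;
		s->last_scanned = refill(s->last_scanned, max);
		if (s->last_scanned < max)
			return 0;	/* keybuf filled before the end */

		/* Reached the end: wrap and scan up to where we started. */
		s->last_scanned = refill(0, start_pos);
		return s->last_scanned >= start_pos;
	}
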
693 struct cached_dev *dc = arg;
694 struct cache_set *c = dc->disk.c;
697 bch_ratelimit_reset(&dc->writeback_rate);
701 down_write(&dc->writeback_lock);
710 if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
711 (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
712 up_write(&dc->writeback_lock);
725 searched_full_index = refill_dirty(dc);
728 RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
729 atomic_set(&dc->has_dirty, 0);
730 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
731 bch_write_bdev_super(dc, NULL);
738 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
739 up_write(&dc->writeback_lock);
761 up_write(&dc->writeback_lock);
763 read_dirty(dc);
766 unsigned int delay = dc->writeback_delay * HZ;
771 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
774 bch_ratelimit_reset(&dc->writeback_rate);
778 if (dc->writeback_write_wq) {
779 flush_workqueue(dc->writeback_write_wq);
780 destroy_workqueue(dc->writeback_write_wq);
782 cached_dev_put(dc);
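
Lines 693-782 are bch_writeback_thread() itself: sleep while there is nothing to do, refill the keybuf, mark the backing device CLEAN when a full-index search finds nothing dirty (which is also the point where a pending detach is allowed to complete), write the dirty data back, and nap writeback_delay seconds after each full pass. The control flow, reduced to pseudocode-shaped C; every helper here is a hypothetical stand-in for a kernel call visible above:

	#include <stdbool.h>

	extern bool detaching(void), has_dirty(void), writeback_running(void);
	extern bool refill_dirty_keys(void);		/* ~refill_dirty() */
	extern bool keybuf_empty(void);
	extern void mark_backing_dev_clean(void);	/* SET_BDEV_STATE + super */
	extern void read_and_write_back(void);		/* ~read_dirty() */
	extern void sleep_until_kicked(void);
	extern void sleep_interruptible(unsigned int seconds);

	static int writeback_thread(void)
	{
		for (;;) {
			/* the kernel holds dc->writeback_lock across this part */
			if (!detaching() &&
			    (!has_dirty() || !writeback_running())) {
				sleep_until_kicked();
				continue;
			}

			bool searched_full_index = refill_dirty_keys();

			if (searched_full_index && keybuf_empty()) {
				/* full pass, nothing dirty: device is clean */
				mark_backing_dev_clean();
				if (detaching())
					break;	/* clean, detach may finish */
			}

			read_and_write_back();	/* paced by writeback_delay() */

			if (searched_full_index)
				sleep_interruptible(30); /* dc->writeback_delay */
		}
		return 0;
	}
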
992 void bch_cached_dev_writeback_init(struct cached_dev *dc)
994 sema_init(&dc->in_flight, 64);
995 init_rwsem(&dc->writeback_lock);
996 bch_keybuf_init(&dc->writeback_keys);
998 dc->writeback_metadata = true;
999 dc->writeback_running = false;
1000 dc->writeback_percent = 10;
1001 dc->writeback_delay = 30;
1002 atomic_long_set(&dc->writeback_rate.rate, 1024);
1003 dc->writeback_rate_minimum = 8;
1005 dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
1006 dc->writeback_rate_p_term_inverse = 40;
1007 dc->writeback_rate_i_term_inverse = 10000;
1009 WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1010 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
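
The defaults at 994-1007 parameterize everything above: initial rate 1024 sectors/s (512 KiB/s), floor of 8 sectors/s, P divisor 40, I divisor 10000, and an update period of WRITEBACK_RATE_UPDATE_SECS_DEFAULT (assumed here to be its usual value of 5 seconds). A worked number for one controller step under those defaults:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t error = (1LL << 30) / 512; /* 1 GiB over target, sectors */
		int64_t p = error / 40;		   /* p_term_inverse */
		int64_t i_step = error * 5 / 10000; /* one 5 s step / i_term_inverse */

		/* ~52428 sectors/s (~25.6 MiB/s) from the P term alone, with
		 * the I term adding ~1048 sectors/s per step for as long as
		 * the error persists. */
		printf("P = %lld sectors/s, I grows by %lld per step\n",
		       (long long)p, (long long)i_step);
		return 0;
	}
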
1013 int bch_cached_dev_writeback_start(struct cached_dev *dc)
1015 dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
1017 if (!dc->writeback_write_wq)
1020 cached_dev_get(dc);
1021 dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
1023 if (IS_ERR(dc->writeback_thread)) {
1024 cached_dev_put(dc);
1025 destroy_workqueue(dc->writeback_write_wq);
1026 return PTR_ERR(dc->writeback_thread);
1028 dc->writeback_running = true;
1030 WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1031 schedule_delayed_work(&dc->writeback_rate_update,
1032 dc->writeback_rate_update_seconds * HZ);
1034 bch_writeback_queue(dc);
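
bch_cached_dev_writeback_start() (lines 1013-1034) acquires in order and unwinds in reverse: allocate the workqueue, take a device reference, create (but do not yet wake) the kthread, and only then set BCACHE_DEV_WB_RUNNING, arm the rate-update work, and kick the thread. The same shape as a generic C pattern; all helper names are hypothetical stand-ins:

	#include <errno.h>

	extern int alloc_queue(void);	/* ~alloc_workqueue() */
	extern void free_queue(void);	/* ~destroy_workqueue() */
	extern void get_ref(void);	/* ~cached_dev_get() */
	extern void put_ref(void);	/* ~cached_dev_put() */
	extern int create_thread(void);	/* ~kthread_create() */

	static int writeback_start(void)
	{
		int err;

		if (alloc_queue())
			return -ENOMEM;		/* step 1: workqueue */

		get_ref();			/* step 2: pin the device */

		err = create_thread();		/* step 3: the kthread */
		if (err) {
			put_ref();		/* unwind in reverse order, */
			free_queue();		/* as lines 1024-1026 do */
			return err;
		}

		/* step 4: set the running flag, arm the rate update, kick off */
		return 0;
	}
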