Lines matching references to the identifier bdi. Each entry below is one matching source line, prefixed with its line number in the indexed file.
43 bdi_debug_root = debugfs_create_dir("bdi", NULL);
48 struct backing_dev_info *bdi = m->private;
49 struct bdi_writeback *wb = &bdi->wb;
100 !list_empty(&bdi->bdi_list), bdi->wb.state);
107 static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
109 bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
111 debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
115 static void bdi_debug_unregister(struct backing_dev_info *bdi)
117 debugfs_remove_recursive(bdi->debug_dir);
123 static inline void bdi_debug_register(struct backing_dev_info *bdi,
127 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
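
The matches above are the debugfs glue: a top-level "bdi" directory is created once, each registered device gets a subdirectory under it (named after dev_name(dev), see bdi_debug_register() and the register path further down) with a read-only "stats" file, and when CONFIG_DEBUG_FS is off the two helpers collapse into empty inlines. A minimal user-space sketch for reading that file follows; the /sys/kernel/debug mount point and the device name "8:0" are assumptions, not something the matches show.

/*
 * Hedged sketch: dump the per-bdi writeback statistics exposed via debugfs.
 * The debugfs mount point and the device name "8:0" are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/bdi/8:0/stats";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return EXIT_SUCCESS;
}
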
136 struct backing_dev_info *bdi = dev_get_drvdata(dev);
144 bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
155 struct backing_dev_info *bdi = dev_get_drvdata(dev); \
161 BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
166 struct backing_dev_info *bdi = dev_get_drvdata(dev);
174 ret = bdi_set_min_ratio(bdi, ratio);
180 BDI_SHOW(min_ratio, bdi->min_ratio)
185 struct backing_dev_info *bdi = dev_get_drvdata(dev);
193 ret = bdi_set_max_ratio(bdi, ratio);
199 BDI_SHOW(max_ratio, bdi->max_ratio)
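
The read_ahead_kb, min_ratio and max_ratio matches are the sysfs attribute helpers; the ratio store paths funnel into bdi_set_min_ratio() and bdi_set_max_ratio(), which kernel code can also call directly. A hedged in-kernel sketch follows; example_limit_writeback, the superblock argument and the chosen percentages are illustrative, and whether the setters are exported to modules is not shown by the matches.

/*
 * Hedged sketch: constrain a bdi's share of the dirty limit from kernel
 * code, mirroring what the min_ratio/max_ratio sysfs stores do.
 */
#include <linux/backing-dev.h>
#include <linux/fs.h>

static int example_limit_writeback(struct super_block *sb)
{
        int ret;

        /* never let this device take more than 1% of the dirty threshold */
        ret = bdi_set_max_ratio(sb->s_bdi, 1);
        if (ret)
                return ret;

        /* no reserved minimum share (0 is also the bdi_init() default) */
        return bdi_set_min_ratio(sb->s_bdi, 0);
}

The two ratios bound what share of the global dirty threshold the dirty-page accounting will grant this device.
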
222 bdi_class = class_create(THIS_MODULE, "bdi");
233 static int bdi_init(struct backing_dev_info *bdi);
252 * wakes-up the corresponding bdi thread which should then take care of the
255 * set up a timer which wakes the bdi thread up later.
280 static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
287 if (wb != &bdi->wb)
288 bdi_get(bdi);
289 wb->bdi = bdi;
325 if (wb != &bdi->wb)
326 bdi_put(bdi);
333 * Remove bdi from the global list and shutdown any threads we have running
366 if (wb != &wb->bdi->wb)
367 bdi_put(wb->bdi);
375 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
376 * bdi->cgwb_tree is also RCU protected.
396 mutex_lock(&wb->bdi->cgwb_release_mutex);
401 mutex_unlock(&wb->bdi->cgwb_release_mutex);
422 WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
435 static int cgwb_create(struct backing_dev_info *bdi,
454 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
470 ret = wb_init(wb, bdi, gfp);
488 * The root wb determines the registered state of the whole bdi and
495 if (test_bit(WB_registered, &bdi->wb.state) &&
498 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
500 list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
531 * @bdi: target bdi
534 * Try to get the wb for @memcg_css on @bdi. The returned wb has its
552 struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
558 return &bdi->wb;
561 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
578 * @bdi: target bdi
582 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
585 struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
594 return &bdi->wb;
597 wb = wb_get_lookup(bdi, memcg_css);
598 } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
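
wb_get_lookup() and wb_get_create() are the read side of the cgwb radix tree populated by cgwb_create() above: look the wb up by memcg css id, create it on demand, and hand back a reference. A hedged sketch of the caller pattern follows (requires CONFIG_CGROUP_WRITEBACK; example_pick_wb and the GFP_ATOMIC choice are illustrative, loosely modelled on how an inode-attach style caller might use it).

/*
 * Hedged sketch: find or create the writeback structure for a memcg on a
 * bdi, falling back to the embedded root wb if the allocation fails.
 * Assumes CONFIG_CGROUP_WRITEBACK; "bdi" and "memcg_css" are assumed valid.
 */
#include <linux/backing-dev.h>
#include <linux/cgroup.h>

static struct bdi_writeback *example_pick_wb(struct backing_dev_info *bdi,
                                             struct cgroup_subsys_state *memcg_css)
{
        struct bdi_writeback *wb;

        wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
        if (!wb)
                wb = &bdi->wb;  /* root wb is embedded and always present */

        /* the caller eventually drops the reference with wb_put(wb) */
        return wb;
}

Falling back to &bdi->wb keeps writeback working when the per-cgroup allocation fails.
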
603 static int cgwb_bdi_init(struct backing_dev_info *bdi)
607 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
608 mutex_init(&bdi->cgwb_release_mutex);
609 init_rwsem(&bdi->wb_switch_rwsem);
611 ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
613 bdi->wb.memcg_css = &root_mem_cgroup->css;
614 bdi->wb.blkcg_css = blkcg_root_css;
619 static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
625 WARN_ON(test_bit(WB_registered, &bdi->wb.state));
628 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
632 mutex_lock(&bdi->cgwb_release_mutex);
634 while (!list_empty(&bdi->wb_list)) {
635 wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
642 mutex_unlock(&bdi->cgwb_release_mutex);
680 static void cgwb_bdi_register(struct backing_dev_info *bdi)
683 list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
704 static int cgwb_bdi_init(struct backing_dev_info *bdi)
706 return wb_init(&bdi->wb, bdi, GFP_KERNEL);
709 static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
711 static void cgwb_bdi_register(struct backing_dev_info *bdi)
713 list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
723 static int bdi_init(struct backing_dev_info *bdi)
727 bdi->dev = NULL;
729 kref_init(&bdi->refcnt);
730 bdi->min_ratio = 0;
731 bdi->max_ratio = 100;
732 bdi->max_prop_frac = FPROP_FRAC_BASE;
733 INIT_LIST_HEAD(&bdi->bdi_list);
734 INIT_LIST_HEAD(&bdi->wb_list);
735 init_waitqueue_head(&bdi->wb_waitq);
737 ret = cgwb_bdi_init(bdi);
744 struct backing_dev_info *bdi;
746 bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
747 if (!bdi)
750 if (bdi_init(bdi)) {
751 kfree(bdi);
754 bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
755 bdi->ra_pages = VM_READAHEAD_PAGES;
756 bdi->io_pages = VM_READAHEAD_PAGES;
757 return bdi;
765 struct backing_dev_info *bdi;
771 bdi = rb_entry(parent, struct backing_dev_info, rb_node);
773 if (bdi->id > id)
775 else if (bdi->id < id)
787 * bdi_get_by_id - lookup and get bdi from its id
788 * @id: bdi id to lookup
790 * Find bdi matching @id and get it. Returns NULL if the matching bdi
795 struct backing_dev_info *bdi = NULL;
801 bdi = rb_entry(*p, struct backing_dev_info, rb_node);
802 bdi_get(bdi);
806 return bdi;
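
bdi_get_by_id() walks the bdi_tree rb-tree populated at register time and returns the bdi with its refcount elevated; the matching put is bdi_put() (further down). A hedged sketch of a caller; example_inspect_bdi and the printed fields are illustrative.

/*
 * Hedged sketch: resolve a bdi id back to the object and drop the
 * reference again.
 */
#include <linux/backing-dev.h>
#include <linux/printk.h>

static void example_inspect_bdi(u64 id)
{
        struct backing_dev_info *bdi = bdi_get_by_id(id);

        if (!bdi)
                return;         /* no bdi with that id */

        pr_info("bdi %llu: min_ratio=%u max_ratio=%u\n",
                id, bdi->min_ratio, bdi->max_ratio);
        bdi_put(bdi);
}
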
809 int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
814 if (bdi->dev) /* The driver needs to use separate queues per device */
817 vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
818 dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
822 cgwb_bdi_register(bdi);
823 bdi->dev = dev;
825 bdi_debug_register(bdi, dev_name(dev));
826 set_bit(WB_registered, &bdi->wb.state);
830 bdi->id = ++bdi_id_cursor;
832 p = bdi_lookup_rb_node(bdi->id, &parent);
833 rb_link_node(&bdi->rb_node, parent, p);
834 rb_insert_color(&bdi->rb_node, &bdi_tree);
836 list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
840 trace_writeback_bdi_register(bdi);
844 int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
850 ret = bdi_register_va(bdi, fmt, args);
856 void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
858 WARN_ON_ONCE(bdi->owner);
859 bdi->owner = owner;
864 * Remove bdi from bdi_list, and ensure that it is no longer visible
866 static void bdi_remove_from_list(struct backing_dev_info *bdi)
869 rb_erase(&bdi->rb_node, &bdi_tree);
870 list_del_rcu(&bdi->bdi_list);
876 void bdi_unregister(struct backing_dev_info *bdi)
879 bdi_remove_from_list(bdi);
880 wb_shutdown(&bdi->wb);
881 cgwb_bdi_unregister(bdi);
887 if (bdi->min_ratio)
888 bdi_set_min_ratio(bdi, 0);
890 if (bdi->dev) {
891 bdi_debug_unregister(bdi);
892 device_unregister(bdi->dev);
893 bdi->dev = NULL;
896 if (bdi->owner) {
897 put_device(bdi->owner);
898 bdi->owner = NULL;
904 struct backing_dev_info *bdi =
907 if (test_bit(WB_registered, &bdi->wb.state))
908 bdi_unregister(bdi);
909 WARN_ON_ONCE(bdi->dev);
910 wb_exit(&bdi->wb);
911 kfree(bdi);
914 void bdi_put(struct backing_dev_info *bdi)
916 kref_put(&bdi->refcnt, release_bdi);
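
Taken together, the allocation, register, unregister and put matches describe the lifecycle a bdi owner drives; for block devices the block layer normally does this on the driver's behalf. A hedged sketch follows. The allocator name bdi_alloc(int node_id) is inferred from the kzalloc_node() call and is an assumption, as are the example_* helpers and the "example-%d" name.

/*
 * Hedged sketch of the lifecycle visible in the matches: allocate,
 * register under a name, unregister, drop the last reference.
 */
#include <linux/backing-dev.h>

static struct backing_dev_info *example_bdi_setup(int node, int minor)
{
        struct backing_dev_info *bdi;

        bdi = bdi_alloc(node);          /* refcount starts at 1 (kref_init above) */
        if (!bdi)
                return NULL;

        if (bdi_register(bdi, "example-%d", minor)) {
                bdi_put(bdi);           /* never registered: just drop the ref */
                return NULL;
        }
        return bdi;
}

static void example_bdi_teardown(struct backing_dev_info *bdi)
{
        bdi_unregister(bdi);    /* drops it from bdi_list, shuts down writeback */
        bdi_put(bdi);           /* release_bdi() frees it on the last put */
}

Calling bdi_unregister() before the final bdi_put() mirrors the order visible above: unregister tears down writeback and the device node, and release_bdi() then frees the structure once the last reference is gone.
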
920 const char *bdi_dev_name(struct backing_dev_info *bdi)
922 if (!bdi || !bdi->dev)
924 return bdi->dev_name;
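
bdi_dev_name() returns the name captured at register time and tolerates a NULL or never-registered bdi (the fallback string itself is not among the matches). A hedged one-liner sketch; example_report is illustrative.

/*
 * Hedged sketch: log with the registered device name.
 */
#include <linux/backing-dev.h>
#include <linux/printk.h>

static void example_report(struct backing_dev_info *bdi, int err)
{
        pr_warn("writeback error %d on %s\n", err, bdi_dev_name(bdi));
}
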
934 void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
940 if (test_and_clear_bit(bit, &bdi->wb.congested))
948 void set_bdi_congested(struct backing_dev_info *bdi, int sync)
953 if (!test_and_set_bit(bit, &bdi->wb.congested))
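
set_bdi_congested() and clear_bdi_congested() flip the per-direction congestion bits on the embedded root wb; sync selects the synchronous or asynchronous direction. A hedged sketch of a driver-style user; the example_* names and the trigger conditions are illustrative.

/*
 * Hedged sketch: mark the async (background writeback) direction congested
 * while the device's queue is full, then clear it again.
 */
#include <linux/backing-dev.h>
#include <linux/blkdev.h>       /* BLK_RW_ASYNC / BLK_RW_SYNC */

static void example_queue_full(struct backing_dev_info *bdi)
{
        set_bdi_congested(bdi, BLK_RW_ASYNC);
}

static void example_queue_drained(struct backing_dev_info *bdi)
{
        clear_bdi_congested(bdi, BLK_RW_ASYNC);
}

Clearing the bit also lets congestion_wait() sleepers continue, though that wake-up path lies outside these matches.
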