Lines Matching defs:qgroup

23 #include "qgroup.h"
34 * Helpers to access qgroup reservation
39 static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
45 ret += qgroup->rsv.values[i];
64 struct btrfs_qgroup *qgroup, u64 num_bytes,
67 trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
68 qgroup->rsv.values[type] += num_bytes;
72 struct btrfs_qgroup *qgroup, u64 num_bytes,
75 trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
76 if (qgroup->rsv.values[type] >= num_bytes) {
77 qgroup->rsv.values[type] -= num_bytes;
82 "qgroup %llu %s reserved space underflow, have %llu to free %llu",
83 qgroup->qgroupid, qgroup_rsv_type_str(type),
84 qgroup->rsv.values[type], num_bytes);
86 qgroup->rsv.values[type] = 0;
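
The fragments above (lines 64-86) are the reservation add and release helpers: one counter per reservation type, and a release larger than the tracked value is reported and clamped to zero instead of wrapping around. Below is a minimal user-space sketch of that same pattern; every name in it is hypothetical and it is not kernel code.

#include <stdio.h>
#include <stdint.h>

/* One counter per reservation type, mirroring qgroup->rsv.values[]. */
enum rsv_type { RSV_DATA, RSV_META_PERTRANS, RSV_META_PREALLOC, RSV_LAST };

struct rsv {
	uint64_t values[RSV_LAST];
};

static void rsv_add(struct rsv *rsv, enum rsv_type type, uint64_t bytes)
{
	rsv->values[type] += bytes;
}

static void rsv_release(struct rsv *rsv, enum rsv_type type, uint64_t bytes)
{
	if (rsv->values[type] >= bytes) {
		rsv->values[type] -= bytes;
		return;
	}
	/* Report the underflow and clamp to zero instead of wrapping around. */
	fprintf(stderr, "reserved space underflow, have %llu to free %llu\n",
		(unsigned long long)rsv->values[type],
		(unsigned long long)bytes);
	rsv->values[type] = 0;
}

int main(void)
{
	struct rsv r = {0};

	rsv_add(&r, RSV_DATA, 4096);
	rsv_release(&r, RSV_DATA, 8192);	/* exercises the underflow path */
	return 0;
}
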
169 struct btrfs_qgroup *qgroup;
172 qgroup = rb_entry(n, struct btrfs_qgroup, node);
173 if (qgroup->qgroupid < qgroupid)
175 else if (qgroup->qgroupid > qgroupid)
178 return qgroup;
189 struct btrfs_qgroup *qgroup;
193 qgroup = rb_entry(parent, struct btrfs_qgroup, node);
195 if (qgroup->qgroupid < qgroupid)
197 else if (qgroup->qgroupid > qgroupid)
200 return qgroup;
203 qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
204 if (!qgroup)
207 qgroup->qgroupid = qgroupid;
208 INIT_LIST_HEAD(&qgroup->groups);
209 INIT_LIST_HEAD(&qgroup->members);
210 INIT_LIST_HEAD(&qgroup->dirty);
211 INIT_LIST_HEAD(&qgroup->iterator);
213 rb_link_node(&qgroup->node, parent, p);
214 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
216 return qgroup;
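
The find_qgroup_rb()/add_qgroup_rb() fragments above (lines 169-216) follow the usual compare-and-descend walk over a tree keyed by qgroupid, reusing an existing node when the id is already present. Below is a minimal sketch of that find-or-insert walk, with a plain (unbalanced) binary tree standing in for the kernel rb-tree; all names are hypothetical.

#include <stdlib.h>
#include <stdint.h>

struct qg_node {
	uint64_t qgroupid;
	struct qg_node *left, *right;
};

/* Descend by qgroupid; return the existing node or link in a new one. */
static struct qg_node *qg_find_or_insert(struct qg_node **root, uint64_t id)
{
	struct qg_node **p = root;

	while (*p) {
		if ((*p)->qgroupid < id)
			p = &(*p)->right;
		else if ((*p)->qgroupid > id)
			p = &(*p)->left;
		else
			return *p;	/* already tracked, reuse it */
	}

	*p = calloc(1, sizeof(**p));
	if (!*p)
		return NULL;		/* allocation failure path */
	(*p)->qgroupid = id;
	return *p;
}
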
220 struct btrfs_qgroup *qgroup)
224 list_del(&qgroup->dirty);
225 while (!list_empty(&qgroup->groups)) {
226 list = list_first_entry(&qgroup->groups,
233 while (!list_empty(&qgroup->members)) {
234 list = list_first_entry(&qgroup->members,
245 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
247 if (!qgroup)
250 rb_erase(&qgroup->node, &fs_info->qgroup_tree);
251 __del_qgroup_rb(fs_info, qgroup);
284 * Add relation specified by two qgroup ids.
331 struct btrfs_qgroup *qgroup;
333 qgroup = find_qgroup_rb(fs_info, qgroupid);
334 if (!qgroup)
336 if (qgroup->rfer != rfer || qgroup->excl != excl)
387 * pass 1: read status, all qgroup infos and limits
397 struct btrfs_qgroup *qgroup;
412 "old qgroup version, quota disabled");
419 "qgroup generation mismatch, marked as inconsistent");
431 qgroup = find_qgroup_rb(fs_info, found_key.offset);
432 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
433 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
434 btrfs_err(fs_info, "inconsistent qgroup config");
437 if (!qgroup) {
438 qgroup = add_qgroup_rb(fs_info, found_key.offset);
439 if (IS_ERR(qgroup)) {
440 ret = PTR_ERR(qgroup);
444 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
454 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
455 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
456 qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
457 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
466 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
467 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
468 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
469 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
470 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
484 * pass 2: read all qgroup relations
510 "orphan qgroup relation 0x%llx->0x%llx",
557 * Since we're unmounting, there is no race and no need to grab qgroup
562 struct btrfs_qgroup *qgroup;
565 qgroup = rb_entry(node, struct btrfs_qgroup, node);
567 if (qgroup->rsv.values[i]) {
570 "qgroup %hu/%llu has unreleased space, type %d rsv %llu",
571 btrfs_qgroup_level(qgroup->qgroupid),
572 btrfs_qgroup_subvolid(qgroup->qgroupid),
573 i, qgroup->rsv.values[i]);
589 struct btrfs_qgroup *qgroup;
592 qgroup = rb_entry(n, struct btrfs_qgroup, node);
594 __del_qgroup_rb(fs_info, qgroup);
595 btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
596 kfree(qgroup);
778 struct btrfs_qgroup *qgroup)
790 key.offset = qgroup->qgroupid;
806 btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
807 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
808 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
809 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
810 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
820 struct btrfs_qgroup *qgroup)
836 key.offset = qgroup->qgroupid;
853 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
854 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
855 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
856 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
962 struct btrfs_qgroup *qgroup = NULL;
1105 qgroup = add_qgroup_rb(fs_info, found_key.offset);
1106 if (IS_ERR(qgroup)) {
1107 ret = PTR_ERR(qgroup);
1111 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1148 qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
1149 if (IS_ERR(qgroup)) {
1150 ret = PTR_ERR(qgroup);
1154 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1163 * a deadlock with tasks concurrently doing other qgroup operations, such as
1164 * adding/removing qgroups or adding/deleting qgroup relations for example,
1165 * because all qgroup operations first start or join a transaction and then
1266 * Request qgroup rescan worker to complete and wait for it. This wait
1268 * deadlock with transaction by the qgroup rescan worker.
1340 struct btrfs_qgroup *qgroup)
1342 if (list_empty(&qgroup->dirty))
1343 list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1346 static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
1348 if (!list_empty(&qgroup->iterator))
1351 list_add_tail(&qgroup->iterator, head);
1357 struct btrfs_qgroup *qgroup;
1359 qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
1360 list_del_init(&qgroup->iterator);
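
The iterator fragments above (lines 1346-1360), including qgroup_iterator_add(), rely on the embedded list head staying self-linked ("empty") whenever the qgroup is not on a temporary iteration list, so adding it a second time is a cheap no-op and cleanup is just list_del_init() in a loop. A user-space sketch of that double-add guard, with hypothetical names and a plain doubly linked list:

#include <stdbool.h>
#include <stddef.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n)        { n->prev = n->next = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

struct qg {
	struct list_node iterator;	/* self-linked when not being iterated */
};

static void iterator_add(struct list_node *head, struct qg *qg)
{
	if (!list_empty(&qg->iterator))
		return;			/* already queued, skip the duplicate */
	list_add_tail(&qg->iterator, head);
}
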
1365 * The easy accounting, we're updating qgroup relationship whose child qgroup
1371 * So is qgroup reservation space, which should also be added/removed to
1382 struct btrfs_qgroup *qgroup;
1389 qgroup = find_qgroup_rb(fs_info, ref_root);
1390 if (!qgroup)
1393 qgroup->rfer += sign * num_bytes;
1394 qgroup->rfer_cmpr += sign * num_bytes;
1396 WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1397 qgroup->excl += sign * num_bytes;
1398 qgroup->excl_cmpr += sign * num_bytes;
1401 qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1403 qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1405 qgroup_dirty(fs_info, qgroup);
1407 /* Get all of the parent groups that contain this qgroup */
1408 list_for_each_entry(glist, &qgroup->groups, next_group) {
1418 qgroup = unode_aux_to_qgroup(unode);
1419 qgroup->rfer += sign * num_bytes;
1420 qgroup->rfer_cmpr += sign * num_bytes;
1421 WARN_ON(sign < 0 && qgroup->excl < num_bytes);
1422 qgroup->excl += sign * num_bytes;
1424 qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
1426 qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
1427 qgroup->excl_cmpr += sign * num_bytes;
1428 qgroup_dirty(fs_info, qgroup);
1431 list_for_each_entry(glist, &qgroup->groups, next_group) {
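
The accounting fragments above (lines 1382-1431) apply a signed byte delta to a qgroup's referenced/exclusive counters and then walk every group that contains it, repeating the same update and dirtying each one. A condensed user-space sketch of that signed update, with the ancestor walk flattened to an array and all names hypothetical:

#include <stdio.h>
#include <stdint.h>

struct counters {
	uint64_t rfer, excl;
};

static void apply_delta(struct counters *c, int sign, uint64_t num_bytes)
{
	if (sign < 0 && c->excl < num_bytes)
		fprintf(stderr, "warning: exclusive counter underflow\n");
	/* A negative sign relies on unsigned wrap-around, as the kernel does. */
	c->rfer += sign * num_bytes;
	c->excl += sign * num_bytes;
}

static void account_with_parents(struct counters *child,
				 struct counters **parents, int nr_parents,
				 int sign, uint64_t num_bytes)
{
	apply_delta(child, sign, num_bytes);
	for (int i = 0; i < nr_parents; i++)
		apply_delta(parents[i], sign, num_bytes);
}
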
1445 * Quick path for updating qgroup with only excl refs.
1459 struct btrfs_qgroup *qgroup;
1463 qgroup = find_qgroup_rb(fs_info, src);
1464 if (!qgroup)
1466 if (qgroup->excl == qgroup->rfer) {
1469 qgroup, sign);
1515 /* check if such a qgroup relation exists first */
1581 /* check if such a qgroup relation exists first */
1629 struct btrfs_qgroup *qgroup;
1638 qgroup = find_qgroup_rb(fs_info, qgroupid);
1639 if (qgroup) {
1649 qgroup = add_qgroup_rb(fs_info, qgroupid);
1652 if (IS_ERR(qgroup)) {
1653 ret = PTR_ERR(qgroup);
1656 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
1662 static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
1664 return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
1665 qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
1666 qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
1667 qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
1668 qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
1674 struct btrfs_qgroup *qgroup;
1684 qgroup = find_qgroup_rb(fs_info, qgroupid);
1685 if (!qgroup) {
1690 if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
1695 /* Check if there are no children of this qgroup */
1696 if (!list_empty(&qgroup->members)) {
1705 while (!list_empty(&qgroup->groups)) {
1706 list = list_first_entry(&qgroup->groups,
1719 * Remove the qgroup from sysfs now without holding the qgroup_lock
1723 btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1724 kfree(qgroup);
1734 struct btrfs_qgroup *qgroup;
1736 /* Sometimes we would want to clear the limit on this qgroup.
1738 * which tells the kernel to clear the limit on this qgroup.
1748 qgroup = find_qgroup_rb(fs_info, qgroupid);
1749 if (!qgroup) {
1757 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1759 qgroup->max_rfer = 0;
1761 qgroup->max_rfer = limit->max_rfer;
1766 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1768 qgroup->max_excl = 0;
1770 qgroup->max_excl = limit->max_excl;
1775 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1777 qgroup->rsv_rfer = 0;
1779 qgroup->rsv_rfer = limit->rsv_rfer;
1784 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1786 qgroup->rsv_excl = 0;
1788 qgroup->rsv_excl = limit->rsv_excl;
1791 qgroup->lim_flags |= limit->flags;
1795 ret = update_qgroup_limit_item(trans, qgroup);
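
The limit-update fragments above (lines 1734-1795) show the convention described in the comment at line 1736: a special clear value drops the corresponding limit flag and zeroes the stored limit, while any other value installs the new limit before update_qgroup_limit_item() writes it back. A reduced sketch of that convention for a single field, with a hypothetical sentinel value and flag layout:

#include <stdint.h>

#define LIMIT_CLEAR_VALUE	((uint64_t)-1)	/* assumed sentinel, per the comment at line 1736 */
#define LIMIT_MAX_RFER		(1ULL << 0)

struct limits {
	uint64_t flags;
	uint64_t max_rfer;
};

static void apply_max_rfer(struct limits *l, uint64_t requested)
{
	if (requested == LIMIT_CLEAR_VALUE) {
		l->flags &= ~LIMIT_MAX_RFER;	/* limit no longer enforced */
		l->max_rfer = 0;
	} else {
		l->flags |= LIMIT_MAX_RFER;
		l->max_rfer = requested;
	}
}
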
1946 /* filter out non-qgroup-accountable extents */
2052 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
2061 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
2062 * NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
2154 * blocks for qgroup accounting.
2194 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
2382 * mark qgroup inconsistent.
2540 * Update qgroup rfer/excl counters.
2724 /* Quick exit, either not fs tree roots, or won't affect any qgroup */
2814 * Old roots should be searched when inserting qgroup
2879 * Called by the transaction commit path and the qgroup assign ioctl.
2887 * In case we are called from the qgroup assign ioctl, assert that we
2899 struct btrfs_qgroup *qgroup;
2900 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2902 list_del_init(&qgroup->dirty);
2904 ret = update_qgroup_info_item(trans, qgroup);
2907 ret = update_qgroup_limit_item(trans, qgroup);
2952 * The other one in create_pending_snapshot() where no other qgroup
3001 * add qgroup to all inherited groups
3082 * qgroup, the numbers are guaranteed to be incorrect.
3157 struct btrfs_qgroup *qgroup;
3177 qgroup = find_qgroup_rb(fs_info, ref_root);
3178 if (!qgroup)
3181 qgroup_iterator_add(&qgroup_list, qgroup);
3182 list_for_each_entry(qgroup, &qgroup_list, iterator) {
3185 if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
3190 list_for_each_entry(glist, &qgroup->groups, next_group)
3198 list_for_each_entry(qgroup, &qgroup_list, iterator)
3199 qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
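
The reservation fragments above (lines 3157-3199) are a two-phase operation: first the qgroup for ref_root and all of its parent groups are only checked against their limits with qgroup_check_limits(), and only once no level can fail is the reservation committed to each of them with qgroup_rsv_add(). A sketch of that check-then-commit shape, with the hierarchy flattened to an array and a simplified limit test; names are hypothetical:

#include <stdbool.h>
#include <stdint.h>

struct qg_rsv {
	uint64_t max_rfer;	/* 0 means no limit in this sketch */
	uint64_t rfer;
	uint64_t reserved;
};

static bool check_limit(const struct qg_rsv *qg, uint64_t num_bytes)
{
	if (qg->max_rfer && qg->rfer + qg->reserved + num_bytes > qg->max_rfer)
		return false;
	return true;
}

static int reserve(struct qg_rsv **chain, int nr, uint64_t num_bytes,
		   bool enforce)
{
	/* Pass 1: fail the whole request if any level would exceed its limit. */
	for (int i = 0; i < nr; i++)
		if (enforce && !check_limit(chain[i], num_bytes))
			return -1;	/* -EDQUOT in the kernel */

	/* Pass 2: nothing can fail any more; charge every level. */
	for (int i = 0; i < nr; i++)
		chain[i]->reserved += num_bytes;
	return 0;
}
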
3208 * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0
3209 * qgroup).
3211 * Will handle all higher-level qgroups too.
3213 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3220 struct btrfs_qgroup *qgroup;
3240 qgroup = find_qgroup_rb(fs_info, ref_root);
3241 if (!qgroup)
3247 * level 0 qgroup as real num_bytes to free.
3249 num_bytes = qgroup->rsv.values[type];
3252 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
3253 qgroup_to_aux(qgroup), GFP_ATOMIC);
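
The free path above (lines 3220-3253) implements the note at line 3213: a caller that does not know how much it still has reserved can pass (u64)-1 and the helper substitutes the qgroup's currently tracked reservation so everything is released. Continuing the earlier rsv sketch (hypothetical names):

/* Building on struct rsv and rsv_release() from the sketch further above. */
static void rsv_free(struct rsv *rsv, enum rsv_type type, uint64_t bytes)
{
	if (bytes == (uint64_t)-1)
		bytes = rsv->values[type];	/* free everything still tracked */
	rsv_release(rsv, type, bytes);
}
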
3411 * should be recorded by qgroup
3447 * qgroup info, and only if we did any actual work. This also prevents
3473 btrfs_err(fs_info, "failed to update qgroup status: %d",
3488 btrfs_info(fs_info, "qgroup scan paused");
3490 btrfs_info(fs_info, "qgroup scan cancelled");
3492 btrfs_info(fs_info, "qgroup scan completed%s",
3495 btrfs_err(fs_info, "qgroup scan failed with %d", err);
3510 /* we're resuming qgroup rescan at mount time */
3514 "qgroup rescan init failed, qgroup rescan is not queued");
3519 "qgroup rescan init failed, qgroup is not enabled");
3532 "qgroup rescan is already in progress");
3537 "qgroup rescan init failed, qgroup is not enabled");
3568 struct btrfs_qgroup *qgroup;
3571 /* clear all current qgroup tracking information */
3573 qgroup = rb_entry(n, struct btrfs_qgroup, node);
3574 qgroup->rfer = 0;
3575 qgroup->rfer_cmpr = 0;
3576 qgroup->excl = 0;
3577 qgroup->excl_cmpr = 0;
3578 qgroup_dirty(fs_info, qgroup);
3729 * Try to free some space for qgroup.
3731 * For qgroup, there are only 3 ways to free qgroup space:
3744 * In theory this shouldn't provide much space, but any more qgroup space
3802 /* @reserved parameter is mandatory for qgroup */
3840 * Reserve qgroup space for range [start, start + len).
3904 * However as long as we free qgroup reserved according to
3986 * Since the new qgroup accounting framework only updates qgroup numbers at
4122 struct btrfs_qgroup *qgroup;
4131 qgroup = find_qgroup_rb(fs_info, ref_root);
4132 if (!qgroup)
4135 qgroup_iterator_add(&qgroup_list, qgroup);
4136 list_for_each_entry(qgroup, &qgroup_list, iterator) {
4139 qgroup_rsv_release(fs_info, qgroup, num_bytes,
4142 qgroup_rsv_add(fs_info, qgroup, num_bytes,
4145 list_for_each_entry(glist, &qgroup->groups, next_group)
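
The conversion fragments above (lines 4122-4145) turn one reservation type into another by releasing the amount from the old type and adding the same amount to the new type on the same qgroup (and on every group containing it). Continuing the rsv sketch, the per-qgroup step reduces to:

/* Building on the rsv sketch further above; helper name is hypothetical. */
static void rsv_convert(struct rsv *rsv, enum rsv_type from, enum rsv_type to,
			uint64_t bytes)
{
	rsv_release(rsv, from, bytes);
	rsv_add(rsv, to, bytes);
}
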
4168 * Check qgroup reserved space leaking, normally at destroy inode
4187 "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
4211 * Every record here means we skipped a full subtree scan for qgroup.
4299 * no one else can modify tree blocks, thus qgroup numbers will not change
4332 * Marking qgroup inconsistent should be enough
4355 * delayed subtree trace for qgroup.