Lines Matching refs:space_info

26  *   1) space_info.  This is the ultimate arbiter of how much space we can use.
29 * reservations we care about total_bytes - SUM(space_info->bytes_) when
30 * determining if there is space to make an allocation. There is a space_info
36 * much space is accounted for in space_info->bytes_may_use.
42 * values to adjust the space_info counters.
51 * space_info->bytes_may_use += num_bytes
55 * space_info->bytes_may_use -= num_bytes
56 * space_info->bytes_reserved += extent_bytes
60 * space_info->bytes_reserved -= extent_bytes
61 * space_info->bytes_used += extent_bytes
70 * the tail of space_info->tickets, kick async flush thread
81 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
82 * space_info->total_bytes. This loops through the ->priority_tickets and
84 * completed. If it can the space is added to space_info->bytes_may_use and
95 * space_info->priority_tickets, and we do not use ticket->wait, we simply
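The comment fragments above (source lines 26-95) describe the reservation lifecycle (bytes_may_use on reserve, bytes_reserved once an extent is allocated, bytes_used once it is finalized) and the ticketing path taken when space runs short. The standalone model below only illustrates those counter movements; the struct, the helper names (reserve_sketch and friends) and the numbers are invented for the sketch and are not the kernel API.

/*
 * Minimal model of the counter movements named in the excerpt above.
 * The struct mirrors only the space_info counters that appear there.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct space_info_model {
	uint64_t total_bytes;
	uint64_t bytes_may_use;   /* worst-case reservations not yet allocated */
	uint64_t bytes_reserved;  /* allocated extents not yet finalized */
	uint64_t bytes_used;      /* finalized, on-disk usage */
};

/* Reserve: claim worst-case space up front. */
static bool reserve_sketch(struct space_info_model *si, uint64_t num_bytes)
{
	uint64_t used = si->bytes_may_use + si->bytes_reserved + si->bytes_used;

	/* The kernel would try overcommit or flushing here; the model just fails. */
	if (used + num_bytes > si->total_bytes)
		return false;
	si->bytes_may_use += num_bytes;
	return true;
}

/* Allocate: the reservation becomes a concrete extent. */
static void allocate_sketch(struct space_info_model *si, uint64_t num_bytes,
			    uint64_t extent_bytes)
{
	si->bytes_may_use -= num_bytes;
	si->bytes_reserved += extent_bytes;
}

/* Finish: the extent is accounted as real usage. */
static void finish_sketch(struct space_info_model *si, uint64_t extent_bytes)
{
	si->bytes_reserved -= extent_bytes;
	si->bytes_used += extent_bytes;
}

int main(void)
{
	struct space_info_model si = { .total_bytes = 1024 * 1024 };

	if (reserve_sketch(&si, 4096)) {
		allocate_sketch(&si, 4096, 4096);
		finish_sketch(&si, 4096);
	}
	printf("may_use=%llu reserved=%llu used=%llu\n",
	       (unsigned long long)si.bytes_may_use,
	       (unsigned long long)si.bytes_reserved,
	       (unsigned long long)si.bytes_used);
	return 0;
}

When the reserve step cannot be satisfied, the real code queues a ticket on space_info->tickets (or ->priority_tickets) and kicks the flush machinery, as the later fragments show.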
181 struct list_head *head = &info->space_info;
219 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
222 WRITE_ONCE(space_info->chunk_size, chunk_size);
228 struct btrfs_space_info *space_info;
232 space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
233 if (!space_info)
237 INIT_LIST_HEAD(&space_info->block_groups[i]);
238 init_rwsem(&space_info->groups_sem);
239 spin_lock_init(&space_info->lock);
240 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
241 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
242 INIT_LIST_HEAD(&space_info->ro_bgs);
243 INIT_LIST_HEAD(&space_info->tickets);
244 INIT_LIST_HEAD(&space_info->priority_tickets);
245 space_info->clamp = 1;
246 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
249 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
251 ret = btrfs_sysfs_add_space_info_type(info, space_info);
255 list_add(&space_info->list, &info->space_info);
257 info->data_sinfo = space_info;
321 block_group->space_info = found;
332 struct list_head *head = &info->space_info;
345 struct btrfs_space_info *space_info,
352 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
381 struct btrfs_space_info *space_info, u64 bytes,
388 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
391 used = btrfs_space_info_used(space_info, true);
392 avail = calc_available_free_space(fs_info, space_info, flush);
394 if (used + bytes < space_info->total_bytes + avail)
399 static void remove_ticket(struct btrfs_space_info *space_info,
404 ASSERT(space_info->reclaim_size >= ticket->bytes);
405 space_info->reclaim_size -= ticket->bytes;
410 * This is for space we already have accounted in space_info->bytes_may_use, so
414 struct btrfs_space_info *space_info)
419 lockdep_assert_held(&space_info->lock);
421 head = &space_info->priority_tickets;
425 u64 used = btrfs_space_info_used(space_info, true);
430 if ((used + ticket->bytes <= space_info->total_bytes) ||
431 btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
434 space_info,
436 remove_ticket(space_info, ticket);
438 space_info->tickets_id++;
445 if (head == &space_info->priority_tickets) {
446 head = &space_info->tickets;
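Source lines 410-446 above are the ticket-granting walk: with the space_info lock held, the priority tickets are scanned first and then the normal FIFO, and the head ticket is granted only if the space_info can cover it directly or via overcommit. Below is a standalone model of that walk; the types, the singly linked lists and the flat overcommit margin are stand-ins invented for the sketch, not the kernel's reserve_ticket handling.

#include <stdbool.h>
#include <stdint.h>

struct ticket_model {
	uint64_t bytes;
	bool granted;
	struct ticket_model *next;
};

struct space_model {
	uint64_t total_bytes;
	uint64_t used;                      /* stand-in for btrfs_space_info_used() */
	struct ticket_model *priority_tickets;
	struct ticket_model *tickets;
};

/*
 * Stand-in for the btrfs_can_overcommit() check: the kernel derives the
 * margin from calc_available_free_space() and the flush mode; here it is a
 * fixed fraction of the total just to keep the model self-contained.
 */
static bool can_overcommit_model(const struct space_model *si, uint64_t bytes)
{
	return si->used + bytes <= si->total_bytes + si->total_bytes / 8;
}

void try_granting_model(struct space_model *si)
{
	struct ticket_model **head = &si->priority_tickets;

again:
	while (*head) {
		struct ticket_model *ticket = *head;

		if (si->used + ticket->bytes <= si->total_bytes ||
		    can_overcommit_model(si, ticket->bytes)) {
			si->used += ticket->bytes;  /* bytes_may_use in the kernel */
			ticket->granted = true;
			*head = ticket->next;       /* remove_ticket() */
		} else {
			/* Stop at the first ticket that does not fit. */
			break;
		}
	}
	if (head == &si->priority_tickets) {
		head = &si->tickets;
		goto again;
	}
}

Breaking at the first ticket that cannot be satisfied keeps both queues FIFO: a later, smaller reservation is not allowed to jump ahead of one that is still waiting.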
461 static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
463 switch (space_info->flags) {
493 btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
498 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
577 struct btrfs_space_info *space_info,
687 spin_lock(&space_info->lock);
688 if (list_empty(&space_info->tickets) &&
689 list_empty(&space_info->priority_tickets)) {
690 spin_unlock(&space_info->lock);
693 spin_unlock(&space_info->lock);
705 * state of @space_info to detect the outcome.
708 struct btrfs_space_info *space_info, u64 num_bytes,
739 shrink_delalloc(fs_info, space_info, num_bytes,
766 btrfs_get_alloc_profile(fs_info, space_info->flags),
806 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
813 struct btrfs_space_info *space_info)
817 u64 to_reclaim = space_info->reclaim_size;
819 lockdep_assert_held(&space_info->lock);
821 avail = calc_available_free_space(fs_info, space_info,
823 used = btrfs_space_info_used(space_info, true);
831 if (space_info->total_bytes + avail < used)
832 to_reclaim += used - (space_info->total_bytes + avail);
838 struct btrfs_space_info *space_info)
845 thresh = mult_perc(space_info->total_bytes, 90);
847 lockdep_assert_held(&space_info->lock);
850 if ((space_info->bytes_used + space_info->bytes_reserved +
854 used = space_info->bytes_may_use + space_info->bytes_pinned;
872 if (space_info->reclaim_size)
885 * of the space_info used by bytes_used and we had 0 available we'd just
904 thresh = calc_available_free_space(fs_info, space_info,
906 used = space_info->bytes_used + space_info->bytes_reserved +
907 space_info->bytes_readonly + global_rsv_size;
908 if (used < space_info->total_bytes)
909 thresh += space_info->total_bytes - used;
910 thresh >>= space_info->clamp;
912 used = space_info->bytes_pinned;
943 used += space_info->bytes_may_use - global_rsv_size;
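Source lines 838-943 above come from the preemptive-reclaim heuristic. The arithmetic at lines 904-910 builds a clamped threshold: start from the available free space reported by calc_available_free_space(), add the part of the space_info not yet consumed by used/reserved/readonly plus the global reserve, then shift right by ->clamp; clamp grows (line 1573) each time a reservation has to queue a ticket and shrinks again (line 1220) when a preemptive pass finds nothing queued, so a larger clamp means a smaller threshold and earlier preemptive flushing. A sketch of just that calculation, with plain integers in place of the kernel counters:

#include <stdint.h>

/*
 * Sketch of the threshold arithmetic excerpted above.  avail stands in for
 * calc_available_free_space(), used_bytes for bytes_used + bytes_reserved +
 * bytes_readonly + the global reserve size; both are just numbers here.
 */
uint64_t preempt_thresh_sketch(uint64_t avail, uint64_t total_bytes,
			       uint64_t used_bytes, unsigned int clamp)
{
	uint64_t thresh = avail;

	/* Space not yet backed by real usage also counts as slack. */
	if (used_bytes < total_bytes)
		thresh += total_bytes - used_bytes;

	/* Each clamp increment halves the slack, so flushing starts sooner. */
	return thresh >> clamp;
}

The worker then compares the flushable load (starting from bytes_pinned at line 912, plus the delalloc-backed part of bytes_may_use at line 943) against this value and flushes preemptively once it crosses it.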
950 struct btrfs_space_info *space_info,
959 if (global_rsv->space_info != space_info)
969 remove_ticket(space_info, ticket);
972 space_info->tickets_id++;
983 * @space_info - the space info we were flushing
996 struct btrfs_space_info *space_info)
999 u64 tickets_id = space_info->tickets_id;
1002 trace_btrfs_fail_all_tickets(fs_info, space_info);
1006 __btrfs_dump_space_info(fs_info, space_info);
1009 while (!list_empty(&space_info->tickets) &&
1010 tickets_id == space_info->tickets_id) {
1011 ticket = list_first_entry(&space_info->tickets,
1014 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1021 remove_ticket(space_info, ticket);
1035 btrfs_try_granting_tickets(fs_info, space_info);
1037 return (tickets_id != space_info->tickets_id);
1048 struct btrfs_space_info *space_info;
1055 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1057 spin_lock(&space_info->lock);
1058 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1060 space_info->flush = 0;
1061 spin_unlock(&space_info->lock);
1064 last_tickets_id = space_info->tickets_id;
1065 spin_unlock(&space_info->lock);
1069 flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1070 spin_lock(&space_info->lock);
1071 if (list_empty(&space_info->tickets)) {
1072 space_info->flush = 0;
1073 spin_unlock(&space_info->lock);
1077 space_info);
1078 if (last_tickets_id == space_info->tickets_id) {
1081 last_tickets_id = space_info->tickets_id;
1111 if (maybe_fail_all_tickets(fs_info, space_info)) {
1115 space_info->flush = 0;
1121 spin_unlock(&space_info->lock);
1136 struct btrfs_space_info *space_info;
1145 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1151 spin_lock(&space_info->lock);
1152 while (need_preemptive_reclaim(fs_info, space_info)) {
1171 if (block_rsv_size < space_info->bytes_may_use)
1172 delalloc_size = space_info->bytes_may_use - block_rsv_size;
1189 } else if (space_info->bytes_pinned >
1192 to_reclaim = space_info->bytes_pinned;
1203 spin_unlock(&space_info->lock);
1213 flush_space(fs_info, space_info, to_reclaim, flush, true);
1215 spin_lock(&space_info->lock);
1219 if (loops == 1 && !space_info->reclaim_size)
1220 space_info->clamp = max(1, space_info->clamp - 1);
1221 trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1222 spin_unlock(&space_info->lock);
1268 struct btrfs_space_info *space_info;
1273 space_info = fs_info->data_sinfo;
1275 spin_lock(&space_info->lock);
1276 if (list_empty(&space_info->tickets)) {
1277 space_info->flush = 0;
1278 spin_unlock(&space_info->lock);
1281 last_tickets_id = space_info->tickets_id;
1282 spin_unlock(&space_info->lock);
1284 while (!space_info->full) {
1285 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1286 spin_lock(&space_info->lock);
1287 if (list_empty(&space_info->tickets)) {
1288 space_info->flush = 0;
1289 spin_unlock(&space_info->lock);
1296 last_tickets_id = space_info->tickets_id;
1297 spin_unlock(&space_info->lock);
1301 flush_space(fs_info, space_info, U64_MAX,
1303 spin_lock(&space_info->lock);
1304 if (list_empty(&space_info->tickets)) {
1305 space_info->flush = 0;
1306 spin_unlock(&space_info->lock);
1310 if (last_tickets_id == space_info->tickets_id) {
1313 last_tickets_id = space_info->tickets_id;
1318 if (space_info->full) {
1319 if (maybe_fail_all_tickets(fs_info, space_info))
1322 space_info->flush = 0;
1332 spin_unlock(&space_info->lock);
1337 maybe_fail_all_tickets(fs_info, space_info);
1338 space_info->flush = 0;
1339 spin_unlock(&space_info->lock);
1369 struct btrfs_space_info *space_info,
1377 spin_lock(&space_info->lock);
1378 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1386 spin_unlock(&space_info->lock);
1391 spin_unlock(&space_info->lock);
1392 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1395 spin_lock(&space_info->lock);
1397 spin_unlock(&space_info->lock);
1412 remove_ticket(space_info, ticket);
1413 } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1415 remove_ticket(space_info, ticket);
1423 btrfs_try_granting_tickets(fs_info, space_info);
1424 spin_unlock(&space_info->lock);
1428 struct btrfs_space_info *space_info,
1431 spin_lock(&space_info->lock);
1435 spin_unlock(&space_info->lock);
1439 while (!space_info->full) {
1440 spin_unlock(&space_info->lock);
1441 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1442 spin_lock(&space_info->lock);
1444 spin_unlock(&space_info->lock);
1450 remove_ticket(space_info, ticket);
1451 btrfs_try_granting_tickets(fs_info, space_info);
1452 spin_unlock(&space_info->lock);
1456 struct btrfs_space_info *space_info,
1463 spin_lock(&space_info->lock);
1473 * (bytes_may_use counter of our space_info).
1475 remove_ticket(space_info, ticket);
1479 spin_unlock(&space_info->lock);
1484 spin_lock(&space_info->lock);
1486 spin_unlock(&space_info->lock);
1493 * @space_info: space info for the reservation
1503 struct btrfs_space_info *space_info,
1514 wait_reserve_ticket(fs_info, space_info, ticket);
1517 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1522 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1527 priority_reclaim_data_space(fs_info, space_info, ticket);
1543 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1559 struct btrfs_space_info *space_info)
1573 space_info->clamp = min(space_info->clamp + 1, 8);
1596 * @space_info: space info we want to allocate from
1608 struct btrfs_space_info *space_info, u64 orig_bytes,
1637 spin_lock(&space_info->lock);
1638 used = btrfs_space_info_used(space_info, true);
1646 pending_tickets = !list_empty(&space_info->tickets) ||
1647 !list_empty(&space_info->priority_tickets);
1649 pending_tickets = !list_empty(&space_info->priority_tickets);
1656 ((used + orig_bytes <= space_info->total_bytes) ||
1657 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1658 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1669 used = btrfs_space_info_used(space_info, false);
1670 if (used + orig_bytes <= space_info->total_bytes) {
1671 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1687 space_info->reclaim_size += ticket.bytes;
1696 list_add_tail(&ticket.list, &space_info->tickets);
1697 if (!space_info->flush) {
1705 maybe_clamp_preempt(fs_info, space_info);
1707 space_info->flush = 1;
1709 space_info->flags,
1716 &space_info->priority_tickets);
1718 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1726 need_preemptive_reclaim(fs_info, space_info)) {
1727 trace_btrfs_trigger_flush(fs_info, space_info->flags,
1733 spin_unlock(&space_info->lock);
1737 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1763 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1765 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1766 block_rsv->space_info->flags,
1770 btrfs_dump_space_info(fs_info, block_rsv->space_info,
1799 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1810 struct btrfs_space_info *space_info;
1813 list_for_each_entry(space_info, &fs_info->space_info, list) {
1814 spin_lock(&space_info->lock);
1815 __btrfs_dump_space_info(fs_info, space_info);
1816 spin_unlock(&space_info->lock);
1822  * Account the unused space of all the readonly block groups in the space_info.