
Searched refs:reclaimable (Results 1 - 20 of 20) sorted by relevance

/kernel/linux/linux-5.10/include/trace/events/
oom.h
36 unsigned long reclaimable,
42 TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
48 __field( unsigned long, reclaimable)
59 __entry->reclaimable = reclaimable;
66 TP_printk("node=%d zone=%-8s order=%d reclaimable=%lu available=%lu min_wmark=%lu no_progress_loops=%d wmark_check=%d",
69 __entry->reclaimable, __entry->available, __entry->min_wmark,
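
Lines 36-69 above define the reclaim_retry_zone tracepoint (the linux-6.6 copy in the next result is identical). The TP_printk format at line 66 fixes exactly what each emitted trace line looks like. As a quick illustration, the standalone snippet below prints one such line using that format string verbatim; the field values are invented for demonstration, not taken from any real trace.

```c
#include <stdio.h>

int main(void)
{
	/* Format string copied from the TP_printk in oom.h; the values
	 * here are illustrative only. */
	printf("node=%d zone=%-8s order=%d reclaimable=%lu available=%lu "
	       "min_wmark=%lu no_progress_loops=%d wmark_check=%d\n",
	       0, "Normal", 0, 4321UL, 5678UL, 1234UL, 3, 1);
	return 0;
}
```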
/kernel/linux/linux-6.6/include/trace/events/
oom.h
36 unsigned long reclaimable,
42 TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
48 __field( unsigned long, reclaimable)
59 __entry->reclaimable = reclaimable;
66 TP_printk("node=%d zone=%-8s order=%d reclaimable=%lu available=%lu min_wmark=%lu no_progress_loops=%d wmark_check=%d",
69 __entry->reclaimable, __entry->available, __entry->min_wmark,
/kernel/linux/linux-5.10/mm/
memcg_reclaim.c
269 * don't have any reclaimable pages, or because their in shrink_anon()
279 * If there is no reclaimable memory, OOM. in shrink_anon()
287 * of reclaimable memory from other cgroups. in shrink_anon()
345 bool reclaimable = false; in shrink_node_hyperhold() local
469 reclaimable = true; in shrink_node_hyperhold()
533 if (reclaimable) in shrink_node_hyperhold()
536 return reclaimable; in shrink_node_hyperhold()
page_alloc.c
869 * This might let an unmovable request use a reclaimable pageblock in compaction_capture()
2454 * reclaimable and unmovable allocations, we steal regardless of page size,
2456 * is worse than movable allocations stealing from unmovable and reclaimable
3210 * We only track unmovable, reclaimable and movable on pcp lists. in free_unref_page_commit()
4587 * request even if all reclaimable pages are considered then we are in should_reclaim_retry()
4593 unsigned long reclaimable; in should_reclaim_retry() local
4597 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4602 * reclaimable pages? in should_reclaim_retry()
4606 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
4621 if (2 * write_pending > reclaimable) { in should_reclaim_retry()
5435 unsigned long reclaimable; in si_mem_available() local
[all...]
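
The should_reclaim_retry() hits at 4587-4621 sketch the allocator's retry heuristic: retrying reclaim is worthwhile only if freeing every reclaimable page, plus what is already free, would lift the zone over its min watermark, and the retry is throttled when more than half of the reclaimable pages are still pending writeback (the 2 * write_pending > reclaimable test at 4621). Below is a minimal user-space rendering of that decision; the flat watermark comparison is a simplification of the kernel's __zone_watermark_ok(), and all names are stand-ins.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the should_reclaim_retry() zone check:
 * retry only if free + reclaimable pages could clear the min watermark. */
static bool should_retry_reclaim(unsigned long free_pages,
				 unsigned long reclaimable,
				 unsigned long min_wmark,
				 unsigned long write_pending,
				 bool *throttle)
{
	unsigned long available = free_pages + reclaimable;

	if (available <= min_wmark)
		return false;	/* even full reclaim cannot satisfy us: OOM path */

	/* Most reclaimable pages are dirty or under writeback: back off so
	 * the flushers can make progress (2 * write_pending > reclaimable). */
	*throttle = 2 * write_pending > reclaimable;
	return true;
}

int main(void)
{
	bool throttle = false;
	bool retry = should_retry_reclaim(100, 900, 512, 600, &throttle);

	printf("retry=%d throttle=%d\n", retry, throttle);	/* retry=1 throttle=1 */
	return 0;
}
```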
vmscan.c
424 * up failing allocations although there are plenty of reclaimable in do_shrink_slab()
2612 * don't have any reclaimable pages, or because their in shrink_node_memcgs()
2622 * If there is no reclaimable memory, OOM. in shrink_node_memcgs()
2630 * of reclaimable memory from other cgroups. in shrink_node_memcgs()
2660 bool reclaimable = false; in shrink_node() local
2772 reclaimable = true; in shrink_node()
2843 if (reclaimable) in shrink_node()
4175 * potentially reclaimable. Otherwise, we have to worry about in node_pagecache_reclaimable()
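
Both shrink_node() here and shrink_node_hyperhold() in memcg_reclaim.c above follow the same progress-flag idiom: a local reclaimable starts false, flips to true the moment any scan frees pages, and the return value lets the caller decide between retrying reclaim and declaring OOM. A minimal sketch of that shape, where scan_one_lru() is a made-up stand-in for the real per-LRU scans:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for one reclaim pass over an LRU list. */
static unsigned long scan_one_lru(int lru)
{
	return lru % 2 ? 32 : 0;	/* pretend alternate lists yield pages */
}

/* Shape of shrink_node(): the reclaimable flag records whether any pass
 * freed pages; the caller uses it to choose between retry and OOM. */
static bool shrink_node_sketch(void)
{
	bool reclaimable = false;

	for (int lru = 0; lru < 4; lru++)
		if (scan_one_lru(lru) > 0)
			reclaimable = true;
	return reclaimable;
}

int main(void)
{
	printf("reclaimable=%d\n", shrink_node_sketch());
	return 0;
}
```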
/kernel/linux/linux-6.6/mm/
show_mem.c
38 unsigned long reclaimable; in si_mem_available() local
64 * Part of the reclaimable slab and other kernel memory consists of in si_mem_available()
68 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + in si_mem_available()
70 available += reclaimable - min(reclaimable / 2, wmark_low); in si_mem_available()
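
Line 70 is the arithmetic worth noting: si_mem_available() credits reclaimable slab and miscellaneous kernel memory toward MemAvailable, but holds back half of it, capped at the low watermark, because (per the comment at line 64) part of that memory consists of items in use that cannot actually be freed. A standalone restatement of just that adjustment:

```c
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* The si_mem_available() adjustment: count reclaimable kernel memory
 * toward MemAvailable, minus a margin of up to half of it. */
static unsigned long credit_reclaimable(unsigned long available,
					unsigned long reclaimable,
					unsigned long wmark_low)
{
	return available + reclaimable - min_ul(reclaimable / 2, wmark_low);
}

int main(void)
{
	/* illustrative page counts: 1000 + 400 - min(200, 150) = 1250 */
	printf("%lu\n", credit_reclaimable(1000, 400, 150));
	return 0;
}
```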
memcg_reclaim.c
271 * don't have any reclaimable pages, or because their in shrink_anon()
281 * If there is no reclaimable memory, OOM. in shrink_anon()
289 * of reclaimable memory from other cgroups. in shrink_anon()
344 bool reclaimable = false; in shrink_node_hyperhold() local
468 reclaimable = true; in shrink_node_hyperhold()
532 if (reclaimable) in shrink_node_hyperhold()
535 return reclaimable; in shrink_node_hyperhold()
page-writeback.c
250 * free and reclaimable pages, minus some zone reserves to protect
2084 unsigned long reclaimable; in wb_over_bg_thresh() local
2100 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
2102 reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
2104 if (reclaimable > thresh) in wb_over_bg_thresh()
2120 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
2122 reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_over_bg_thresh()
2124 if (reclaimable > thresh) in wb_over_bg_thresh()
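
The paired wb_stat_sum()/wb_stat() reads at 2100-2104 (and again at 2120-2124) are a precision/cost trade-off: wb_stat_sum() folds in every per-CPU delta and is exact, while wb_stat() reads only the batched value; the exact sum is worth paying for only when the threshold is small enough that per-CPU batching error could flip the reclaimable > thresh comparison. A toy version of that pattern, with an invented split counter standing in for the kernel's per-CPU writeback stats:

```c
#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

/* Toy split counter: a batched global value plus unfolded per-CPU deltas. */
struct split_counter {
	long global;
	long local[NCPU];
};

static long read_fast(const struct split_counter *c)
{
	return c->global;	/* cheap, may lag by the batching error */
}

static long read_sum(const struct split_counter *c)
{
	long v = c->global;

	for (int cpu = 0; cpu < NCPU; cpu++)
		v += c->local[cpu];	/* exact, but touches every CPU */
	return v;
}

/* Mirrors the wb_over_bg_thresh() pattern: pay for the exact sum only when
 * the threshold is small enough that counter error could flip the result. */
static bool over_thresh(const struct split_counter *c, long thresh, long max_err)
{
	long reclaimable = thresh < 2 * max_err ? read_sum(c) : read_fast(c);

	return reclaimable > thresh;
}

int main(void)
{
	struct split_counter c = { .global = 90, .local = { 3, 4, 2, 5 } };

	printf("%d\n", over_thresh(&c, 100, 64));	/* exact sum 104 > 100 */
	return 0;
}
```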
vmscan.c
811 * up failing allocations although there are plenty of reclaimable in do_shrink_slab()
1091 int reclaimable = 0, write_pending = 0; in skip_throttle_noprogress() local
1112 reclaimable += zone_reclaimable_pages(zone); in skip_throttle_noprogress()
1116 if (2 * write_pending <= reclaimable) in skip_throttle_noprogress()
1464 * The supposedly reclaimable folio was found to be in a VM_LOCKED vma. in folio_check_references()
5535 unsigned long reclaimable; in set_initial_priority() local
5545 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); in set_initial_priority()
5547 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); in set_initial_priority()
5549 /* round down reclaimable and round up sc->nr_to_reclaim */ in set_initial_priority()
5550 priority = fls_long(reclaimable) in set_initial_priority()
6522 bool reclaimable = false; in shrink_node() local
[all...]
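
The set_initial_priority() hit at 5535-5550 picks a starting scan priority so that reclaimable >> priority lands near sc->nr_to_reclaim. The expression at 5550 is truncated in the snippet; the comment at 5549 (round reclaimable down, round the target up) implies exponent arithmetic over powers of two, and the sketch below reconstructs it on that basis. fls_long() is rebuilt here from __builtin_clzl(), and the clamp range is an assumption borrowed from the kernel's DEF_PRIORITY of 12.

```c
#include <stdio.h>

/* find-last-set for unsigned long, 1-based like the kernel's fls_long() */
static int fls_long_sketch(unsigned long x)
{
	return x ? 8 * (int)sizeof(long) - __builtin_clzl(x) : 0;
}

#define DEF_PRIORITY 12

/* Round reclaimable down and nr_to_reclaim up to powers of two, then
 * pick priority so that reclaimable >> priority ~= nr_to_reclaim. */
static int initial_priority(unsigned long reclaimable, unsigned long nr_to_reclaim)
{
	int priority = fls_long_sketch(reclaimable) - 1
		     - fls_long_sketch(nr_to_reclaim - 1);

	if (priority < 0)
		priority = 0;
	if (priority > DEF_PRIORITY)
		priority = DEF_PRIORITY;
	return priority;
}

int main(void)
{
	/* ~1M reclaimable pages, 32-page target: 21 - 1 - 5 = 15, clamps to 12 */
	printf("%d\n", initial_priority(1UL << 20, 32));
	return 0;
}
```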
page_alloc.c
643 * This might let an unmovable request use a reclaimable pageblock in compaction_capture()
1703 * reclaimable and unmovable allocations, we steal regardless of page size,
1705 * is worse than movable allocations stealing from unmovable and reclaimable
2443 * We only track unmovable, reclaimable and movable on pcp lists. in free_unref_page()
3853 * request even if all reclaimable pages are considered then we are in should_reclaim_retry()
3859 unsigned long reclaimable; in should_reclaim_retry() local
3863 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
3868 * reclaimable pages? in should_reclaim_retry()
3872 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
409 * Return the number of reclaimable descriptors in a TX queue.
411 static inline int reclaimable(const struct sge_txq *tq) in reclaimable() function
414 int reclaimable = hw_cidx - tq->cidx; in reclaimable() local
415 if (reclaimable < 0) in reclaimable()
416 reclaimable += tq->size; in reclaimable()
417 return reclaimable; in reclaimable()
434 int avail = reclaimable(tq); in reclaim_completed_tx()
2136 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { in sge_tx_timer_cb()
2137 int avail = reclaimable(&txq->q); in sge_tx_timer_cb()
2158 * If we found too many reclaimable packet in sge_tx_timer_cb()
[all...]
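
reclaimable() at 411-417 (the same helper recurs in the linux-6.6 copy below and in the non-VF cxgb4 driver further down) is a classic ring-buffer distance computation: the hardware's consumer index minus the driver's, corrected for wraparound by adding the ring size whenever the difference goes negative. Below is a self-contained version over a reduced struct; the endianness conversion the driver applies to the hardware index is omitted, and txq_sketch keeps only the fields the computation touches.

```c
#include <stdio.h>

/* Reduced stand-in for struct sge_txq: just the ring fields used here. */
struct txq_sketch {
	unsigned int size;	/* number of descriptors in the ring */
	unsigned int cidx;	/* driver's consumer index */
	unsigned int hw_cidx;	/* consumer index last reported by hardware */
};

/* Descriptors the hardware has finished with but the driver has not yet
 * reclaimed; the +size fixes up ring wraparound. */
static int reclaimable_sketch(const struct txq_sketch *tq)
{
	int reclaimable = (int)tq->hw_cidx - (int)tq->cidx;

	if (reclaimable < 0)
		reclaimable += tq->size;
	return reclaimable;
}

int main(void)
{
	struct txq_sketch tq = { .size = 1024, .cidx = 1000, .hw_cidx = 8 };

	/* hardware wrapped past the end: 8 - 1000 + 1024 = 32 descriptors */
	printf("%d\n", reclaimable_sketch(&tq));
	return 0;
}
```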
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
409 * Return the number of reclaimable descriptors in a TX queue.
411 static inline int reclaimable(const struct sge_txq *tq) in reclaimable() function
414 int reclaimable = hw_cidx - tq->cidx; in reclaimable() local
415 if (reclaimable < 0) in reclaimable()
416 reclaimable += tq->size; in reclaimable()
417 return reclaimable; in reclaimable()
434 int avail = reclaimable(tq); in reclaim_completed_tx()
2133 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { in sge_tx_timer_cb()
2134 int avail = reclaimable(&txq->q); in sge_tx_timer_cb()
2155 * If we found too many reclaimable packet in sge_tx_timer_cb()
[all...]
/kernel/linux/linux-5.10/fs/xfs/
xfs_icache.c
143 * Queue background inode reclaim work if there are reclaimable inodes and there
254 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
398 * clear the radix tree reclaimable tag yet as it requires in __releases()
691 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
745 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ in xfs_inode_walk_ag_grab()
1236 * Return the number of reclaimable inodes in the filesystem for
1245 int reclaimable = 0; in xfs_reclaim_inodes_count() local
1249 reclaimable += pag->pag_ici_reclaimable; in xfs_reclaim_inodes_count()
1252 return reclaimable; in xfs_reclaim_inodes_count()
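
xfs_reclaim_inodes_count() at 1236-1252 is a straight reduction: it sums each allocation group's pag_ici_reclaimable counter, the per-AG split being what lets reclaim bookkeeping avoid a single contended global counter. A sketch of the same reduction over an array of invented per-AG records (the real code iterates the filesystem's per-AG structures rather than a flat array):

```c
#include <stdio.h>

/* Hypothetical per-allocation-group record holding just the counter
 * summed by xfs_reclaim_inodes_count(). */
struct perag_sketch {
	long ici_reclaimable;	/* reclaimable inodes in this AG */
};

static long reclaim_inodes_count(const struct perag_sketch *pags, int agcount)
{
	long reclaimable = 0;

	for (int ag = 0; ag < agcount; ag++)
		reclaimable += pags[ag].ici_reclaimable;
	return reclaimable;
}

int main(void)
{
	struct perag_sketch pags[] = { { 12 }, { 0 }, { 7 } };

	printf("%ld\n", reclaim_inodes_count(pags, 3));	/* 19 */
	return 0;
}
```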
/kernel/linux/linux-6.6/fs/xfs/
xfs_icache.c
185 * Queue background inode reclaim work if there are reclaimable inodes and there
296 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
507 * reclaimable state, wait for the initialisation to complete in __releases()
1008 * Return the number of reclaimable inodes in the filesystem for
1017 long reclaimable = 0; in xfs_reclaim_inodes_count() local
1021 reclaimable += pag->pag_ici_reclaimable; in xfs_reclaim_inodes_count()
1024 return reclaimable; in xfs_reclaim_inodes_count()
2153 * it does make inodes reclaimable, which eventually frees memory.
/kernel/linux/linux-5.10/drivers/md/
raid5-cache.c
28 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
1501 sector_t reclaimable; in r5l_do_reclaim() local
1510 * reclaimable/unreclaimable io_unit can be mixed in the list, we in r5l_do_reclaim()
1514 reclaimable = r5l_reclaimable_space(log); in r5l_do_reclaim()
1515 if (reclaimable >= reclaim_target || in r5l_do_reclaim()
1524 r5l_reclaimable_space(log) > reclaimable, in r5l_do_reclaim()
1531 if (reclaimable == 0 || !write_super) in r5l_do_reclaim()
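
The r5l_do_reclaim() hits show why the reclaim loop re-polls: reclaimable and unreclaimable io_units can be interleaved in the list (comment at 1510), so the code re-reads r5l_reclaimable_space(), stops once the target is met, and otherwise sleeps until more space than the last reading becomes reclaimable. The sketch below keeps only that loop shape; poll_reclaimable_space() is an invented stand-in that simply grows on each call, where the driver would block in wait_event().

```c
#include <stdio.h>

/* Invented stand-in for r5l_reclaimable_space(): in the driver this walks
 * the io_unit lists, and the value grows as in-flight I/O completes.
 * Here it grows by a fixed step per call for demonstration. */
static unsigned long poll_reclaimable_space(void)
{
	static unsigned long space;

	space += 512;		/* pretend one io_unit completed */
	return space;
}

/* Shape of the r5l_do_reclaim() loop: re-read reclaimable space until the
 * target is met (the driver sleeps in wait_event() between readings). */
static unsigned long reclaim_up_to(unsigned long reclaim_target)
{
	unsigned long reclaimable;

	do {
		reclaimable = poll_reclaimable_space();
	} while (reclaimable < reclaim_target);
	return reclaimable;
}

int main(void)
{
	printf("%lu\n", reclaim_up_to(2048));	/* 2048 after four polls */
	return 0;
}
```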
/kernel/linux/linux-6.6/drivers/md/
raid5-cache.c
28 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
1500 sector_t reclaimable; in r5l_do_reclaim() local
1509 * reclaimable/unreclaimable io_unit can be mixed in the list, we in r5l_do_reclaim()
1513 reclaimable = r5l_reclaimable_space(log); in r5l_do_reclaim()
1514 if (reclaimable >= reclaim_target || in r5l_do_reclaim()
1523 r5l_reclaimable_space(log) > reclaimable, in r5l_do_reclaim()
1530 if (reclaimable == 0 || !write_super) in r5l_do_reclaim()
/kernel/linux/linux-6.6/include/net/
sock.h
272 * @sk_reserved_mem: space reserved and non-reclaimable for the socket
1659 int reclaimable; in sk_mem_reclaim() local
1664 reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); in sk_mem_reclaim()
1666 if (reclaimable >= (int)PAGE_SIZE) in sk_mem_reclaim()
1667 __sk_mem_reclaim(sk, reclaimable); in sk_mem_reclaim()
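
sk_mem_reclaim() at 1659-1667 and the MPTCP variant in the next result share one guard: only forward-allocated memory above the socket's reserved floor counts as reclaimable, and nothing is handed back until at least a page's worth has accumulated, which avoids bouncing small amounts between the socket and the global accounting. A distilled version of the check, with PAGE_SIZE hard-coded to 4096 for the sketch:

```c
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096

/* Distilled sk_mem_reclaim() guard: reclaim forward-allocated memory
 * above the reserved floor, but only in at-least-page-sized chunks. */
static int mem_to_reclaim(int forward_alloc, int unused_reserved)
{
	int reclaimable = forward_alloc - unused_reserved;

	return reclaimable >= PAGE_SIZE_SKETCH ? reclaimable : 0;
}

int main(void)
{
	printf("%d\n", mem_to_reclaim(10000, 2000));	/* 8000: reclaimed */
	printf("%d\n", mem_to_reclaim(10000, 8000));	/* below a page: 0 */
	return 0;
}
```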
/kernel/linux/linux-6.6/net/mptcp/
protocol.c
193 int reclaimable; in mptcp_rmem_uncharge() local
196 reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk); in mptcp_rmem_uncharge()
199 if (unlikely(reclaimable >= PAGE_SIZE)) in mptcp_rmem_uncharge()
200 __mptcp_rmem_reclaim(sk, reclaimable); in mptcp_rmem_uncharge()
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4/
sge.c
339 * Return the number of reclaimable descriptors in a Tx queue.
341 static inline int reclaimable(const struct sge_txq *q) in reclaimable() function
362 int reclaim = reclaimable(q); in reclaim_completed_tx()
4279 avail = reclaimable(&q->q); in sge_tx_timer_cb()
4302 /* If we found too many reclaimable packets schedule a timer in sge_tx_timer_cb()
4307 /* We reclaimed all reclaimable TX Descriptors, so reschedule in sge_tx_timer_cb()
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb4/
sge.c
339 * Return the number of reclaimable descriptors in a Tx queue.
341 static inline int reclaimable(const struct sge_txq *q) in reclaimable() function
362 int reclaim = reclaimable(q); in reclaim_completed_tx()
4304 avail = reclaimable(&q->q); in sge_tx_timer_cb()
4327 /* If we found too many reclaimable packets schedule a timer in sge_tx_timer_cb()
4332 /* We reclaimed all reclaimable TX Descriptors, so reschedule in sge_tx_timer_cb()

Completed in 83 milliseconds