/kernel/linux/linux-5.10/net/sctp/
inqueue.c | SCTP input queue. sctp_inq_free() walks queue->in_chunk_list with list_for_each_entry_safe(), unlinking each chunk via list_del_init() and releasing it with sctp_chunk_free(). sctp_inq_push() frees an incoming chunk outright when its receiver is already dead (chunk->rcvr->dead) and otherwise appends it to in_chunk_list, updating the owning association's statistics when chunk->asoc is set; sctp_inq_peek() and sctp_inq_pop() hand queued chunks on to the state machine.
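
The free loops above all lean on the same discipline: when a list is torn down in place, the iterator must capture the successor before the current element is freed, which is exactly what list_for_each_entry_safe() provides. A minimal userspace sketch of the idea (a plain singly linked list standing in for the kernel's doubly linked list_head; names are illustrative):

    #include <stdlib.h>

    struct chunk {
        struct chunk *next;
        /* payload ... */
    };

    static void queue_free(struct chunk **head)
    {
        struct chunk *c = *head, *tmp;

        while (c) {
            tmp = c->next;   /* save the successor before 'c' is freed */
            free(c);
            c = tmp;
        }
        *head = NULL;
    }
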
output.c | SCTP packet construction. sctp_packet_config() sticks a pending ECNE chunk (from sctp_get_ecne_prepend()) on the packet before anything else is appended, and sctp_packet_free() releases queued chunks with list_for_each_entry_safe(). The chunk-taking helpers are sctp_packet_transmit_chunk(), the bundling paths sctp_packet_bundle_auth() and sctp_packet_bundle_sack(), the append paths __sctp_packet_append_chunk(), sctp_packet_append_chunk() and sctp_packet_append_data(), and the sizing checks sctp_packet_can_append_data() and sctp_packet_will_fit(); sctp_packet_pack() and sctp_packet_transmit() iterate the packet's chunk list with local chunk pointers.
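
sctp_packet_will_fit() and sctp_packet_can_append_data() come down to MTU arithmetic: an SCTP chunk occupies its length rounded up to a 4-byte boundary (the rounding the kernel expresses as SCTP_PAD4()), and it may be bundled only if that padded size still fits the packet. A hedged sketch with illustrative field names, not the kernel's actual structures:

    #include <stdbool.h>
    #include <stddef.h>

    /* round a chunk length up to the next 4-byte boundary */
    static size_t pad4(size_t len)
    {
        return (len + 3) & ~(size_t)3;
    }

    struct pkt_budget {
        size_t mtu;   /* bytes the packet may occupy on the wire */
        size_t used;  /* bytes already committed to headers + chunks */
    };

    static bool chunk_will_fit(const struct pkt_budget *p, size_t chunk_len)
    {
        return p->used + pad4(chunk_len) <= p->mtu;
    }
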
chunk.c | Implements the SCTP chunk/data-message abstraction. sctp_datamsg_free() frees every fragment linked on msg->chunks; sctp_datamsg_destroy() drops each fragment's reference with sctp_chunk_put() and, for fragments that never completed, raises a send-failed event via sctp_ulpevent_make_send_failed(), reporting SCTP_DATA_SENT or SCTP_DATA_UNSENT depending on chunk->has_tsn. The file also provides sctp_datamsg_assign(), sctp_datamsg_from_user(), sctp_chunk_abandoned() and sctp_chunk_fail().
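
sctp_datamsg_from_user() fragments a user message into DATA chunks no larger than the association's current payload limit, marking the first and last fragments so the peer can reassemble. A purely conceptual userspace sketch of that split (the real code also allocates chunks, handles partial reliability, and so on):

    #include <stddef.h>
    #include <stdio.h>

    static void split_message(size_t msg_len, size_t max_frag)
    {
        size_t off = 0;

        while (off < msg_len) {
            size_t len = msg_len - off < max_frag ? msg_len - off : max_frag;
            int first = (off == 0);
            int last  = (off + len == msg_len);

            printf("fragment @%zu len=%zu%s%s\n", off, len,
                   first ? " FIRST" : "", last ? " LAST" : "");
            off += len;
        }
    }
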
outqueue.c | SCTP output queue. __sctp_outq_teardown() drains each internal chunk list, failing every pending chunk with sctp_chunk_fail(chunk, q->error) before freeing it. sctp_outq_tail() adds a data chunk to the end of the queue; transmission and recovery run through sctp_retransmit_mark(), __sctp_outq_flush_rtx(), sctp_packet_singleton(), sctp_outq_select_transport(), sctp_outq_flush_ctrl(), sctp_outq_flush_data(), sctp_outq_sack(), sctp_mark_missing() and sctp_generate_fwdtsn().
/kernel/linux/linux-6.6/net/sctp/
inqueue.c | Same input-queue logic as the 5.10 tree: sctp_inq_free() drains in_chunk_list with list_for_each_entry_safe(), sctp_inq_push() frees chunks whose receiver is dead and queues the rest, and sctp_inq_peek()/sctp_inq_pop() dequeue them.
chunk.c | Same data-message handling as the 5.10 tree: sctp_datamsg_free() and sctp_datamsg_destroy() release fragments (the latter raising send-failed events with SCTP_DATA_SENT/SCTP_DATA_UNSENT), alongside sctp_datamsg_assign(), sctp_datamsg_from_user(), sctp_chunk_abandoned() and sctp_chunk_fail().
outqueue.c | Same output-queue structure as the 5.10 tree: __sctp_outq_teardown() fails and frees pending chunks, sctp_outq_tail() enqueues data chunks, and the flush, SACK and FORWARD-TSN paths (__sctp_outq_flush_rtx(), sctp_outq_flush_ctrl(), sctp_outq_flush_data(), sctp_outq_sack(), sctp_mark_missing(), sctp_generate_fwdtsn()) all match.
output.c | Packet construction as in 5.10, plus sctp_packet_bundle_pad(): the 6.6 tree can bundle a PAD chunk (used for PLPMTUD probe packets) in addition to the AUTH (sctp_packet_bundle_auth()) and SACK (sctp_packet_bundle_sack()) bundling, with the same append (__sctp_packet_append_chunk(), sctp_packet_append_chunk(), sctp_packet_append_data()) and sizing (sctp_packet_can_append_data(), sctp_packet_will_fit()) helpers.
/kernel/linux/linux-6.6/mm/
percpu-vm.c | The default, vmalloc-area-based percpu chunk allocator. pcpu_chunk_page() warns if used on a pre-mapped (immutable) chunk and resolves a chunk page through vmalloc_to_page(pcpu_chunk_addr(chunk, cpu, page_idx)). Page lifecycle is split into pcpu_alloc_pages()/pcpu_free_pages(), the mapping pair pcpu_map_pages()/pcpu_unmap_pages() with their pre/post flush helpers (pcpu_pre_unmap_flush(), pcpu_post_unmap_tlb_flush(), pcpu_post_map_flush()), and the chunk-level entry points pcpu_populate_chunk(), pcpu_depopulate_chunk(), pcpu_create_chunk(), pcpu_destroy_chunk() and pcpu_should_reclaim_chunk().
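
pcpu_chunk_addr() works because every CPU's copy of a chunk sits at a fixed per-unit offset from the chunk's base address, so a (cpu, page_idx) pair resolves to a single virtual address. An illustrative userspace model of that math (names and the PAGE_SHIFT value are assumptions, not the kernel's exact definitions):

    #include <stdint.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages assumed for the sketch */

    static uintptr_t chunk_addr(uintptr_t base, const uintptr_t *unit_off,
                                unsigned int cpu, int page_idx)
    {
        /* base of the chunk + this CPU's unit offset + page offset */
        return base + unit_off[cpu] + ((uintptr_t)page_idx << PAGE_SHIFT);
    }
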
percpu.c | The core percpu allocator. The first chunk is special: it carries the kernel's static percpu area and is set up before slab and the page allocator are online. Allocation tries the fullest chunk first; each chunk tracks free space with a bitmap whose bits represent PCPU_MIN_ALLOC_SIZE fragments, and a reverse page-to-chunk mapping is kept per page. Address and slot helpers (pcpu_addr_in_chunk(), pcpu_chunk_slot(), pcpu_chunk_addr(), pcpu_index_alloc_map()) sit under the region scanners pcpu_next_md_free_region() and pcpu_next_fit_region(); chunks migrate between slots via __pcpu_chunk_move(), pcpu_chunk_move(), pcpu_chunk_relocate(), pcpu_isolate_chunk() and pcpu_reintegrate_chunk(), with pcpu_update_empty_pages() tracking empty-page counts. The metadata-hint machinery (pcpu_block_update_scan(), pcpu_chunk_refresh_hint(), pcpu_block_refresh_hint(), pcpu_block_update_hint_alloc(), pcpu_block_update_hint_free()) feeds the fit search pcpu_is_populated()/pcpu_find_block_fit(), and the actual carving is pcpu_alloc_area()/pcpu_free_area(). Chunk lifetime runs through pcpu_init_md_blocks(), pcpu_alloc_first_chunk(), pcpu_alloc_chunk(), pcpu_free_chunk(), pcpu_chunk_populated() and pcpu_chunk_depopulated(), with memcg accounting hooks (pcpu_memcg_post_alloc_hook(), pcpu_memcg_free_hook()) that are stubbed when memcg is disabled; pcpu_alloc(), pcpu_balance_free(), pcpu_balance_populated(), pcpu_reclaim_populated() and free_percpu() are the top-level paths.
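
At the bottom of pcpu_find_block_fit() is a bitmap search: find a run of free allocation-map bits long enough for the request, starting at a suitably aligned offset. A toy first-fit version of that scan, without the per-block hints the real allocator layers on top to avoid rescanning (all names illustrative):

    #include <stdbool.h>

    #define MAP_BITS 128   /* bits in the toy allocation map */

    static bool bit_is_set(const unsigned long *map, int bit)
    {
        return (map[bit / (8 * sizeof(long))] >>
                (bit % (8 * sizeof(long)))) & 1;
    }

    /* return the first aligned offset with 'want' consecutive free bits,
     * or -1 if the map has no such run */
    static int find_fit(const unsigned long *map, int want, int align)
    {
        for (int off = 0; off + want <= MAP_BITS; off += align) {
            int run = 0;

            while (run < want && !bit_is_set(map, off + run))
                run++;
            if (run == want)
                return off;
        }
        return -1;
    }
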
percpu-km.c | Kernel-memory (contiguous page) based chunk allocator. NUMA is not supported, and the chunk size should be a power-of-two multiple of PAGE_SIZE because each chunk is allocated as one contiguous block; the code whines when it is not, and an #error rules out combining it with a paged first chunk. pcpu_post_unmap_tlb_flush(), pcpu_populate_chunk() and pcpu_depopulate_chunk() are no-ops here; pcpu_create_chunk() allocates the pages and tags each one with pcpu_set_page_chunk(nth_page(pages, i), chunk), and pcpu_destroy_chunk() and pcpu_should_reclaim_chunk() complete the lifecycle.
/kernel/linux/linux-6.6/net/sunrpc/xprtrdma/
svc_rdma_pcl.c | Parsed chunk lists for the server-side RDMA transport. pcl_free() pops each chunk off the list with pcl_first_chunk(), unlinks it and kfree()s it; pcl_alloc_chunk() sizes its allocation with struct_size(chunk, ch_segments, segcount) because the segment array is a flexible array member. pcl_insert_position() keeps the list ordered, pcl_set_read_segment() fills in a segment's handle, length and offset, and pcl_alloc_call(), pcl_alloc_read(), pcl_alloc_write() and pcl_process_nonpayloads() parse and walk the transport headers.
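
struct_size(chunk, ch_segments, segcount) is the kernel's overflow-checked way to size a struct that ends in a flexible array member. A plain userspace equivalent, minus the overflow checking and with simplified, illustrative fields:

    #include <stdlib.h>

    struct chunk {
        unsigned int segcount;
        struct { unsigned int handle, length; } ch_segments[]; /* flexible array */
    };

    static struct chunk *alloc_chunk(unsigned int segcount)
    {
        /* header plus 'segcount' trailing array elements in one allocation */
        struct chunk *c = malloc(sizeof(*c) +
                                 segcount * sizeof(c->ch_segments[0]));
        if (c)
            c->segcount = segcount;
        return c;
    }
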
/kernel/linux/linux-5.10/mm/
percpu-vm.c | The 5.10 version of the vmalloc-area-based chunk allocator: the same pcpu_chunk_page() check against pre-mapped chunks, the pcpu_alloc_pages()/pcpu_free_pages() and pcpu_map_pages()/pcpu_unmap_pages() pairs with their flush helpers, and pcpu_populate_chunk(), pcpu_depopulate_chunk(), pcpu_create_chunk() and pcpu_destroy_chunk(); 6.6's pcpu_should_reclaim_chunk() is not present in this tree.
percpu.c | The 5.10 core allocator, with the same first-chunk layout, fullest-chunk-first policy and PCPU_MIN_ALLOC_SIZE bitmap described for 6.6 above. The helper inventory matches up to the balance work: the address/slot helpers, region scanners, __pcpu_chunk_move()/pcpu_chunk_move()/pcpu_chunk_relocate(), the block-hint updates, pcpu_is_populated()/pcpu_find_block_fit(), pcpu_alloc_area()/pcpu_free_area(), chunk lifetime (pcpu_init_md_blocks(), pcpu_alloc_first_chunk(), pcpu_alloc_chunk(), pcpu_free_chunk(), pcpu_chunk_populated(), pcpu_chunk_depopulated()) and the memcg hooks. Here a single __pcpu_balance_workfn() stands in for 6.6's split pcpu_balance_free()/pcpu_balance_populated()/pcpu_reclaim_populated(), and boot-time initialization in pcpu_setup_first_chunk() also matches on "chunk".
percpu-km.c | The 5.10 kernel-memory chunk allocator, identical in shape to the 6.6 file: the same NUMA and power-of-two chunk-size caveats, the #error against a paged first chunk, stubbed pcpu_populate_chunk()/pcpu_depopulate_chunk(), and pcpu_create_chunk()/pcpu_destroy_chunk() tagging pages with pcpu_set_page_chunk(); the pcpu_post_unmap_tlb_flush() stub and pcpu_should_reclaim_chunk() appear only in 6.6.
/kernel/linux/linux-5.10/drivers/s390/cio/
itcw.c | itcw_init() carves an interrogate TCW out of a caller-supplied buffer with fit_chunk(): the struct itcw itself is placed with byte alignment, the struct tcw with 64-byte alignment, and every fit_chunk() return is checked with IS_ERR() and propagated on failure. itcw_calc_size() accounts for the slack these placement constraints can add around the data chunk.
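
fit_chunk() reports failure through the pointer itself, in the kernel's ERR_PTR()/IS_ERR() convention: small negative errno values are encoded into the top 4095 addresses, which no valid pointer can occupy. A self-contained rendition of that convention (the kernel's real macros add type plumbing around the same idea):

    #include <stdint.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err)     { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        /* error pointers occupy the top MAX_ERRNO addresses */
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* usage, mirroring itcw_init():
     *     void *chunk = fit_chunk(...);
     *     if (IS_ERR(chunk))
     *         return chunk;    // propagate the encoded errno
     */
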
/kernel/linux/linux-6.6/drivers/s390/cio/
itcw.c | The same fit_chunk()-based ITCW/TCW placement and IS_ERR() checks as the 5.10 file.
/kernel/linux/linux-6.6/kernel/trace/
pid_list.c | The trace PID list keeps per-level free lists of chunks. get_lower_chunk() and get_upper_chunk() pop a union lower_chunk or union upper_chunk off pid_list->lower_list or pid_list->upper_list and clear the chunk's next pointer before handing it out; put_lower_chunk() and put_upper_chunk() return chunks to those lists. upper_empty() tests whether an upper chunk still references any lower chunk, pid_list_refill_irq() replenishes both free lists, and trace_pid_list_alloc()/trace_pid_list_free() build and tear down the chunk pools.
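
The get/put pairs above implement a simple intrusive free list: idle chunks are threaded through an embedded next pointer, and popping one clears that pointer so a stale link never escapes into live data. A minimal sketch with illustrative names:

    struct chunk {
        struct chunk *next;       /* free-list link while the chunk is idle */
        unsigned long data[64];   /* payload while the chunk is in use */
    };

    static struct chunk *chunk_pop(struct chunk **free_list)
    {
        struct chunk *c = *free_list;

        if (c) {
            *free_list = c->next;
            c->next = NULL;   /* same hygiene as get_lower_chunk() */
        }
        return c;
    }

    static void chunk_push(struct chunk **free_list, struct chunk *c)
    {
        c->next = *free_list;
        *free_list = c;
    }
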
/kernel/linux/linux-5.10/kernel/
audit_tree.c | One struct audit_chunk is attached to each inode of interest through an audit_tree_mark (an fsnotify mark); the chunk is replaced on tagging and untagging, the mark stays stable as long as a chunk is attached, and the mark/chunk association is protected by hash_lock. References to chunks are collected at audit_inode{,_child}() time; tree.chunks anchors chunk.owners[].list and chunk.trees anchors tree.same_root, all under hash_lock. Lifetime and hashing run through free_chunk(), audit_put_chunk(), __put_chunk() (which recovers the chunk from its RCU head with container_of()), audit_mark_put_chunk(), alloc_chunk(), insert_hash() and audit_tree_match(); tagging goes through replace_mark_chunk(), remove_chunk_node(), chunk_count_trees(), untag_chunk(), create_chunk() and tag_chunk(), with pruning and eviction in prune_tree_chunks(), audit_trim_trees(), evict_chunk() and audit_tree_freeing_mark().
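
__put_chunk() runs as an RCU callback and only receives the rcu_head, so it recovers the enclosing chunk with container_of(), which at its core is pointer arithmetic from a member back to its parent struct. A simplified, self-contained version (the kernel's macro adds type checking, and the structs here are stand-ins for the real ones):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *);
    };

    struct audit_chunk_like {   /* simplified stand-in for struct audit_chunk */
        int refcount;
        struct rcu_head head;
    };

    static void free_cb(struct rcu_head *rcu)
    {
        struct audit_chunk_like *chunk =
            container_of(rcu, struct audit_chunk_like, head);
        /* safe to free 'chunk' here: the RCU grace period has elapsed */
        (void)chunk;
    }
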
/kernel/linux/linux-6.6/kernel/
audit_tree.c | The same chunk lifecycle as the 5.10 file; the only visible difference is that remove_chunk_node() takes a struct audit_node * rather than a struct node *.
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_icm_pool.c | ICM chunk pool for software steering. dr_icm_chunk_ste_init() allocates three per-chunk arrays sized by the bucket's num_of_entries (ste_arr and hw_ste_arr with kvzalloc(), miss_list with kvmalloc()), unwinding the earlier allocations if a later one fails. dr_icm_chunks_create() populates a bucket, dr_icm_chunk_ste_cleanup() and dr_icm_chunk_destroy() undo it, dr_icm_bucket_cleanup() walks chunks with a safe iterator, and mlx5dr_icm_alloc_chunk()/mlx5dr_icm_free_chunk() are the pool's public entry points.
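
dr_icm_chunk_ste_init() is a textbook goto-unwind ladder: three allocations in sequence, with each failure path releasing exactly what succeeded before it. A userspace sketch of the shape (illustrative names; the kernel version uses kvzalloc()/kvfree() and returns -ENOMEM):

    #include <stdlib.h>

    struct chunk {
        void *ste_arr, *hw_ste_arr, *miss_list;
    };

    static int chunk_arrays_init(struct chunk *c, size_t n, size_t esz)
    {
        c->ste_arr = calloc(n, esz);
        if (!c->ste_arr)
            goto out;
        c->hw_ste_arr = calloc(n, esz);
        if (!c->hw_ste_arr)
            goto out_ste;
        c->miss_list = calloc(n, sizeof(void *));
        if (!c->miss_list)
            goto out_hw;
        return 0;

    out_hw:                 /* miss_list failed: undo hw_ste_arr */
        free(c->hw_ste_arr);
    out_ste:                /* hw_ste_arr failed: undo ste_arr */
        free(c->ste_arr);
    out:
        return -1;
    }
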
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/
icm.c | ICM (interconnect context memory) is kept as a list of chunks, each holding a bounded number of scatterlist entries; the pages behind a chunk are not necessarily contiguous. mlx4_free_icm_pages() first dma_unmap_sg()s the chunk's scatterlist when chunk->nsg > 0, then frees each entry's pages with __free_pages(sg_page(&chunk->sg[i]), get_order(chunk->sg[i].length)); mlx4_free_icm_coherent() handles the DMA-coherent variant. mlx4_free_icm() walks the chunk list with a safe iterator, mlx4_alloc_icm() builds it, and mlx4_table_find() locates the chunk backing a table entry.
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx4/
icm.c | Unchanged from the 5.10 file: the same chunked scatterlist layout, mlx4_free_icm_pages()/mlx4_free_icm_coherent() teardown, and mlx4_free_icm(), mlx4_alloc_icm() and mlx4_table_find().
/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/
i40iw_pble.c | PBLE (physical buffer list entry) pool management. i40iw_destroy_pble_pool() walks the pool's chunk list, freeing vmalloc-backed chunks (chunk->type == I40IW_VMALLOC) through i40iw_free_vmalloc_mem() before kfree()ing the chunk itself. add_sd_direct() services a request from DMA-coherent memory and marks the chunk I40IW_DMA_COHERENT; i40iw_get_vmalloc_mem() backs a chunk with vmalloc pages, and add_bp_pages() and add_pble_pool() grow the pool.
/kernel/linux/linux-5.10/lib/
genalloc.c | General-purpose special-memory allocator. A pool is a list of chunks, and chunk_size() is simply chunk->end_addr - chunk->start_addr + 1. gen_pool_add_owner() adds a new chunk to a pool given its virtual start, physical start, size in bytes and the NUMA node for the chunk structure and bitmap; gen_pool_virt_to_phys() translates through the owning chunk, and gen_pool_destroy() frees every chunk. Allocation and release scan the chunk list in gen_pool_alloc_algo_owner() and gen_pool_free_owner(); gen_pool_for_each_chunk() invokes a callback on every chunk, and gen_pool_has_addr(), gen_pool_avail() and gen_pool_size() query the chunks.
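
Putting the exported pieces together, typical gen_pool usage creates a pool, donates one chunk of special memory, and then carves allocations out of it. A hedged sketch against the kernel API (kernel build context assumed; error handling abbreviated, and the caller is assumed to own the donated region):

    #include <linux/errno.h>
    #include <linux/genalloc.h>

    static int pool_demo(unsigned long vaddr, size_t size)
    {
        struct gen_pool *pool;
        unsigned long va;

        pool = gen_pool_create(5, -1);   /* 32-byte minimum granule, any node */
        if (!pool)
            return -ENOMEM;
        if (gen_pool_add(pool, vaddr, size, -1)) {   /* donate one chunk */
            gen_pool_destroy(pool);
            return -ENOMEM;
        }
        va = gen_pool_alloc(pool, 256);  /* carve 256 bytes from the chunk */
        if (va)
            gen_pool_free(pool, va, 256);
        gen_pool_destroy(pool);          /* all allocations must be returned first */
        return 0;
    }
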