/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/

intel_timeline.c
     49  hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)  in hwsp_alloc() argument
     89  *cacheline = __ffs64(hwsp->free_bitmap);  in hwsp_alloc()
     90  hwsp->free_bitmap &= ~BIT_ULL(*cacheline);  in hwsp_alloc()
    100  static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)  in __idle_hwsp_free() argument
    107  /* As a cacheline becomes available, publish the HWSP on the freelist */  in __idle_hwsp_free()
    111  GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));  in __idle_hwsp_free()
    112  hwsp->free_bitmap |= BIT_ULL(cacheline);  in __idle_hwsp_free()
    164  cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)  in cacheline_alloc() argument
    169  GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));  in cacheline_alloc()
    182  cl->vaddr = page_pack_bits(vaddr, cacheline);  in cacheline_alloc()
    235  unsigned int cacheline;  intel_timeline_init() local
    453  unsigned int cacheline;  __intel_timeline_get_seqno() local
    [all...]
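
The hwsp_alloc()/__idle_hwsp_free() hits above show the allocation scheme: each HWSP page is carved into CACHELINE_BYTES-sized slots tracked by a 64-bit free bitmap; allocation takes the lowest set bit (__ffs64) and clears it, freeing publishes the bit again, and cacheline_alloc() packs the slot index into the low bits of the page address with page_pack_bits(). A minimal userspace sketch of that bitmap discipline (hwsp_page, slot_alloc, and SLOTS_PER_PAGE are illustrative names, not the kernel's):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a HWSP page split into 64 cacheline slots. */
#define SLOTS_PER_PAGE 64

struct hwsp_page {
	uint64_t free_bitmap; /* bit n set => slot n is free */
};

/* Take the lowest free slot, mirroring *cacheline = __ffs64(free_bitmap). */
static int slot_alloc(struct hwsp_page *p)
{
	if (!p->free_bitmap)
		return -1;                                  /* page exhausted */
	int slot = __builtin_ctzll(p->free_bitmap);         /* __ffs64() equivalent */
	p->free_bitmap &= ~(1ULL << slot);                  /* clear BIT_ULL(slot) */
	return slot;
}

/* Return a slot, mirroring hwsp->free_bitmap |= BIT_ULL(cacheline). */
static void slot_free(struct hwsp_page *p, int slot)
{
	p->free_bitmap |= 1ULL << slot;
}

int main(void)
{
	struct hwsp_page page = { .free_bitmap = ~0ULL };
	int a = slot_alloc(&page), b = slot_alloc(&page);
	printf("allocated slots %d and %d\n", a, b);         /* 0 and 1 */
	slot_free(&page, a);
	printf("next alloc reuses slot %d\n", slot_alloc(&page)); /* 0 again */
	return 0;
}
```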
intel_ring.h
    102  * same cacheline, the Head Pointer must not be greater than the Tail  in assert_ring_tail_valid()
    108  * into the same cacheline as ring->head.  in assert_ring_tail_valid()
    110  #define cacheline(a) round_down(a, CACHELINE_BYTES)  in assert_ring_tail_valid() macro
    111  GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);  in assert_ring_tail_valid()
    112  #undef cacheline  in assert_ring_tail_valid() macro
    134  * same cacheline, the Head Pointer must not be greater than the Tail  in __intel_ring_space()
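
The assert_ring_tail_valid() lines encode a hardware constraint: if head and tail fall into the same cacheline, the tail must not be numerically behind the head, and the check is done by comparing round_down(tail, CACHELINE_BYTES) against round_down(head, CACHELINE_BYTES). A hedged standalone sketch of the same check (assert_tail_valid and the 64-byte constant are placeholders, not i915 code):

```c
#include <assert.h>
#include <stdio.h>

#define CACHELINE_BYTES 64u
/* round_down() as in the kernel: clear the low bits. */
#define round_down(x, a) ((x) & ~((a) - 1u))

/*
 * Mirrors the GEM_BUG_ON() in assert_ring_tail_valid(): when head and
 * tail share a cacheline, a tail below head would look to the hardware
 * like the ring had wrapped.
 */
static void assert_tail_valid(unsigned int head, unsigned int tail)
{
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	assert(!(cacheline(tail) == cacheline(head) && tail < head));
#undef cacheline
}

int main(void)
{
	assert_tail_valid(0x100, 0x120); /* same cacheline, tail ahead: fine */
	assert_tail_valid(0x100, 0x180); /* different cachelines: fine */
	/* assert_tail_valid(0x120, 0x100); would trip the assertion */
	puts("tail positions validated");
	return 0;
}
```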
selftest_timeline.c
     72  unsigned long cacheline;  in __mock_hwsp_timeline() local
     79  cacheline = hwsp_cacheline(tl);  in __mock_hwsp_timeline()
     80  err = radix_tree_insert(&state->cachelines, cacheline, tl);  in __mock_hwsp_timeline()
     83  pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",  in __mock_hwsp_timeline()
     84  cacheline);  in __mock_hwsp_timeline()
    659  * Across a seqno wrap, we need to keep the old cacheline alive for  in live_hwsp_wrap()
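
__mock_hwsp_timeline() uses a radix tree keyed by the HWSP cacheline index purely to catch double allocation: radix_tree_insert() fails if the key already exists, which triggers the pr_err() above. The same trick can be modelled outside the kernel with any set; here is a tiny C version using a fixed "seen" bitmap (note_cacheline and MAX_CACHELINES are invented for the sketch):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_CACHELINES 1024            /* illustrative bound */

static bool seen[MAX_CACHELINES];      /* stands in for the radix tree */

/* Returns false (and complains) on a duplicate, like -EEXIST would. */
static bool note_cacheline(unsigned long cacheline)
{
	if (cacheline >= MAX_CACHELINES || seen[cacheline]) {
		fprintf(stderr,
			"HWSP cacheline %lu already used; duplicate allocation!\n",
			cacheline);
		return false;
	}
	seen[cacheline] = true;
	return true;
}

int main(void)
{
	memset(seen, 0, sizeof(seen));
	note_cacheline(3);
	note_cacheline(7);
	note_cacheline(3); /* reports the duplicate, as the selftest would */
	return 0;
}
```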
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/

intel_ring.h
    103  * same cacheline, the Head Pointer must not be greater than the Tail  in assert_ring_tail_valid()
    109  * into the same cacheline as ring->head.  in assert_ring_tail_valid()
    111  #define cacheline(a) round_down(a, CACHELINE_BYTES)  in assert_ring_tail_valid() macro
    112  GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);  in assert_ring_tail_valid()
    113  #undef cacheline  in assert_ring_tail_valid() macro
    135  * same cacheline, the Head Pointer must not be greater than the Tail  in __intel_ring_space()
selftest_timeline.c
     97  unsigned long cacheline;  in __mock_hwsp_timeline() local
    110  cacheline = hwsp_cacheline(tl);  in __mock_hwsp_timeline()
    111  err = radix_tree_insert(&state->cachelines, cacheline, tl);  in __mock_hwsp_timeline()
    114  pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",  in __mock_hwsp_timeline()
    115  cacheline);  in __mock_hwsp_timeline()
    679  * Across a seqno wrap, we need to keep the old cacheline alive for  in live_hwsp_wrap()
/kernel/linux/linux-5.10/drivers/soc/qcom/

smem.c
    144  * @cacheline: alignment for "cached" entries
    153  __le32 cacheline;  member
    253  * @global_cacheline: cacheline size for global partition
    256  * @cacheline: list of cacheline sizes for each host
    269  size_t cacheline[SMEM_HOST_COUNT];  member
    287  size_t cacheline)  in phdr_to_first_cached_entry()
    292  return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);  in phdr_to_first_cached_entry()
    321  cached_entry_next(struct smem_private_entry *e, size_t cacheline)  in cached_entry_next() argument
    325  return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);  in cached_entry_next()
    286  phdr_to_first_cached_entry(struct smem_partition_header *phdr, size_t cacheline)  phdr_to_first_cached_entry() argument
    511  qcom_smem_get_private(struct qcom_smem *smem, struct smem_partition_header *phdr, size_t cacheline, unsigned item, size_t *size)  qcom_smem_get_private() argument
    [all...]
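
The smem.c hits outline how "cached" SMEM entries are laid out: they grow downward from the end of the partition, with each header aligned to the per-host cacheline size, so the first entry sits at phdr + size - ALIGN(sizeof(header), cacheline) and the next one is found by stepping back over the previous entry's payload plus another aligned header. A rough userspace model of that backwards walk (struct entry_hdr, first_cached, next_cached are simplified stand-ins, not the real smem structures):

```c
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

/* Simplified stand-in for struct smem_private_entry. */
struct entry_hdr {
	uint32_t size; /* payload size of this cached entry */
};

/* First cached entry: ALIGN(header, cacheline) below the partition end,
 * mirroring phdr_to_first_cached_entry(). */
static struct entry_hdr *first_cached(void *part, uint32_t part_size,
				      size_t cacheline)
{
	return (struct entry_hdr *)((char *)part + part_size -
				    ALIGN(sizeof(struct entry_hdr), cacheline));
}

/* Next (older) cached entry: step back over this entry's payload and one
 * more aligned header, mirroring cached_entry_next(). */
static struct entry_hdr *next_cached(struct entry_hdr *e, size_t cacheline)
{
	return (struct entry_hdr *)((char *)e - e->size -
				    ALIGN(sizeof(struct entry_hdr), cacheline));
}

int main(void)
{
	static char part[4096];
	size_t cacheline = 64;
	struct entry_hdr *e = first_cached(part, sizeof(part), cacheline);
	e->size = 128; /* pretend the newest cached entry holds 128 bytes */
	printf("first entry at offset %td, next at offset %td\n",
	       (char *)e - part, (char *)next_cached(e, cacheline) - part);
	return 0;
}
```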
/kernel/linux/linux-6.6/drivers/soc/qcom/

smem.c
    146  * @cacheline: alignment for "cached" entries
    155  __le32 cacheline;  member
    203  * @cacheline: alignment for "cached" entries
    209  size_t cacheline;  member
    301  size_t cacheline)  in phdr_to_first_cached_entry()
    306  return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);  in phdr_to_first_cached_entry()
    335  cached_entry_next(struct smem_private_entry *e, size_t cacheline)  in cached_entry_next() argument
    339  return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);  in cached_entry_next()
    603  e = phdr_to_first_cached_entry(phdr, part->cacheline);  in qcom_smem_get_private()
    631  e = cached_entry_next(e, part->cacheline);  in qcom_smem_get_private()
    300  phdr_to_first_cached_entry(struct smem_partition_header *phdr, size_t cacheline)  phdr_to_first_cached_entry() argument
    [all...]
/kernel/linux/linux-5.10/include/asm-generic/

vmlinux.lds.h
   1064  * @cacheline: cacheline size
   1069  * @cacheline is used to align subsections to avoid false cacheline
   1072  #define PERCPU_INPUT(cacheline) \
   1077  . = ALIGN(cacheline); \
   1079  . = ALIGN(cacheline); \
   1087  * @cacheline: cacheline size
   1093  * @cacheline i
    [all...]
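
PERCPU_INPUT(cacheline) emits ". = ALIGN(cacheline);" between per-CPU input sections so that, for instance, read-mostly per-CPU data does not end up sharing a cacheline with frequently written data from the next subsection. The macro itself is linker-script text; the same false-sharing concern is usually handled in plain C with explicit alignment. A small illustrative C analogue of the principle (not the linker macro), assuming a 64-byte line:

```c
#include <stdio.h>

#define CACHELINE 64

/* Two groups of data: read-mostly vs. frequently written.  Forcing each
 * group onto its own cacheline-aligned boundary is the C-level analogue
 * of the ". = ALIGN(cacheline);" that PERCPU_INPUT() inserts between
 * per-CPU input sections. */
struct read_mostly {
	long config_a;
	long config_b;
} __attribute__((aligned(CACHELINE)));

struct hot_counters {
	long hits;
	long misses;
} __attribute__((aligned(CACHELINE)));

static struct read_mostly ro_data;
static struct hot_counters counters;

int main(void)
{
	printf("read-mostly at %p, hot counters at %p (at least %d bytes apart)\n",
	       (void *)&ro_data, (void *)&counters, CACHELINE);
	return 0;
}
```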
/kernel/linux/linux-6.6/include/asm-generic/

vmlinux.lds.h
   1019  * @cacheline: cacheline size
   1024  * @cacheline is used to align subsections to avoid false cacheline
   1027  #define PERCPU_INPUT(cacheline) \
   1032  . = ALIGN(cacheline); \
   1034  . = ALIGN(cacheline); \
   1042  * @cacheline: cacheline size
   1048  * @cacheline i
    [all...]
/kernel/linux/linux-5.10/drivers/md/bcache/

bset.c
    251  * BSET_CACHELINE was originally intended to match the hardware cacheline size -
    259  * Since (after level 5) every level of the bset_tree is on a new cacheline,
    260  * we're touching one fewer cacheline in the bset tree in exchange for one more
    261  * cacheline in the linear search - but the linear search might stop before it
    262  * gets to the second cacheline.
    434  * Return the cacheline index in bset_tree->data, where j is index
    461  * tree, j is the cacheline index of t->data.
    510  * in one cacheline in t->set (BSET_CACHELINE bytes).
    513  * the binary tree points to; to_inorder() gives us the cacheline, and then
    514  * bkey_float->m gives us the offset within that cacheline, i
    525  cacheline_to_bkey(struct bset_tree *t, unsigned int cacheline, unsigned int offset)  cacheline_to_bkey() argument
    537  bkey_to_cacheline_offset(struct bset_tree *t, unsigned int cacheline, struct bkey *k)  bkey_to_cacheline_offset() argument
    558  table_to_bkey(struct bset_tree *t, unsigned int cacheline)  table_to_bkey() argument
    694  unsigned int j, cacheline = 1;  bch_bset_build_written_tree() local
    [all...]
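
The bset.c comments describe the auxiliary binary search tree: to_inorder() turns a tree node index into a cacheline index within t->data, and bkey_float->m is the key's offset inside that cacheline, so cacheline_to_bkey() reduces to base + cacheline * BSET_CACHELINE + a small in-line offset. A hedged sketch of that address arithmetic (the BSET_CACHELINE value and the 8-byte offset unit are assumptions based on the surrounding comments, not verified against the file):

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed constants: BSET_CACHELINE is a power of two (the comments say it
 * was originally meant to match the hardware cacheline) and keys are
 * addressed within a cacheline in 8-byte units.  Both are illustrative. */
#define BSET_CACHELINE 128u
#define KEY_UNIT 8u

/* Compute the address of a key from (cacheline index, offset-in-line),
 * the same shape as bcache's cacheline_to_bkey(). */
static void *cacheline_to_key(void *data, unsigned int cacheline,
			      unsigned int offset)
{
	return (char *)data + cacheline * BSET_CACHELINE + offset * KEY_UNIT;
}

/* Inverse direction, analogous to bkey_to_cacheline_offset(): which
 * 8-byte unit inside its cacheline does a key start at? */
static unsigned int key_to_offset(void *data, unsigned int cacheline, void *key)
{
	return ((char *)key - (char *)cacheline_to_key(data, cacheline, 0)) / KEY_UNIT;
}

int main(void)
{
	static uint64_t keys[1024];
	void *k = cacheline_to_key(keys, 3, 5);
	printf("cacheline 3, offset 5 -> byte %td; offset back = %u\n",
	       (char *)k - (char *)keys, key_to_offset(keys, 3, k));
	return 0;
}
```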
/kernel/linux/linux-6.6/drivers/md/bcache/

bset.c
    251  * BSET_CACHELINE was originally intended to match the hardware cacheline size -
    259  * Since (after level 5) every level of the bset_tree is on a new cacheline,
    260  * we're touching one fewer cacheline in the bset tree in exchange for one more
    261  * cacheline in the linear search - but the linear search might stop before it
    262  * gets to the second cacheline.
    434  * Return the cacheline index in bset_tree->data, where j is index
    461  * tree, j is the cacheline index of t->data.
    510  * in one cacheline in t->set (BSET_CACHELINE bytes).
    513  * the binary tree points to; to_inorder() gives us the cacheline, and then
    514  * bkey_float->m gives us the offset within that cacheline, i
    525  cacheline_to_bkey(struct bset_tree *t, unsigned int cacheline, unsigned int offset)  cacheline_to_bkey() argument
    537  bkey_to_cacheline_offset(struct bset_tree *t, unsigned int cacheline, struct bkey *k)  bkey_to_cacheline_offset() argument
    558  table_to_bkey(struct bset_tree *t, unsigned int cacheline)  table_to_bkey() argument
    694  unsigned int j, cacheline = 1;  bch_bset_build_written_tree() local
    [all...]
/kernel/linux/linux-5.10/drivers/lightnvm/

pblk-rb.c
    140  entry->cacheline = pblk_cacheline_to_addr(init_entry++);  in pblk_rb_init()
    146  entry->cacheline = pblk_cacheline_to_addr(init_entry++);  in pblk_rb_init()
    260  entry->cacheline);  in __pblk_rb_update_l2p()
    276  * point to the physical address instead of to the cacheline in the write buffer
    303  * to the cacheline in the write buffer.
    353  pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);  in pblk_rb_write_entry_user()
    377  if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))  in pblk_rb_write_entry_gc()
pblk-write.c
    166  if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))  in pblk_prepare_resubmit()
pblk.h
    149  struct ppa_addr cacheline;  /* Cacheline for this entry */  member
    183  * of a cacheline
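
Across the pblk hits, every write-buffer entry carries a struct ppa_addr "cacheline" sentinel: while data only exists in the ring buffer, the L2P table maps the LBA to this cacheline address instead of a real physical address, and __pblk_rb_update_l2p()/pblk_prepare_resubmit() later compare the L2P value against entry->cacheline to decide whether the mapping still refers to the buffer. A toy model of that "address is either a cached position or a physical address" tagging (the bit layout here is invented for illustration, not pblk's actual ppa_addr encoding):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy address: top bit says "still in the write buffer", low bits hold
 * either the ring-buffer position or the device address. */
#define ADDR_CACHED (1ULL << 63)

typedef uint64_t addr_t;

static addr_t cacheline_to_addr(uint64_t rb_pos)  /* like pblk_cacheline_to_addr() */
{
	return ADDR_CACHED | rb_pos;
}

static bool addr_is_cached(addr_t a)
{
	return (a & ADDR_CACHED) != 0;
}

int main(void)
{
	addr_t l2p_entry = cacheline_to_addr(42);       /* LBA currently at rb pos 42 */
	addr_t entry_cacheline = cacheline_to_addr(42); /* this buffer entry's sentinel */

	/* The kind of comparison pblk_prepare_resubmit() makes with
	 * pblk_ppa_comp(): does the L2P entry still refer to this buffer slot? */
	if (addr_is_cached(l2p_entry) && l2p_entry == entry_cacheline)
		puts("L2P still points at the write-buffer entry");
	return 0;
}
```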
/kernel/linux/linux-5.10/arch/sparc/kernel/

prom_irqtrans.c
    355  static unsigned char cacheline[64]  in tomatillo_wsync_handler() local
    366  "i" (FPRS_FEF), "r" (&cacheline[0]),  in tomatillo_wsync_handler()
cherrs.S
    203  sub %g1, %g2, %g1  ! Move down 1 cacheline
    215  subcc %g1, %g2, %g1  ! Next cacheline
/kernel/linux/linux-6.6/arch/sparc/kernel/

prom_irqtrans.c
    356  static unsigned char cacheline[64]  in tomatillo_wsync_handler() local
    367  "i" (FPRS_FEF), "r" (&cacheline[0]),  in tomatillo_wsync_handler()
cherrs.S
    203  sub %g1, %g2, %g1  ! Move down 1 cacheline
    215  subcc %g1, %g2, %g1  ! Next cacheline
/kernel/linux/linux-5.10/arch/parisc/kernel/

perf_asm.S
    132  ; Cacheline start (32-byte cacheline)
    145  ; Cacheline start (32-byte cacheline)
   1025  ; Start of next 32-byte cacheline
   1038  ; Start of next 32-byte cacheline
/kernel/linux/linux-6.6/arch/parisc/kernel/

perf_asm.S
    132  ; Cacheline start (32-byte cacheline)
    145  ; Cacheline start (32-byte cacheline)
   1025  ; Start of next 32-byte cacheline
   1038  ; Start of next 32-byte cacheline