/kernel/linux/linux-6.6/block/

blk-mq-tag.c
    Tag allocation for blk-mq; the header comment promises fairer distribution of
    tags between multiple submitters when a shared tag map is in use.  Matches for
    "tags" in blk_mq_update_wake_batch() (which calls
    sbitmap_queue_recalculate_wake_batch() on bitmap_tags and breserved_tags),
    __blk_mq_tag_busy() (active_queues updated under tags->lock),
    blk_mq_tag_wakeup_all(), __blk_mq_tag_idle(), blk_mq_get_tags(),
    blk_mq_get_tag(), blk_mq_put_tag(), blk_mq_put_tags(),
    blk_mq_find_and_get_req(), the bt_iter()/bt_tags_iter() iterator helpers,
    bt_tags_for_each(), __blk_mq_all_tag_iter(), blk_mq_all_tag_iter(),
    blk_mq_queue_tag_busy_iter(), blk_mq_init_tags(), blk_mq_free_tags(),
    blk_mq_tag_update_depth() and blk_mq_tag_resize_shared_tags().  (further
    matches elided)

blk-mq.h
    Prototypes that take struct blk_mq_tags: blk_mq_free_rqs(),
    blk_mq_free_rq_map(), blk_mq_free_tags(), blk_mq_put_tag(), blk_mq_put_tags(),
    blk_mq_tag_wakeup_all(), blk_mq_all_tag_iter(), a depth-resize prototype taking
    struct blk_mq_tags **tags, unsigned int depth and a can_grow flag, the
    "allocate multiple requests/tags in one go" batch comment, and the inline
    blk_mq_tag_is_reserved() helper.  (further matches elided)

blk-mq.c
    Core users of the tag maps: blk_mq_rq_ctx_init() and
    __blk_mq_alloc_requests_batch() pick requests out of tags->static_rqs[],
    __blk_mq_free_request() returns a tag with blk_mq_put_tag(),
    blk_mq_flush_tag_batch() and blk_mq_end_request_batch() return up to
    TAG_COMP_BATCH tags at once with blk_mq_put_tags(), blk_mq_wake_waiters()
    calls blk_mq_tag_wakeup_all(), and the map setup/teardown helpers
    blk_mq_clear_rq_mapping(), blk_mq_free_rqs(), blk_mq_free_rq_map(),
    blk_mq_alloc_rq_map(), blk_mq_alloc_rqs(), blk_mq_hctx_has_requests(),
    blk_mq_clear_flush_rq_mapping(), blk_mq_alloc_map_and_rqs() and
    blk_mq_free_map_and_rqs() also match.  (further matches elided)

/kernel/linux/linux-5.10/block/

blk-mq-tag.c
    Same role and header comment as the linux-6.6 copy above, with 5.10-era
    details: __blk_mq_tag_busy()/__blk_mq_tag_idle() track active_queues with
    atomic_inc()/atomic_dec(), blk_mq_tag_wakeup_all() calls
    sbitmap_queue_wake_all() on bitmap_tags and breserved_tags, and the file still
    carries blk_mq_init_bitmap_tags() and blk_mq_init_shared_sbitmap().  Further
    matches in blk_mq_get_tag(), blk_mq_put_tag(), blk_mq_find_and_get_req(), the
    bt_iter()/bt_tags_iter() helpers, the tag-iteration functions,
    blk_mq_queue_tag_busy_iter(), blk_mq_init_tags(), blk_mq_free_tags() and
    blk_mq_tag_update_depth().  (further matches elided)

blk-mq-tag.h
    Declarations for the same API: blk_mq_free_tags(), blk_mq_put_tag(),
    blk_mq_tag_update_depth() (struct blk_mq_tags **tags plus a depth argument),
    blk_mq_tag_wakeup_all(), blk_mq_all_tag_iter(), and the inline
    blk_mq_tag_is_reserved(), which simply checks tag < tags->nr_reserved_tags.
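
All of the block-layer hits above revolve around one idea: a tag is a small integer
index drawn from a bitmap-backed pool when a request is set up and returned when it
completes.  The following is a minimal, single-threaded user-space sketch of that
pattern; the function names are illustrative, and the real code uses struct
sbitmap_queue with per-word atomics, wake batching and wait queues to stay fair
between submitters that share a tag map.

#include <limits.h>
#include <stdio.h>

#define NR_TAGS		64
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)
#define NR_WORDS	((NR_TAGS + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long tag_map[NR_WORDS];

/* Return a free tag index, or -1 if the pool is exhausted. */
static int get_tag(void)
{
	for (unsigned int tag = 0; tag < NR_TAGS; tag++) {
		unsigned long bit = 1UL << (tag % BITS_PER_WORD);

		if (!(tag_map[tag / BITS_PER_WORD] & bit)) {
			tag_map[tag / BITS_PER_WORD] |= bit;
			return (int)tag;
		}
	}
	return -1;	/* a real submitter would sleep on a wait queue here */
}

/* Return a tag to the pool; in blk-mq this is what wakes up sleepers. */
static void put_tag(unsigned int tag)
{
	tag_map[tag / BITS_PER_WORD] &= ~(1UL << (tag % BITS_PER_WORD));
}

int main(void)
{
	int a = get_tag(), b = get_tag();

	printf("got tags %d and %d\n", a, b);
	put_tag((unsigned int)b);
	put_tag((unsigned int)a);
	return 0;
}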

/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/core/

memory.c
    Compression-tag bookkeeping for memory objects: nvkm_memory_tags_put() takes
    fb->tags.mutex, drops the refcount and, on the final put, returns the region to
    fb->tags.mm with nvkm_mm_free() before freeing and clearing memory->tags;
    nvkm_memory_tags_get() is the matching acquire path.  (further matches elided)

/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/core/

memory.c
    Same get/put pair as the linux-6.6 copy above, except that the allocator is
    fb->tags itself rather than fb->tags.mm; nvkm_memory_tags_get() also shows the
    fast path that reuses an existing memory->tags allocation.  (further matches
    elided)
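
The memory.c hits show a refcounted, mutex-protected "get or create, put and free on
last reference" pattern around the shared tag allocation.  Below is a sketch of just
that pattern with illustrative names (tag_pool, tag_alloc); it is not nouveau's API,
and the actual allocation bookkeeping (an nvkm_mm node) is reduced to a comment.

#include <pthread.h>
#include <stdlib.h>

struct tag_alloc {
	int refcount;
	/* ...real code records an offset/length carved out of the tag RAM... */
};

struct tag_pool {
	pthread_mutex_t lock;		/* plays the role of fb->tags.mutex */
	struct tag_alloc *cached;	/* allocation shared by all users */
};

/* Take a reference, creating the shared allocation on first use. */
struct tag_alloc *tags_get(struct tag_pool *pool)
{
	struct tag_alloc *t;

	pthread_mutex_lock(&pool->lock);
	t = pool->cached;
	if (!t) {
		t = calloc(1, sizeof(*t));
		if (t)
			pool->cached = t;
	}
	if (t)
		t->refcount++;
	pthread_mutex_unlock(&pool->lock);
	return t;
}

/* Drop a reference; the last user frees the allocation. */
void tags_put(struct tag_pool *pool, struct tag_alloc *t)
{
	pthread_mutex_lock(&pool->lock);
	if (t && --t->refcount == 0) {
		pool->cached = NULL;
		free(t);
	}
	pthread_mutex_unlock(&pool->lock);
}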

/kernel/linux/linux-5.10/fs/erofs/

tagptr.h
    Typed tagged-pointer helpers: tagptr_fold() ORs a small tag value into an
    aligned pointer after a compile-time check (the "bad tagptr tags" error) that a
    constant tag fits __tagptr_mask(type), while tagptr_replace_tags() and
    tagptr_unfold_ptr() rewrite or strip the tag bits.  (further matches elided)
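
The tagptr helpers rely on pointer alignment leaving a few low bits free to carry a
tag.  A stripped-down illustration of that trick follows; erofs wraps it in typed
macros with the compile-time mask check quoted above, whereas this sketch uses plain
functions, a fixed two-bit tag and run-time asserts.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_BITS	2			/* pointers assumed 4-byte aligned */
#define TAG_MASK	(((uintptr_t)1 << TAG_BITS) - 1)

/* Pack tag bits into the low bits of an aligned pointer. */
static inline uintptr_t tagptr_fold(void *ptr, unsigned int tags)
{
	assert(((uintptr_t)ptr & TAG_MASK) == 0);	/* alignment guarantee */
	assert((tags & ~TAG_MASK) == 0);		/* tag must fit the free bits */
	return (uintptr_t)ptr | tags;
}

/* Recover the original pointer by masking the tag bits off. */
static inline void *tagptr_unfold_ptr(uintptr_t t)
{
	return (void *)(t & ~TAG_MASK);
}

/* Recover just the tag bits. */
static inline unsigned int tagptr_unfold_tags(uintptr_t t)
{
	return (unsigned int)(t & TAG_MASK);
}

int main(void)
{
	static int object;
	uintptr_t t = tagptr_fold(&object, 3);

	printf("ptr=%p tags=%u\n", tagptr_unfold_ptr(t), tagptr_unfold_tags(t));
	return 0;
}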

/kernel/linux/linux-5.10/arch/arm64/mm/

mteswap.c
    Keeps MTE tags alive across swap: tag storage is sized from the "tags granule is
    16 bytes, 2 tags stored per byte" rule, mte_save_tags() stores the buffer in the
    mte_pages XArray keyed by the swap entry (warning "Failed to store MTE tags" on
    xa_is_err()), mte_restore_tags() looks it up and calls mte_restore_page_tags(),
    and mte_invalidate_tags()/mte_invalidate_tags_area() erase and free the saved
    storage.  (further matches elided)

/kernel/linux/linux-6.6/arch/arm64/mm/

mteswap.c
    The linux-6.6 copy of the same file, with the same set of hits
    (mte_save_tags(), mte_restore_tags(), mte_invalidate_tags(),
    mte_invalidate_tags_area()) at slightly shifted line numbers.  (further matches
    elided)
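
For a 4 KiB page the "16-byte granule, two tags per byte" rule works out to
4096 / 16 = 256 tag granules and 256 / 2 = 128 bytes of tag storage per page.  The
sketch below mirrors the shape of the swap bookkeeping under those assumptions; the
fixed lookup table stands in for the mte_pages XArray, and the two copy stubs stand
in for the mte_save_page_tags()/mte_restore_page_tags() assembly routines.

#include <stdlib.h>

/* 4096-byte page, 16-byte tag granule, two 4-bit tags packed per storage byte. */
#define PAGE_SIZE		4096
#define MTE_GRANULE_SIZE	16
#define TAG_STORAGE		(PAGE_SIZE / MTE_GRANULE_SIZE / 2)	/* 128 bytes */

#define NR_SWAP_SLOTS	1024
static void *saved_tags[NR_SWAP_SLOTS];		/* stand-in for the mte_pages XArray */

static void copy_tags_from_page(const void *page, void *buf) { (void)page; (void)buf; }
static void copy_tags_to_page(void *page, const void *buf)   { (void)page; (void)buf; }

/* Swap-out path: allocate 128 bytes of storage and stash it under the swap slot. */
int save_tags(unsigned long swap_slot, const void *page)
{
	void *buf = malloc(TAG_STORAGE);

	if (!buf)
		return -1;
	copy_tags_from_page(page, buf);
	saved_tags[swap_slot % NR_SWAP_SLOTS] = buf;
	return 0;
}

/* Swap-in path: a missing entry just means the page had no saved tags. */
void restore_tags(unsigned long swap_slot, void *page)
{
	void *buf = saved_tags[swap_slot % NR_SWAP_SLOTS];

	if (buf)
		copy_tags_to_page(page, buf);
}

/* Teardown path: drop the storage when the swap slot is freed. */
void invalidate_tags(unsigned long swap_slot)
{
	free(saved_tags[swap_slot % NR_SWAP_SLOTS]);
	saved_tags[swap_slot % NR_SWAP_SLOTS] = NULL;
}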

/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/fb/

nv20.c
    nv20_fb_tile_comp() sizes a compression-tag run as round_up(tiles /
    fb->ram->parts, 0x40) and claims it from fb->tags with nvkm_mm_head();
    nv20_fb_tile_fini() releases it with nvkm_mm_free(), and nv20_fb_tags() reads
    the total tag count from register 0x100320 (returning tags ? tags + 1 : 0).

nv40.c
    nv40_fb_tile_comp() uses the same allocation with a 0x100 alignment and folds
    the last tag offset into tile->zcomp ((offset + tags - 1) >> 8, shifted left by
    13); the total count still comes from nv20_fb_tags().

nv35.c
    As nv20, with the zcomp encoding ((offset + tags - 1) >> 6) << 13; reuses
    nv20_fb_tags().

nv36.c
    As nv35, but the zcomp field is shifted left by 14 instead of 13; reuses
    nv20_fb_tags().

nv25.c
    Same 0x40-aligned tag allocation as nv20 (no zcomp encoding shown in the hits);
    reuses nv20_fb_tags().

/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/subdev/fb/

nv20.c
    The linux-6.6 copy of nv20_fb_tile_comp()/nv20_fb_tile_fini()/nv20_fb_tags();
    the only visible difference from 5.10 is that the allocator moved to
    fb->tags.mm.

nv40.c
    linux-6.6 copy of nv40_fb_tile_comp() (0x100 alignment, >> 8 << 13 zcomp
    encoding), allocating from fb->tags.mm.

nv35.c
    linux-6.6 copy of nv35_fb_tile_comp() (>> 6 << 13 zcomp encoding), allocating
    from fb->tags.mm.

nv36.c
    linux-6.6 copy of nv36_fb_tile_comp() (>> 6 << 14 zcomp encoding), allocating
    from fb->tags.mm.

nv25.c
    linux-6.6 copy of nv25_fb_tile_comp(), allocating from fb->tags.mm.
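
Across all of the nvXX files the tag run is sized the same way: divide the tile
count by the number of memory partitions and round the result up to the chip's
alignment (0x40, or 0x100 on nv40).  A worked example of just that arithmetic,
using an illustrative tile count and a round_up() that mirrors the kernel macro
for power-of-two alignments:

#include <stdio.h>

#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)	/* y must be a power of two */

int main(void)
{
	unsigned int tiles = 1000;	/* illustrative tile count */
	unsigned int parts = 4;		/* memory partitions, fb->ram->parts */
	unsigned int align = 0x40;	/* 0x100 on nv40 */
	unsigned int tags  = round_up(tiles / parts, align);

	/* 1000 / 4 = 250, rounded up to a multiple of 64 -> 256 tags requested */
	printf("%u tags\n", tags);
	return 0;
}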

/kernel/linux/linux-5.10/arch/arm/kernel/

atags_parse.c
    Parses the boot-time ATAG list (variable-sized tags, the first of which must be
    an ATAG_CORE): setup_machine_tags() takes the list from atags_vaddr or from
    PAGE_OFFSET + mdesc->atag_offset, runs convert_to_tag_list() when the list does
    not start with ATAG_CORE, falls back to default_tags if it still does not, and
    passes the list to mdesc->fixup() before parsing every tag.  (further matches
    elided)

/kernel/linux/linux-6.6/arch/arm/kernel/

atags_parse.c
    The linux-6.6 copy shows the identical hits in setup_machine_tags() and the
    surrounding comments.  (further matches elided)
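
The ATAG list itself is a simple length-prefixed record stream: every tag starts
with a header giving its size in 32-bit words and its type, the list must begin
with ATAG_CORE, and a zero-sized tag ends it.  A sketch of that layout and of
walking it is below; the header layout, the ATAG_CORE/ATAG_NONE values and the
tag_next()/for_each_tag() walkers follow the ARM convention, while the trimmed
payload union and count_tags() are purely illustrative.

#include <stdint.h>

#define ATAG_NONE	0x00000000
#define ATAG_CORE	0x54410001

struct tag_header {
	uint32_t size;		/* size of this tag in 32-bit words, header included */
	uint32_t tag;		/* tag type, e.g. ATAG_CORE */
};

struct tag {
	struct tag_header hdr;
	union {
		struct { uint32_t flags, pagesize, rootdev; } core;
	} u;			/* real tags carry many more payload variants */
};

/* Advance to the next tag: the size field counts words, not bytes. */
#define tag_next(t)	((struct tag *)((uint32_t *)(t) + (t)->hdr.size))
#define for_each_tag(t, base) \
	for ((t) = (base); (t)->hdr.size; (t) = tag_next(t))

/* Count the tags in a list, rejecting one that does not start with ATAG_CORE. */
int count_tags(struct tag *base)
{
	struct tag *t;
	int n = 0;

	if (base->hdr.tag != ATAG_CORE)		/* same sanity check as setup_machine_tags() */
		return -1;
	for_each_tag(t, base)
		n++;
	return n;
}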

/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/volt/

gpio.c
    A static tags[] table names the GPIO functions that carry the voltage ID:
    nvkm_voltgpio_get() reads one bit per entry with nvkm_gpio_get(),
    nvkm_voltgpio_set() shifts the requested VID out a bit at a time with
    nvkm_gpio_set(), and nvkm_voltgpio_init() probes each entry with
    nvkm_gpio_find().

/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/subdev/volt/

gpio.c
    The linux-6.6 copy has the same tags[] table and the same three loops in
    nvkm_voltgpio_get(), nvkm_voltgpio_set() and nvkm_voltgpio_init().
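
The voltage ID here is just a small integer whose bits are spread across several
GPIO lines, one line per entry of the tags[] table.  The sketch below shows the two
loops with the GPIO accessors stubbed out; the table contents and the
gpio_get_bit()/gpio_set_bit() helpers are illustrative stand-ins, not nouveau's
nvkm_gpio_get()/nvkm_gpio_set().

#include <stddef.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const unsigned char vid_tags[] = { 0x04, 0x05, 0x06, 0x1a };	/* illustrative */

static int  gpio_get_bit(unsigned char tag)            { (void)tag; return 0; }
static void gpio_set_bit(unsigned char tag, int value) { (void)tag; (void)value; }

/* Read one GPIO per table entry and pack the results into a VID. */
int voltgpio_get(void)
{
	int vid = 0;

	for (size_t i = 0; i < ARRAY_SIZE(vid_tags); i++)
		vid |= gpio_get_bit(vid_tags[i]) << i;
	return vid;
}

/* Drive each GPIO from the corresponding bit of the requested VID. */
void voltgpio_set(int vid)
{
	for (size_t i = 0; i < ARRAY_SIZE(vid_tags); i++, vid >>= 1)
		gpio_set_bit(vid_tags[i], vid & 1);
}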

/kernel/linux/linux-6.6/arch/arm64/kernel/

elfcore.c
    mte_dump_tag_range() writes MTE tags into a core dump: it lazily allocates a
    single tag-storage buffer with mte_allocate_tag_storage(), fills it for each
    page with mte_save_page_tags(), emits MTE_PAGE_TAG_STORAGE bytes through
    dump_emit(), and frees the buffer once the range has been walked.
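
The dump path differs from the swap path mainly in where the 128-byte tag buffer
goes: it is allocated once, reused for every tagged page in the range, and its
contents are streamed into the core file.  A sketch of that loop shape follows; the
page lookup and dump_emit() are replaced by illustrative stubs, and error handling
is reduced to a boolean result.

#include <stdbool.h>
#include <stdlib.h>

#define PAGE_SIZE		4096
#define MTE_PAGE_TAG_STORAGE	(PAGE_SIZE / 16 / 2)	/* 128 bytes per 4 KiB page */

static bool page_has_tags(unsigned long addr)            { (void)addr; return true; }
static void save_page_tags(unsigned long addr, void *to) { (void)addr; (void)to; }
static bool emit(const void *buf, size_t len)            { (void)buf; (void)len; return true; }

/* Dump the MTE tags of every tagged page in [start, end); returns false on failure. */
bool dump_tag_range(unsigned long start, unsigned long end)
{
	void *tags = NULL;
	bool ok = true;

	for (unsigned long addr = start; addr < end; addr += PAGE_SIZE) {
		if (!page_has_tags(addr))
			continue;
		if (!tags) {
			tags = malloc(MTE_PAGE_TAG_STORAGE);	/* allocated on first tagged page */
			if (!tags) {
				ok = false;
				break;
			}
		}
		save_page_tags(addr, tags);
		if (!emit(tags, MTE_PAGE_TAG_STORAGE)) {	/* stream 128 bytes into the core file */
			ok = false;
			break;
		}
	}
	free(tags);
	return ok;
}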