/kernel/linux/linux-5.10/drivers/nvdimm/ |
btt.c | Cross-references to "arena" (struct arena_info) in the 5.10 BTT driver:
   27 to_dev() — returns &arena->nd_btt->dev
   37 arena_read_bytes() — comment: "arena offsets may be shifted from the base of the device"
   48 arena_write_bytes() — same offset adjustment on the write path
   59 btt_info_write() — dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(...)) alignment check
   82 btt_info_read(arena, super)
   94 __btt_map_write(arena, lba, mapping, flags)
  106 btt_map_write(arena, lba, mapping, z_flag, e_flag, rwb_flags)
  150 btt_map_read(arena, lba, *mapping, *trim, *error, rwb_flags)
  206 btt_log_group_read(arena, lane, log)
  258 btt_debugfs_init() — local
  327 btt_log_read(arena, lane, ent, old_flag)
  361 __btt_log_write(arena, lane, sub, ent, flags)
  382 btt_flog_write(arena, lane, sub, ent)
  406 btt_map_init(arena)
  450 btt_log_init(arena)
  500 to_namespace_offset(arena, lba)
  505 arena_clear_freelist_error(arena, lane)
  534 btt_freelist_init(arena)
  623 log_set_indices(arena)
  721 btt_rtt_init(arena)
  730 btt_maplocks_init(arena)
  748 alloc_arena() — local
  804 free_arenas() — locals (arena, next)
  820 parse_arena_meta(arena, super, arena_off)
  849 discover_arenas() — local
  939 create_arenas() — local
  969 btt_arena_write_layout(arena)
 1028 btt_meta_init() — local
 1068 lba_to_arena(btt, sector, *premap, **arena)
 1106 btt_data_read(arena, page, off, lba, len)
 1119 btt_data_write(arena, lba, page, off, len)
 1141 btt_rw_integrity(btt, bip, arena, postmap, rw)
 1190 btt_rw_integrity() — second, stub definition of the same signature (for builds without block-integrity support)
 1203 btt_read_pg() — local
 1300 btt_is_badblock(btt, arena, postmap)
 1315 btt_write_pg() — local
 [all...] |
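Everything in that list funnels media access through the two byte helpers near the top. A minimal sketch of the read side, reconstructed around the 5.10 signature shown above — the delegation to adjust_initial_offset() and nvdimm_read_bytes() is an assumption about the elided body, not a verified quote:

/* Sketch: BTT arena reads go through the backing namespace, with the
 * arena-relative offset first shifted by the BTT's own starting offset
 * within the device. */
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);	/* assumed helper */
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

arena_write_bytes() at line 48 mirrors this with nvdimm_write_bytes().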
/kernel/linux/linux-6.6/drivers/nvdimm/ |
btt.c | Same "arena" cross-reference set as the 5.10 copy above; only the tail line numbers drift: 1188 btt_rw_integrity() (stub), 1201 btt_read_pg() local, 1298 btt_is_badblock(), 1313 btt_write_pg() local. [all...] |
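btt_map_write() and btt_map_read() in both copies carry separate zero and error flags next to the mapping because a BTT map entry is a single little-endian 32-bit word: the post-map block number in the low bits, the two flags in the top two bits. A self-contained illustration of that packing — the helper names are invented for the example, and which top bit is trim versus error is an assumption here (the driver's real masks live in drivers/nvdimm/btt.h):

#include <stdint.h>

#define MAP_TRIM_SHIFT	31	/* assumed bit assignment; see btt.h */
#define MAP_ERR_SHIFT	30
#define MAP_LBA_MASK	(~(((uint32_t)1 << MAP_TRIM_SHIFT) | \
			   ((uint32_t)1 << MAP_ERR_SHIFT)))

/* Pack a post-map block number plus flags into one 32-bit map entry. */
static uint32_t map_entry_pack(uint32_t postmap, int trim, int error)
{
	return (postmap & MAP_LBA_MASK) |
	       ((uint32_t)!!trim << MAP_TRIM_SHIFT) |
	       ((uint32_t)!!error << MAP_ERR_SHIFT);
}

/* Unpack it again, mirroring the out-parameters of btt_map_read(). */
static void map_entry_unpack(uint32_t ent, uint32_t *postmap,
			     int *trim, int *error)
{
	*postmap = ent & MAP_LBA_MASK;
	*trim = !!(ent & ((uint32_t)1 << MAP_TRIM_SHIFT));
	*error = !!(ent & ((uint32_t)1 << MAP_ERR_SHIFT));
}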
/kernel/linux/linux-5.10/arch/alpha/kernel/ |
pci_iommu.c | Cross-references to "arena" (struct pci_iommu_arena):
   64 iommu_arena_new_node() — local; per the comment, the TLB lookup uses bitwise concatenation, not addition, so the required arena alignment is based on the window size, and the align parameter lets particular systems over-align the arena. Allocates via memblock_alloc_node(sizeof(*arena), align, nid); if the node allocation fails ("couldn't allocate arena from node %d"), falls back to memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES) and panics if that fails too.
  134 iommu_arena_find_pages(dev, arena, n, mask)
  187 iommu_arena_alloc(dev, arena, n, align)
  219 iommu_arena_free(arena, ofs, n)
  264 pci_map_single_1() — local
  385 alpha_pci_unmap_page() — local
  562 sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed)
  667 alpha_pci_map_sg() — local
  745 alpha_pci_unmap_sg() — local
  821 alpha_pci_supported() — local
  852 iommu_reserve(arena, pg_count, align_mask)
  883 iommu_release(arena, pg_start, pg_count)
  902 iommu_bind(arena, pg_start, pg_count, pages)
  931 iommu_unbind(arena, pg_start, pg_count)
 [all...] |
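iommu_arena_find_pages() is the allocator's core: a first-fit scan over the arena's PTE array for n consecutive free slots whose start satisfies an alignment mask, resuming from where the last allocation ended. A simplified, self-contained sketch of that scan — the real function also honors the device's DMA window limits and retries from slot 0 after a TLB flush, which is omitted here:

/* First-fit scan for n consecutive free PTEs, start aligned to (mask+1).
 * A zero PTE means the slot is free; returns the slot index or -1. */
static long arena_find_pages(unsigned long *ptes, long nents,
			     long next_entry, long n, long mask)
{
	long i = (next_entry + mask) & ~mask;	/* align the starting slot */

	while (i + n <= nents) {
		long j;

		for (j = 0; j < n; j++)
			if (ptes[i + j])
				break;		/* slot in use */
		if (j == n)
			return i;		/* found n free slots at i */
		i = (i + j + 1 + mask) & ~mask;	/* skip past it, re-align */
	}
	return -1;				/* caller may wrap and retry */
}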
core_titan.c | References to "arena":
  499 titan_ioremap() — comment: "Check the scatter-gather arena."
  589 struct pci_iommu_arena *arena — member of the AGP aperture struct (declared at 584)
  606 titan_agp_setup() — aper->arena = agp->hose->sg_pci
  608 titan_agp_setup() — aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, ...)
  617 titan_agp_setup() — aperture base = aper->arena->dma_base + aper->pg_start * PAGE_SIZE
  630-635 titan_agp_cleanup() — iommu_release(aper->arena, aper->pg_start, aper->pg_count); on failure, iommu_unbind() the range and retry the release
  691 titan_agp_bind_memory() — iommu_bind(aper->arena, aper->pg_start + pg_start, ...)
  699 titan_agp_unbind_memory() — iommu_unbind(aper->arena, ...)
 [all...] |
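Titan and marvel use the arena API from pci_iommu.c identically: reserve an aligned page range at setup, publish the bus address derived from the arena's dma_base, bind real pages at AGP map time, and unbind before releasing at teardown. A condensed sketch of that life cycle — the struct is trimmed to the fields the excerpts show, and the -EBUSY convention for releasing a still-bound range is inferred from the cleanup pattern:

struct aperture {			/* trimmed to the excerpt's fields */
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;			/* set by the caller */
};

static int agp_aperture_setup(struct aperture *aper,
			      struct pci_iommu_arena *sg_pci)
{
	aper->arena = sg_pci;		/* e.g. agp->hose->sg_pci */
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);
	if (aper->pg_start < 0)
		return -ENOMEM;
	/* bus address of the window:
	 * aper->arena->dma_base + aper->pg_start * PAGE_SIZE */
	return 0;
}

static void agp_aperture_cleanup(struct aperture *aper)
{
	/* release fails while pages are still bound, so unbind first */
	if (iommu_release(aper->arena, aper->pg_start, aper->pg_count) == -EBUSY) {
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	}
}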
core_marvel.c | Same aperture life cycle as core_titan.c, at these lines:
  735 marvel_ioremap() — comment: "Check the scatter-gather arena."
  912 marvel_agp_setup() — local (struct member declared at 901)
  928-940 marvel_agp_setup() — aper->arena = agp->hose->sg_pci; iommu_reserve(); base = aper->arena->dma_base + aper->pg_start * PAGE_SIZE
  953-958 marvel_agp_cleanup() — iommu_release(), iommu_unbind() on failure, retry iommu_release()
 1036 marvel_agp_bind_memory() — iommu_bind(aper->arena, aper->pg_start + pg_start, ...)
 1044 marvel_agp_unbind_memory() — iommu_unbind(aper->arena, ...)
 [all...] |
core_cia.c | References to "arena", all inside verify_tb_operation():
  357 local: struct pci_iommu_arena *arena = pci_isa_hose->sg_isa
  376 addr0 = arena->dma_base
  450 cia_pci_tbi(arena->hose, 0, -1)
  465 arena->ptes[4] = pte0
  489 arena->ptes[5] = pte0
  502 arena->align_entry = 4
  525-526 arena->ptes[4] = 0; arena->ptes[5] = 0
  540 alpha_mv.mv_pci_tbi(arena->hose, 0, -1) |
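That self-test drives the ISA hose's scatter-gather arena by hand: plant a PTE, invalidate the translation buffer, DMA through the window to prove the TLB reloaded, then retract the mapping. The skeleton, reduced to the arena operations the excerpt shows (pte0 and the DMA verification are elided in the source and stay elided here):

	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
	unsigned long addr0 = arena->dma_base;	/* bus address of slot 0 */

	arena->ptes[4] = pte0;			/* hand-plant a translation */
	cia_pci_tbi(arena->hose, 0, -1);	/* flush the translation buffer */
	/* ... DMA through addr0 + 4*PAGE_SIZE, check the data landed ... */

	arena->ptes[5] = pte0;			/* second probe slot */
	arena->align_entry = 4;			/* workaround path: force 4-slot alignment */

	arena->ptes[4] = 0;			/* retract the test mappings */
	arena->ptes[5] = 0;
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);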
/kernel/linux/linux-6.6/arch/alpha/kernel/ |
pci_iommu.c | Cross-references to "arena" (struct pci_iommu_arena); the 6.6 copy of iommu_arena_new_node() drops the node-aware path and allocates straight from memblock:
   64 iommu_arena_new_node() — local; arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES), then arena->ptes = memblock_alloc(mem_size, align), panicking if either allocation fails, then spin_lock_init(&arena->lock)
  105 iommu_arena_find_pages(dev, arena, n, mask)
  160 iommu_arena_alloc(dev, arena, n, align)
  192 iommu_arena_free(arena, ofs, n)
  237 pci_map_single_1() — local
  358 alpha_pci_unmap_page() — local
  535 sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed)
  640 alpha_pci_map_sg() — local
  722 alpha_pci_unmap_sg() — local
  798 alpha_pci_supported() — local
  829 iommu_reserve(arena, pg_count, align_mask)
  860 iommu_release(arena, pg_start, pg_count)
  879 iommu_bind(arena, pg_start, pg_count, pages)
  908 iommu_unbind(arena, pg_start, pg_count)
 [all...] |
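A sketch of that 6.6 boot-time setup, stitched together from the fragments above; the trailing field initializations are assumptions based on the fields the other excerpts dereference (dma_base, hose, next_entry), and the panic messages are abbreviated:

	struct pci_iommu_arena *arena;

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: out of memory for arena\n", __func__);

	arena->ptes = memblock_alloc(mem_size, align);	/* one PTE per window page */
	if (!arena->ptes)
		panic("%s: out of memory for ptes\n", __func__);

	spin_lock_init(&arena->lock);
	arena->hose = hose;		/* assumed field initializations */
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;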
core_titan.c | Identical "arena" reference set to the 5.10 copy of core_titan.c above, at the same line numbers. [all...] |
core_marvel.c | Same marvel aperture references as the 5.10 copy, shifted up slightly: 733 marvel_ioremap() comment; 863 marvel_agp_setup() local (member at 854); 879-891 setup (sg_pci assignment, iommu_reserve(), dma_base arithmetic); 904-909 marvel_agp_cleanup(); 987 marvel_agp_bind_memory(); 995 marvel_agp_unbind_memory(). [all...] |
core_cia.c | Identical verify_tb_operation() references to the 5.10 copy of core_cia.c above, at the same line numbers. |
/kernel/linux/linux-6.6/arch/powerpc/platforms/pseries/ |
rtas-work-area.c | References to "arena" (rwa_state.arena, the RTAS work-area buffer):
   26 comment: "Don't let a single allocation claim the whole arena."
   42 member: char *arena
   90 __rtas_work_area_alloc() — comment: requests that exceed the arena size will block
  128-132 rtas_work_area_allocator_init() — comment: reliably reserve an arena that satisfies RTAS addressing ... and adding the arena to a gen_pool
  137 const phys_addr_t pa_start = __pa(rwa_state.arena)
  144 if (!rwa_state.arena) — bail out if the arena was never reserved
  157 err = gen_pool_add(pool, (unsigned long)rwa_state.arena, ...)
  170 pr_debug("arena [%pa-%pa] (%uK), min/max alloc sizes %u/%u\n", ...)
  203 rtas_work_area_reserve_arena() — comment: "So set up the arena i..." (truncated)
 [all...] |
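This file is the other arena flavor in the list: a buffer reserved very early (so it satisfies RTAS addressing constraints) is handed to the genalloc allocator, and work areas are carved from it at runtime. A minimal, self-contained sketch of that pattern using the genalloc calls the excerpt names — the pool parameters and wrapper names are illustrative, not the file's actual values:

#include <linux/genalloc.h>
#include <linux/numa.h>

static struct gen_pool *pool;

/* One-time setup: wrap a pre-reserved arena in a gen_pool. */
static int arena_pool_init(void *arena, size_t size)
{
	/* allocations rounded to 2^6 = 64 bytes (illustrative order) */
	pool = gen_pool_create(6 /* min_alloc_order */, NUMA_NO_NODE);
	if (!pool)
		return -ENOMEM;
	return gen_pool_add(pool, (unsigned long)arena, size, NUMA_NO_NODE);
}

/* Runtime: carve a work area out of the arena and hand it back. */
static void *arena_pool_alloc(size_t size)
{
	return (void *)gen_pool_alloc(pool, size);
}

static void arena_pool_free(void *buf, size_t size)
{
	gen_pool_free(pool, (unsigned long)buf, size);
}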