/kernel/linux/linux-5.10/drivers/gpu/drm/omapdrm/
tcm.h
  48: /* 1d or 2d area */
  50: bool is2d; /* whether area is 1d or 2d */
  69: struct tcm_area *area);
  70: s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
  71: s32 (*free)(struct tcm *tcm, struct tcm_area *area);
  85: * area pointer is NULL
  111: * Reserves a 2D area in the container.
  114: * @param height Height (in pages) of area to be reserved.
  115: * @param width Width (in pages) of area to be reserved.
  116: * @param align Alignment requirement for top-left corner of area
  131: tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height, u16 align, s16 offset, u16 slot_bytes, struct tcm_area *area)
  165: tcm_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area)
  195: tcm_free(struct tcm_area *area)
  243: tcm_area_is_valid(struct tcm_area *area)
  276: __tcm_area_width(struct tcm_area *area)
  282: __tcm_area_height(struct tcm_area *area)
  288: __tcm_sizeof(struct tcm_area *area)
  [all ...]
tcm-sita.c
  64: /* found a long enough free area */ (in r2l_b2t_1d())
  130: /* assume the area is free until we find an overlap */ (in l2r_t2b())
  133: /* check subsequent rows to see if complete area is free */ (in l2r_t2b())
  154: /* set area as in-use. iterate over rows */ (in l2r_t2b())
  163: struct tcm_area *area) (in sita_reserve_1d())
  171: area->p0.x = pos % tcm->width; (in sita_reserve_1d())
  172: area->p0.y = pos / tcm->width; (in sita_reserve_1d())
  173: area->p1.x = (pos + num_slots - 1) % tcm->width; (in sita_reserve_1d())
  174: area->p1.y = (pos + num_slots - 1) / tcm->width; (in sita_reserve_1d())
  183: struct tcm_area *area) (in sita_reserve_2d())
  162: sita_reserve_1d(struct tcm *tcm, u32 num_slots, struct tcm_area *area)
  181: sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align, s16 offset, u16 slot_bytes, struct tcm_area *area)
  208: sita_free(struct tcm *tcm, struct tcm_area *area)
  [all ...]
/kernel/linux/linux-6.6/drivers/gpu/drm/omapdrm/ |
tcm.h
  48: /* 1d or 2d area */
  50: bool is2d; /* whether area is 1d or 2d */
  69: struct tcm_area *area);
  70: s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
  71: s32 (*free)(struct tcm *tcm, struct tcm_area *area);
  85: * area pointer is NULL
  111: * Reserves a 2D area in the container.
  114: * @param height Height (in pages) of area to be reserved.
  115: * @param width Width (in pages) of area to be reserved.
  116: * @param align Alignment requirement for top-left corner of area
  131: tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height, u16 align, s16 offset, u16 slot_bytes, struct tcm_area *area)
  165: tcm_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area)
  195: tcm_free(struct tcm_area *area)
  243: tcm_area_is_valid(struct tcm_area *area)
  276: __tcm_area_width(struct tcm_area *area)
  282: __tcm_area_height(struct tcm_area *area)
  288: __tcm_sizeof(struct tcm_area *area)
  [all ...]
tcm-sita.c
  56: /* found a long enough free area */ (in r2l_b2t_1d())
  122: /* assume the area is free until we find an overlap */ (in l2r_t2b())
  125: /* check subsequent rows to see if complete area is free */ (in l2r_t2b())
  146: /* set area as in-use. iterate over rows */ (in l2r_t2b())
  155: struct tcm_area *area) (in sita_reserve_1d())
  163: area->p0.x = pos % tcm->width; (in sita_reserve_1d())
  164: area->p0.y = pos / tcm->width; (in sita_reserve_1d())
  165: area->p1.x = (pos + num_slots - 1) % tcm->width; (in sita_reserve_1d())
  166: area->p1.y = (pos + num_slots - 1) / tcm->width; (in sita_reserve_1d())
  175: struct tcm_area *area) (in sita_reserve_2d())
  154: sita_reserve_1d(struct tcm *tcm, u32 num_slots, struct tcm_area *area)
  173: sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align, s16 offset, u16 slot_bytes, struct tcm_area *area)
  200: sita_free(struct tcm *tcm, struct tcm_area *area)
  [all ...]
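The tcm_reserve_2d()/tcm_reserve_1d()/tcm_free() helpers listed in the tcm.h hits form the allocation interface of the OMAP tiler container manager, with SiTA as the backend. A minimal usage sketch follows; it assumes an already-initialized container (the sita_init() constructor is not shown in the hits), and the 16x8 geometry, 64-slot alignment, and 4096 slot_bytes values are illustrative assumptions only:

```c
#include <linux/printk.h>

#include "tcm.h"	/* struct tcm, struct tcm_area, tcm_reserve_2d(), tcm_free() */

/* Sketch: reserve a 16x8 block of slots, report it, then release it. */
static int example_use_2d_block(struct tcm *container)
{
	struct tcm_area area;
	s32 ret;

	/* width = 16, height = 8, align = 64, offset = 0, slot_bytes = 4096 (assumed) */
	ret = tcm_reserve_2d(container, 16, 8, 64, 0, 4096, &area);
	if (ret)
		return ret;	/* container could not satisfy the request */

	/* area.p0 / area.p1 now hold the reserved rectangle's corners */
	pr_info("reserved %u x %u slots at (%u, %u)\n",
		(unsigned int)__tcm_area_width(&area),
		(unsigned int)__tcm_area_height(&area),
		(unsigned int)area.p0.x, (unsigned int)area.p0.y);

	return tcm_free(&area);	/* release the slots when done */
}
```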
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/nfpcore/ |
nfp_cppcore.c
  57: * @waitq: area wait queue
  86: struct nfp_cpp_area *area;
  93: struct mutex mutex; /* Lock for the area's refcount */
  132: struct nfp_cpp_area *area = ... (in __release_cpp_area())
  134: struct nfp_cpp *cpp = nfp_cpp_area_cpp(area); (in __release_cpp_area())
  136: if (area->cpp->op->area_cleanup) (in __release_cpp_area())
  137: area->cpp->op->area_cleanup(area); (in __release_cpp_area())
  140: __resource_del(&area->resource); (in __release_cpp_area())
  142: kfree(area); (in __release_cpp_area())
  145: nfp_cpp_area_put(struct nfp_cpp_area *area)
  150: nfp_cpp_area_get(struct nfp_cpp_area *area)
  180: struct nfp_cpp_area *area = container_of(res, ... (in nfp_cpp_free())
  288: struct nfp_cpp_area *area; (in nfp_cpp_area_alloc_with_name())
  383: struct nfp_cpp_area *area; (in nfp_cpp_area_alloc_acquire())
  403: nfp_cpp_area_free(struct nfp_cpp_area *area)
  410: nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
  417: __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
  451: nfp_cpp_area_acquire(struct nfp_cpp_area *area)
  473: nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
  500: nfp_cpp_area_release(struct nfp_cpp_area *area)
  522: nfp_cpp_area_release_free(struct nfp_cpp_area *area)
  542: nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, void *kernel_vaddr, size_t length)
  563: nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, const void *kernel_vaddr, size_t length)
  622: nfp_cpp_area_resource(struct nfp_cpp_area *area)
  640: nfp_cpp_area_phys(struct nfp_cpp_area *area)
  661: nfp_cpp_area_iomem(struct nfp_cpp_area *area)
  679: nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, u32 *value)
  701: nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, u32 value)
  721: nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, u64 *value)
  743: nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, u64 value)
  766: nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, u32 value, size_t length)
  795: struct nfp_cpp_area *area; (in nfp_cpp_area_cache_add())
  922: struct nfp_cpp_area *area; (in __nfp_cpp_read())
  991: struct nfp_cpp_area *area; (in __nfp_cpp_write())
  [all ...]
/kernel/linux/linux-6.6/drivers/net/ethernet/netronome/nfp/nfpcore/ |
nfp_cppcore.c
  57: * @waitq: area wait queue
  86: struct nfp_cpp_area *area;
  93: struct mutex mutex; /* Lock for the area's refcount */
  132: struct nfp_cpp_area *area = ... (in __release_cpp_area())
  134: struct nfp_cpp *cpp = nfp_cpp_area_cpp(area); (in __release_cpp_area())
  136: if (area->cpp->op->area_cleanup) (in __release_cpp_area())
  137: area->cpp->op->area_cleanup(area); (in __release_cpp_area())
  140: __resource_del(&area->resource); (in __release_cpp_area())
  142: kfree(area); (in __release_cpp_area())
  145: nfp_cpp_area_put(struct nfp_cpp_area *area)
  150: nfp_cpp_area_get(struct nfp_cpp_area *area)
  180: struct nfp_cpp_area *area = container_of(res, ... (in nfp_cpp_free())
  288: struct nfp_cpp_area *area; (in nfp_cpp_area_alloc_with_name())
  383: struct nfp_cpp_area *area; (in nfp_cpp_area_alloc_acquire())
  403: nfp_cpp_area_free(struct nfp_cpp_area *area)
  410: nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
  417: __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
  451: nfp_cpp_area_acquire(struct nfp_cpp_area *area)
  473: nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
  500: nfp_cpp_area_release(struct nfp_cpp_area *area)
  522: nfp_cpp_area_release_free(struct nfp_cpp_area *area)
  542: nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, void *kernel_vaddr, size_t length)
  563: nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, const void *kernel_vaddr, size_t length)
  622: nfp_cpp_area_resource(struct nfp_cpp_area *area)
  640: nfp_cpp_area_phys(struct nfp_cpp_area *area)
  661: nfp_cpp_area_iomem(struct nfp_cpp_area *area)
  679: nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, u32 *value)
  701: nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, u32 value)
  721: nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, u64 *value)
  743: nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, u64 value)
  766: nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, u32 value, size_t length)
  795: struct nfp_cpp_area *area; (in nfp_cpp_area_cache_add())
  921: struct nfp_cpp_area *area; (in __nfp_cpp_read())
  990: struct nfp_cpp_area *area; (in __nfp_cpp_write())
  [all ...]
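The acquire/read/release hits above imply the usual NFP CPP access pattern: an area must be acquired (bound to a BAR) before it can be read or written, and released afterwards. A hedged sketch of that sequence for a caller that already holds an allocated struct nfp_cpp_area (allocation, e.g. via nfp_cpp_area_alloc_with_name(), is omitted because its full signature is not shown in the hits):

```c
#include <linux/types.h>

#include "nfp_cpp.h"	/* nfp_cpp_area_acquire(), nfp_cpp_area_readl(), nfp_cpp_area_release() */

/*
 * Sketch only: read one 32-bit word at @offset from an already-allocated
 * CPP area, mirroring the acquire -> access -> release sequence above.
 */
static int example_read_word(struct nfp_cpp_area *area,
			     unsigned long offset, u32 *value)
{
	int err;

	err = nfp_cpp_area_acquire(area);	/* may sleep waiting for a BAR */
	if (err)
		return err;

	err = nfp_cpp_area_readl(area, offset, value);

	nfp_cpp_area_release(area);		/* undo the acquire */
	return err;
}
```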
/kernel/linux/linux-6.6/drivers/iommu/iommufd/ |
io_pagetable.h
  20: * populated in the page table. Each area is fully populated with pages.
  25: * up or tearing down the area. When observed under the write side of the
  26: * domain_rwsem a NULL pages must mean the area is still being setup and no
  30: * for this area. It is locked by the pages->mutex. This simplifies the locking
  45: /* How many bytes into the first page the area starts */
  62: int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages);
  63: void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages);
  65: int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain);
  66: void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
  68: void iopt_area_unmap_domain(struct iopt_area *area,
  71: iopt_area_index(struct iopt_area *area)
  76: iopt_area_last_index(struct iopt_area *area)
  81: iopt_area_iova(struct iopt_area *area)
  86: iopt_area_last_iova(struct iopt_area *area)
  91: iopt_area_length(struct iopt_area *area)
  101: iopt_area_start_byte(struct iopt_area *area, unsigned long iova)
  111: iopt_area_iova_to_index(struct iopt_area *area, unsigned long iova)
  150: struct iopt_area *area;
  [all ...]
io_pagetable.c
  24: struct iopt_area *area;
  39: iter->area = iopt_area_iter_first(iopt, iova, iova); (in iopt_area_contig_init())
  40: if (!iter->area) (in iopt_area_contig_init())
  42: if (!iter->area->pages) { (in iopt_area_contig_init())
  43: iter->area = NULL; (in iopt_area_contig_init())
  46: return iter->area; (in iopt_area_contig_init())
  53: if (!iter->area) (in iopt_area_contig_next())
  55: last_iova = iopt_area_last_iova(iter->area); (in iopt_area_contig_next())
  60: iter->area = iopt_area_iter_next(iter->area, ite... (in iopt_area_contig_next())
  189: iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area, struct iopt_pages *pages, unsigned long iova, unsigned long start_byte, unsigned long length, int iommu_prot)
  226: struct iopt_area *area; (in iopt_area_alloc())
  295: iopt_abort_area(struct iopt_area *area)
  432: struct iopt_area *area; (in iopt_get_pages())
  471: struct iopt_area *area; (in iopt_unmap_iova_range())
  699: struct iopt_area *area; (in iopt_unfill_domain())
  760: struct iopt_area *area; (in iopt_fill_domain())
  816: struct iopt_area *area; (in iopt_check_iova_alignment())
  996: iopt_area_split(struct iopt_area *area, unsigned long iova)
  1107: struct iopt_area *area; (in iopt_cut_iova())
  [all ...]
pages.c
  193: * index is the number of PAGE_SIZE units from the start of the area's
  194: * iopt_pages. If the iova is sub page-size then the area has an iova that
  197: static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
  201: WARN_ON(index < iopt_area_index(area) || (in iopt_area_index_to_iova())
  202: index > iopt_area_last_index(area)); (in iopt_area_index_to_iova())
  203: index -= iopt_area_index(area); (in iopt_area_index_to_iova())
  205: return iopt_area_iova(area); (in iopt_area_index_to_iova())
  206: return iopt_area_iova(area) - area->page_offset + index * PAGE_SIZE; (in iopt_area_index_to_iova())
  209: static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area,
  237: iopt_area_unmap_domain_range(struct iopt_area *area, struct iommu_domain *domain, unsigned long start_index, unsigned long last_index)
  375: batch_from_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index)
  402: raw_pages_from_domain(struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index, struct page **out_pages)
  426: batch_from_domain_continue(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index)
  475: batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index)
  980: struct iopt_area *area; (in pfn_reader_fill_span())
  1197: iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long start_index, unsigned long last_index, unsigned long *unmapped_end_index, unsigned long real_last_index)
  1257: __iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long last_index)
  1309: iopt_area_unfill_partial_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long end_index)
  1326: iopt_area_unmap_domain(struct iopt_area *area, struct iommu_domain *domain)
  1342: iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain)
  1357: iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain)
  1408: iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages)
  1492: iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
  1617: struct iopt_area *area; (in iopt_pages_fill_from_domain())
  1915: iopt_area_add_access(struct iopt_area *area, unsigned long start_index, unsigned long last_index, struct page **out_pages, unsigned int flags)
  1971: iopt_area_remove_access(struct iopt_area *area, unsigned long start_index, unsigned long last_index)
  [all ...]
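The hits at lines 197 to 206 show how iommufd turns a page index within an area back into an IOVA: the index is taken relative to the start of the iopt_pages, and the area's page_offset accounts for a sub-page start. A standalone arithmetic sketch of the same relationship (plain C with hypothetical stand-in types, not the kernel code itself):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Simplified stand-ins for the iommufd structures, for illustration only. */
struct example_area {
	uint64_t iova;		  /* first mapped IOVA of the area */
	uint64_t first_index;	  /* index of the first PAGE_SIZE unit it covers */
	unsigned int page_offset; /* bytes into the first page the area starts */
};

/* Same formula as iopt_area_index_to_iova() in the hits above. */
static uint64_t index_to_iova(const struct example_area *a, uint64_t index)
{
	assert(index >= a->first_index);
	index -= a->first_index;
	if (!index)
		return a->iova;
	return a->iova - a->page_offset + index * PAGE_SIZE;
}

int main(void)
{
	/* Area starting 0x200 bytes into its first page, mapped at IOVA 0x10000200. */
	struct example_area a = {
		.iova = 0x10000200, .first_index = 8, .page_offset = 0x200,
	};

	printf("index 8  -> iova 0x%llx\n", (unsigned long long)index_to_iova(&a, 8));
	printf("index 10 -> iova 0x%llx\n", (unsigned long long)index_to_iova(&a, 10));
	return 0;
}
```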
/kernel/linux/linux-6.6/drivers/net/dsa/sja1105/ |
sja1105_ethtool.c
  81: enum sja1105_stats_area area;
  92, 99, 106, 113, 121, 128, 135, 142: .area = MAC,
  149: .area ... (truncated)
  [all ...]
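These hits show the usual ethtool statistics pattern: a table of counter descriptors, each tagged with the register area (here MAC) it is read from. A generic sketch of that descriptor-table idiom with purely hypothetical names, since the real sja1105 field layout is not visible in the hits:

```c
#include <stddef.h>
#include <stdio.h>

enum example_stats_area { AREA_MAC, AREA_HL1, AREA_HL2 };

struct example_stat_desc {
	const char *name;
	enum example_stats_area area;	/* which register area holds the counter */
};

/* Counter table: consumers filter on .area to know where to read from. */
static const struct example_stat_desc example_stats[] = {
	{ .name = "rx_crc_errors",  .area = AREA_MAC },
	{ .name = "rx_runt_frames", .area = AREA_MAC },
	{ .name = "tx_frames",      .area = AREA_HL1 },
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(example_stats) / sizeof(example_stats[0]); i++)
		if (example_stats[i].area == AREA_MAC)
			printf("MAC counter: %s\n", example_stats[i].name);
	return 0;
}
```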
/kernel/linux/linux-5.10/arch/x86/xen/ |
grant-table.c
  26: struct vm_struct *area;
  40: *__shared = shared = gnttab_shared_vm_area.area->addr; (in arch_gnttab_map_shared())
  62: *__shared = shared = gnttab_status_vm_area.area->addr; (in arch_gnttab_map_status())
  81: if (shared == gnttab_status_vm_area.area->addr) (in arch_gnttab_unmap())
  96: struct gnttab_vm_area *area = data; (in gnttab_apply())
  98: area->ptes[area->idx++] = pte; (in gnttab_apply())
  102: static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
  104: area->ptes = kmalloc_array(nr_frames, sizeof(*area... (in arch_gnttab_valloc())
  121: arch_gnttab_vfree(struct gnttab_vm_area *area)
  [all ...]
/kernel/linux/linux-6.6/arch/x86/xen/ |
grant-table.c
  26: struct vm_struct *area;
  40: *__shared = shared = gnttab_shared_vm_area.area->addr; (in arch_gnttab_map_shared())
  62: *__shared = shared = gnttab_status_vm_area.area->addr; (in arch_gnttab_map_status())
  81: if (shared == gnttab_status_vm_area.area->addr) (in arch_gnttab_unmap())
  96: struct gnttab_vm_area *area = data; (in gnttab_apply())
  98: area->ptes[area->idx++] = pte; (in gnttab_apply())
  102: static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
  104: area->ptes = kmalloc_array(nr_frames, sizeof(*area... (in arch_gnttab_valloc())
  121: arch_gnttab_vfree(struct gnttab_vm_area *area)
  [all ...]
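The gnttab_apply() hit shows the idiom the Xen grant-table code uses to learn the PTE locations of a freshly reserved kernel VM area: walk the range and record each pte pointer. A hedged sketch of that pattern follows; names prefixed example_ are assumptions, while get_vm_area() and apply_to_page_range() are the generic kernel helpers this callback style is built around:

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct example_vm_area {
	struct vm_struct *area;
	pte_t **ptes;
	int idx;
};

/* Callback invoked once per PTE slot in the range; records its address. */
static int example_apply(pte_t *pte, unsigned long addr, void *data)
{
	struct example_vm_area *a = data;

	a->ptes[a->idx++] = pte;
	return 0;
}

/* Reserve nr_frames pages of kernel VA and remember their PTE slots. */
static int example_valloc(struct example_vm_area *a, unsigned int nr_frames)
{
	a->ptes = kmalloc_array(nr_frames, sizeof(*a->ptes), GFP_KERNEL);
	if (!a->ptes)
		return -ENOMEM;

	a->idx = 0;
	a->area = get_vm_area(PAGE_SIZE * nr_frames, VM_IOREMAP);
	if (!a->area)
		goto free_ptes;

	if (apply_to_page_range(&init_mm, (unsigned long)a->area->addr,
				PAGE_SIZE * nr_frames, example_apply, a))
		goto free_vm;

	return 0;

free_vm:
	free_vm_area(a->area);
free_ptes:
	kfree(a->ptes);
	return -ENOMEM;
}
```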
/kernel/linux/linux-6.6/lib/ |
logic_iomem.c
  154: struct logic_iomem_area *area = get_area(addr); (in iounmap())
  156: if (!area) { (in iounmap())
  161: if (area->ops->unmap) (in iounmap())
  162: area->ops->unmap(area->priv); (in iounmap())
  165: area->ops = NULL; (in iounmap())
  166: area->priv = NULL; (in iounmap())
  221: struct logic_iomem_area *area = get_area(addr); \
  223: if (!area) \
  226: return (u ## sz) area... (truncated)
  256: struct logic_iomem_area *area = get_area(addr); (in memset_io())
  279: struct logic_iomem_area *area = get_area(addr); (in memcpy_fromio())
  302: struct logic_iomem_area *area = get_area(addr); (in memcpy_toio())
  [all ...]
/kernel/linux/linux-5.10/arch/powerpc/platforms/cell/ |
ras.c
  109: struct ptcal_area *area; (in cbe_ptcal_enable_on_node())
  116: area = kmalloc(sizeof(*area), GFP_KERNEL); (in cbe_ptcal_enable_on_node())
  117: if (!area) (in cbe_ptcal_enable_on_node())
  120: area->nid = nid; (in cbe_ptcal_enable_on_node())
  121: area->order = order; (in cbe_ptcal_enable_on_node())
  122: area->pages = __alloc_pages_node(area->nid, (in cbe_ptcal_enable_on_node())
  124: area->order); (in cbe_ptcal_enable_on_node())
  126: if (!area... (truncated, in cbe_ptcal_enable_on_node())
  208: struct ptcal_area *area, *tmp; (in cbe_ptcal_disable())
  [all ...]
/kernel/linux/linux-6.6/arch/powerpc/platforms/cell/ |
ras.c
  109: struct ptcal_area *area; (in cbe_ptcal_enable_on_node())
  116: area = kmalloc(sizeof(*area), GFP_KERNEL); (in cbe_ptcal_enable_on_node())
  117: if (!area) (in cbe_ptcal_enable_on_node())
  120: area->nid = nid; (in cbe_ptcal_enable_on_node())
  121: area->order = order; (in cbe_ptcal_enable_on_node())
  122: area->pages = __alloc_pages_node(area->nid, (in cbe_ptcal_enable_on_node())
  124: area->order); (in cbe_ptcal_enable_on_node())
  126: if (!area... (truncated, in cbe_ptcal_enable_on_node())
  208: struct ptcal_area *area, *tmp; (in cbe_ptcal_disable())
  [all ...]
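The ras.c hits sketch how cbe_ptcal_enable_on_node() tracks per-node reserved memory: a small descriptor is kmalloc'ed, then an order-sized block of pages is allocated on that NUMA node. A hedged reconstruction of that allocate-and-track pattern; the GFP flags passed to __alloc_pages_node() and the error path are assumptions, since only the field assignments are visible above:

```c
#include <linux/gfp.h>
#include <linux/slab.h>

struct example_ptcal_area {
	int nid;		/* NUMA node the pages live on */
	int order;		/* allocation order of the block */
	struct page *pages;
};

static struct example_ptcal_area *example_reserve_on_node(int nid, int order)
{
	struct example_ptcal_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;

	area->nid = nid;
	area->order = order;
	/* GFP_KERNEL | __GFP_THISNODE is an assumption; the hit is truncated. */
	area->pages = __alloc_pages_node(area->nid,
					 GFP_KERNEL | __GFP_THISNODE,
					 area->order);
	if (!area->pages) {
		kfree(area);
		return NULL;
	}
	return area;
}
```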
/kernel/linux/linux-5.10/drivers/video/fbdev/ |
dnfb.c
  109: static void dnfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
  149: void dnfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
  157: incr = (area->dy <= area->sy) ? 1 : -1; (in dnfb_copyarea())
  159: src = (ushort *)(info->screen_base + area->sy * info->fix.line_length + (in dnfb_copyarea())
  160: (area->sx >> 4)); (in dnfb_copyarea())
  161: dest = area->dy * (info->fix.line_length >> 1) + (area->dx >> 4); (in dnfb_copyarea())
  164: y_delta = (info->fix.line_length * 8) - area->sx - area... (truncated, in dnfb_copyarea())
  [all ...]
/kernel/linux/linux-6.6/drivers/video/fbdev/ |
dnfb.c
  109: static void dnfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
  149: void dnfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
  157: incr = (area->dy <= area->sy) ? 1 : -1; (in dnfb_copyarea())
  159: src = (ushort *)(info->screen_base + area->sy * info->fix.line_length + (in dnfb_copyarea())
  160: (area->sx >> 4)); (in dnfb_copyarea())
  161: dest = area->dy * (info->fix.line_length >> 1) + (area->dx >> 4); (in dnfb_copyarea())
  164: y_delta = (info->fix.line_length * 8) - area->sx - area... (truncated, in dnfb_copyarea())
  [all ...]
/kernel/linux/linux-6.6/sound/core/ |
memalloc.c
  43: * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
  73: dmab->area = __snd_dma_alloc_pages(dmab, size); (in snd_dma_alloc_dir_pages())
  74: if (!dmab->area) (in snd_dma_alloc_dir_pages())
  82: * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
  109: if (! dmab->area) (in snd_dma_alloc_pages_fallback())
  181: * @area: VM area information
  186: struct vm_area_struct *area) (in snd_dma_buffer_mmap())
  194: return ops->mmap(dmab, area); (in snd_dma_buffer_mmap())
  252: return virt_to_page(dmab->area... (truncated, in snd_sgbuf_get_page())
  185: snd_dma_buffer_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area)
  Per-type mmap helpers, all taking (struct snd_dma_buffer *dmab, struct vm_area_struct *area):
  330: snd_dma_continuous_mmap, 358: snd_dma_vmalloc_mmap, 447: snd_dma_iram_mmap, 477: snd_dma_dev_mmap, 505 and 522: snd_dma_wc_mmap, 577: snd_dma_noncontig_mmap, 699: snd_dma_sg_wc_mmap, 845: snd_dma_sg_fallback_mmap, 886: snd_dma_noncoherent_mmap
  [all ...]
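The memalloc.c hits revolve around struct snd_dma_buffer, whose ->area field holds the kernel virtual address of the allocated buffer while ->addr holds the matching DMA address. A hedged usage sketch of the allocation API these hits belong to; snd_dma_alloc_pages(), snd_dma_free_pages() and SNDRV_DMA_TYPE_DEV are the standard ALSA helpers, and the 64 KiB size and driver context are assumptions:

```c
#include <linux/device.h>
#include <linux/string.h>
#include <sound/memalloc.h>

/*
 * Sketch: allocate a 64 KiB DMA buffer for @dev, touch it through ->area,
 * then release it.  Error handling is intentionally minimal.
 */
static int example_alloc_dma_buffer(struct device *dev)
{
	struct snd_dma_buffer dmab;
	int err;

	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 64 * 1024, &dmab);
	if (err < 0)
		return err;

	memset(dmab.area, 0, dmab.bytes);	/* CPU view of the buffer */
	/* dmab.addr is the DMA address the device would be programmed with */

	snd_dma_free_pages(&dmab);
	return 0;
}
```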
/kernel/linux/linux-5.10/drivers/video/fbdev/sis/ |
sis_accel.c
  356: void fbcon_sis_copyarea(struct fb_info *info, const struct fb_copyarea *area)
  361: int width = area->width; (in fbcon_sis_copyarea())
  362: int height = area->height; (in fbcon_sis_copyarea())
  369: cfb_copyarea(info, area); (in fbcon_sis_copyarea())
  374: area->sx >= vxres || area->sy >= vyres || (in fbcon_sis_copyarea())
  375: area->dx >= vxres || area->dy >= vyres) (in fbcon_sis_copyarea())
  379: if((area->sx + width) > vxres) width = vxres - area... (truncated, in fbcon_sis_copyarea())
  [all ...]
/kernel/linux/linux-6.6/drivers/video/fbdev/sis/ |
sis_accel.c
  356: void fbcon_sis_copyarea(struct fb_info *info, const struct fb_copyarea *area)
  361: int width = area->width; (in fbcon_sis_copyarea())
  362: int height = area->height; (in fbcon_sis_copyarea())
  369: cfb_copyarea(info, area); (in fbcon_sis_copyarea())
  374: area->sx >= vxres || area->sy >= vyres || (in fbcon_sis_copyarea())
  375: area->dx >= vxres || area->dy >= vyres) (in fbcon_sis_copyarea())
  379: if((area->sx + width) > vxres) width = vxres - area... (truncated, in fbcon_sis_copyarea())
  [all ...]
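The fbcon_sis_copyarea() hits show the usual sanity step before a blit: reject rectangles that start outside the visible resolution and clamp the width and height of those that overhang it. A small standalone sketch of that clamping logic (hypothetical names, userspace C for clarity):

```c
#include <stdbool.h>
#include <stdio.h>

struct example_copyarea { unsigned int sx, sy, dx, dy, width, height; };

/* Clamp @a against a vxres x vyres screen; return false if nothing is left. */
static bool example_clip(struct example_copyarea *a,
			 unsigned int vxres, unsigned int vyres)
{
	if (a->sx >= vxres || a->sy >= vyres ||
	    a->dx >= vxres || a->dy >= vyres)
		return false;			/* starts entirely off screen */

	if (a->sx + a->width > vxres)
		a->width = vxres - a->sx;	/* source overhangs the right edge */
	if (a->dx + a->width > vxres)
		a->width = vxres - a->dx;	/* destination overhangs too */
	if (a->sy + a->height > vyres)
		a->height = vyres - a->sy;
	if (a->dy + a->height > vyres)
		a->height = vyres - a->dy;

	return a->width && a->height;
}

int main(void)
{
	struct example_copyarea a = { .sx = 600, .sy = 0, .dx = 0, .dy = 0,
				      .width = 300, .height = 100 };

	if (example_clip(&a, 800, 600))
		printf("clipped copy: %ux%u\n", a.width, a.height);
	return 0;
}
```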
/kernel/linux/linux-5.10/sound/core/ |
memalloc.c
  35: dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, (in snd_malloc_dev_pages())
  38: if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC) (in snd_malloc_dev_pages())
  39: set_memory_wc((unsigned long)dmab->area, (in snd_malloc_dev_pages())
  49: set_memory_wb((unsigned long)dmab->area, (in snd_free_dev_pages())
  52: dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); (in snd_free_dev_pages())
  68: dmab->area = NULL; (in snd_malloc_dev_iram())
  80: dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr, (in snd_malloc_dev_iram())
  92: if (pool && dmab->area) (in snd_free_dev_iram())
  93: gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); (in snd_free_dev_iram())
  114: * snd_dma_alloc_pages - allocate the buffer area accordin... (truncated)
  [all ...]
/kernel/linux/linux-6.6/kernel/ |
kcov.c
  55: /* The lock protects mode, size, area and t. */
  61: void *area;
  66: /* Size of remote area (in long's). */
  136: struct kcov_remote_area *area; (in kcov_remote_area_get())
  140: area = list_entry(pos, struct kcov_remote_area, list); (in kcov_remote_area_get())
  141: if (area->size == size) { (in kcov_remote_area_get())
  142: list_del(&area->list); (in kcov_remote_area_get())
  143: return area; (in kcov_remote_area_get())
  150: static void kcov_remote_area_put(struct kcov_remote_area *area,
  153: INIT_LIST_HEAD(&area... (truncated, in kcov_remote_area_put())
  202: unsigned long *area; (in __sanitizer_cov_trace_pc())
  232: u64 *area; (in write_comp_data())
  346: kcov_start(struct task_struct *t, struct kcov *kcov, unsigned int size, void *area, enum kcov_mode mode, int sequence)
  551: unsigned long *area = kcov->area; (in kcov_fault_in_area())
  689: void *area; (in kcov_ioctl())
  843: void *area; (in kcov_remote_start())
  987: void *area; (in kcov_remote_stop())
  1064: void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE * ... (truncated, in kcov_init())
  [all ...]
/kernel/linux/linux-5.10/kernel/ |
kcov.c
  54: /* The lock protects mode, size, area and t. */
  60: void *area;
  65: /* Size of remote area (in long's). */
  132: struct kcov_remote_area *area; (in kcov_remote_area_get())
  136: area = list_entry(pos, struct kcov_remote_area, list); (in kcov_remote_area_get())
  137: if (area->size == size) { (in kcov_remote_area_get())
  138: list_del(&area->list); (in kcov_remote_area_get())
  139: return area; (in kcov_remote_area_get())
  146: static void kcov_remote_area_put(struct kcov_remote_area *area,
  149: INIT_LIST_HEAD(&area... (truncated, in kcov_remote_area_put())
  192: unsigned long *area; (in __sanitizer_cov_trace_pc())
  214: u64 *area; (in write_comp_data())
  325: kcov_start(struct task_struct *t, struct kcov *kcov, unsigned int size, void *area, enum kcov_mode mode, int sequence)
  459: void *area; (in kcov_mmap())
  536: unsigned long *area = kcov->area; (in kcov_fault_in_area())
  817: void *area; (in kcov_remote_start())
  960: void *area; (in kcov_remote_stop())
  1035: void *area = vmalloc(CONFIG_KCOV_IRQ_AREA_SIZE * ... (truncated, in kcov_init())
  [all ...]
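The kcov.c hits above describe the coverage "area": a buffer the kernel allocates and shares with userspace via mmap, where slot 0 counts the recorded program counters. The user-side pattern, adapted from the documented kcov interface (constants from <linux/kcov.h>; treat this as a sketch rather than the exact sample in the kernel docs):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kcov.h>	/* KCOV_INIT_TRACE, KCOV_ENABLE, KCOV_DISABLE, KCOV_TRACE_PC */

#define COVER_SIZE (64 << 10)	/* number of unsigned longs in the area */

int main(void)
{
	unsigned long *cover, n, i;
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	if (fd == -1)
		return 1;
	/* Size the coverage area; the kernel allocates it (kcov->area). */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		return 1;
	/* Map the same area into this process. */
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		return 1;
	if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
		return 1;

	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	read(-1, NULL, 0);	/* any syscall whose coverage we want to see */

	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	if (ioctl(fd, KCOV_DISABLE, 0))
		return 1;
	close(fd);
	return 0;
}
```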
/kernel/linux/linux-5.10/mm/ |
vmalloc.c
  157: * unmap_kernel_range_noflush - unmap kernel VM area
  158: * @start: start of the VM area to unmap
  159: * @size: size of the VM area to unmap
  161: * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size specify
  275: * map_kernel_range_noflush - map kernel VM area with the specified pages
  276: * @addr: start of the VM area to map
  277: * @size: size of the VM area to map
  281: * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should
  442: * find a lowest match of free area.
  734: * free area i... (truncated)
  2061: struct vm_struct *area; (in __get_vm_area_node())
  2181: set_area_direct_map(const struct vm_struct *area, int (*set_direct_map)(struct page *page))
  2192: vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
  2240: struct vm_struct *area; (in __vunmap())
  2389: struct vm_struct *area; (in vmap())
  2445: struct vm_struct *area; (in vmap_pfn())
  2465: __vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot, int node)
  2553: struct vm_struct *area; (in __vmalloc_node_range())
  3024: struct vm_struct *area; (in remap_vmalloc_range_partial())
  3090: free_vm_area(struct vm_struct *area)
  3198: int area, area2, last_area, term_area; (in pcpu_get_vm_areas())
  [all ...]
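Most of the vmalloc.c hits are internals (area lookup, mapping, teardown), but the caller-visible idea is simple: obtain a VM area and install pages into it. A hedged sketch using the public vmap()/vunmap() pair rather than the internal helpers listed above; the page source and error handling here are assumptions:

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map @count independently allocated pages into one contiguous kernel range. */
static void *example_map_pages(struct page **pages, unsigned int count)
{
	unsigned int i;
	void *addr;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto unwind;
	}

	/* vmap() reserves a VM area and installs the page table entries. */
	addr = vmap(pages, count, VM_MAP, PAGE_KERNEL);
	if (!addr)
		goto unwind;
	return addr;

unwind:
	while (i--)
		__free_page(pages[i]);
	return NULL;
}

/* Tear down: vunmap() releases the VM area, then the pages themselves. */
static void example_unmap_pages(void *addr, struct page **pages,
				unsigned int count)
{
	unsigned int i;

	vunmap(addr);
	for (i = 0; i < count; i++)
		__free_page(pages[i]);
}
```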
/kernel/linux/linux-6.6/arch/powerpc/include/asm/ |
rtas-work-area.h
  12: * struct rtas_work_area - RTAS work area descriptor.
  14: * Descriptor for a "work area" in PAPR terminology that satisfies
  29: * rtas_work_area_alloc() - Acquire a work area of the requested size.
  35: * "work area" in PAPR. Although callers are allowed to block while
  36: * holding a work area, the amount of memory reserved for this purpose
  38: * is to release any allocated work area before returning from a
  43: * allocating more than one work area simultaneously in a single task
  47: * Return: A &struct rtas_work_area descriptor for the allocated work area.
  63: * rtas_work_area_free() - Release a work area.
  64: * @area... (truncated)
  70: rtas_work_area_raw_buf(const struct rtas_work_area *area)
  75: rtas_work_area_size(const struct rtas_work_area *area)
  80: rtas_work_area_phys(const struct rtas_work_area *area)
  [all ...]
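These hits describe a small allocator for the buffers PAPR calls "work areas": acquire one, hand its physical address to firmware, and release it promptly. A hedged sketch of that lifecycle using only the accessors listed above; the SZ_4K size and the memset are illustrative, failure handling is omitted, and the actual RTAS call is not shown:

```c
#include <linux/sizes.h>
#include <linux/string.h>
#include <asm/rtas-work-area.h>

/* Sketch: acquire a work area, prepare it, and release it before returning. */
static void example_use_work_area(void)
{
	struct rtas_work_area *area;
	void *buf;

	area = rtas_work_area_alloc(SZ_4K);	/* may sleep until space is available */
	buf = rtas_work_area_raw_buf(area);

	memset(buf, 0, rtas_work_area_size(area));
	/* rtas_work_area_phys(area) is what a subsequent RTAS call would be given. */

	rtas_work_area_free(area);		/* release as soon as possible */
}
```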