Searched refs: pg_count (results 1 - 25 of 35, sorted by relevance)

/kernel/linux/linux-5.10/include/linux/
agpgart.h
53 size_t pg_count; /* number of pages */ member
59 size_t pg_count; member
71 size_t pg_count; /* number of pages */ member
/kernel/linux/linux-6.6/include/linux/
agpgart.h
53 size_t pg_count; /* number of pages */ member
59 size_t pg_count; member
71 size_t pg_count; /* number of pages */ member
/kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/
qplib_res.c
70 for (i = 0; i < pbl->pg_count; i++) { in __free_pbl()
87 pbl->pg_count = 0; in __free_pbl()
100 pbl->pg_count++; in bnxt_qplib_fill_user_dma_pages()
131 pbl->pg_count = 0; in __alloc_pbl()
142 pbl->pg_count++; in __alloc_pbl()
262 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; in bnxt_qplib_alloc_init_hwq()
267 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; in bnxt_qplib_alloc_init_hwq()
285 for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) { in bnxt_qplib_alloc_init_hwq()
291 i = hwq->pbl[PBL_LVL_2].pg_count; in bnxt_qplib_alloc_init_hwq()
325 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; in bnxt_qplib_alloc_init_hwq()
426 int pg_count; bnxt_qplib_map_tqm_pgtbl() local
[all...]
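In the qplib_res.c hits above, pg_count is a running tally of page-buffer-list (PBL) pages: __alloc_pbl() and bnxt_qplib_fill_user_dma_pages() zero it and then increment it once per page that is successfully set up, and __free_pbl() walks exactly pg_count entries before resetting it. The following is a simplified, self-contained userspace model of that bookkeeping, not the driver code: malloc() stands in for the driver's DMA page allocation, and struct pbl_model is an illustrative stand-in rather than the driver's real structure.

#include <stdlib.h>

struct pbl_model {
	void   **pg_arr;	/* one pointer per allocated page */
	unsigned pg_count;	/* how many pg_arr entries are valid */
	size_t   pg_size;	/* size of each page */
};

static int pbl_alloc(struct pbl_model *pbl, unsigned pages, size_t pg_size)
{
	pbl->pg_arr = calloc(pages, sizeof(*pbl->pg_arr));
	if (!pbl->pg_arr)
		return -1;
	pbl->pg_size = pg_size;
	pbl->pg_count = 0;			/* mirrors "pbl->pg_count = 0" in __alloc_pbl() */
	for (unsigned i = 0; i < pages; i++) {
		pbl->pg_arr[i] = malloc(pg_size);	/* DMA page allocation in the driver */
		if (!pbl->pg_arr[i])
			return -1;		/* caller frees only what was counted */
		pbl->pg_count++;		/* mirrors "pbl->pg_count++" */
	}
	return 0;
}

static void pbl_free(struct pbl_model *pbl)
{
	for (unsigned i = 0; i < pbl->pg_count; i++)	/* mirrors the __free_pbl() loop bound */
		free(pbl->pg_arr[i]);
	free(pbl->pg_arr);
	pbl->pg_arr = NULL;
	pbl->pg_count = 0;			/* mirrors "pbl->pg_count = 0" in __free_pbl() */
}

int main(void)
{
	struct pbl_model pbl = { 0 };
	int rc = pbl_alloc(&pbl, 8, 4096);
	pbl_free(&pbl);				/* safe even after a partial allocation */
	return rc ? 1 : 0;
}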
/kernel/linux/linux-5.10/drivers/char/agp/
compat_ioctl.h
60 compat_size_t pg_count; /* number of pages */ member
72 compat_size_t pg_count; /* number of pages */ member
102 struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
frontend.c
106 size_t pg_count; in agp_find_seg_in_client() local
109 pg_count = size / 4096; in agp_find_seg_in_client()
114 (seg[i].pg_count == pg_count) && in agp_find_seg_in_client()
178 seg[i].pg_count = user_seg[i].pg_count; in agp_create_segment()
274 struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) in agp_allocate_memory_wrap() argument
278 memory = agp_allocate_memory(agp_bridge, pg_count, type); in agp_allocate_memory_wrap()
884 memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); in agpioc_allocate_wrap()
compat_ioctl.c
124 ksegment[seg].pg_count = usegment[seg].pg_count; in compat_agpioc_reserve_wrap()
161 memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); in compat_agpioc_allocate_wrap()
intel-gtt.c
239 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) in alloc_agpphysmem_i8xx() argument
244 switch (pg_count) { in alloc_agpphysmem_i8xx()
258 new = agp_create_memory(pg_count); in alloc_agpphysmem_i8xx()
263 if (pg_count == 4) { in alloc_agpphysmem_i8xx()
269 new->page_count = pg_count; in alloc_agpphysmem_i8xx()
270 new->num_scratch_pages = pg_count; in alloc_agpphysmem_i8xx()
981 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, in intel_fake_agp_alloc_by_type() argument
987 if (pg_count != intel_private.num_dcache_entries) in intel_fake_agp_alloc_by_type()
995 new->page_count = pg_count; in intel_fake_agp_alloc_by_type()
1001 return alloc_agpphysmem_i8xx(pg_count, typ in intel_fake_agp_alloc_by_type()
[all...]
/kernel/linux/linux-6.6/drivers/char/agp/
compat_ioctl.h
60 compat_size_t pg_count; /* number of pages */ member
72 compat_size_t pg_count; /* number of pages */ member
102 struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
frontend.c
108 size_t pg_count; in agp_find_seg_in_client() local
111 pg_count = size / 4096; in agp_find_seg_in_client()
116 (seg[i].pg_count == pg_count) && in agp_find_seg_in_client()
180 seg[i].pg_count = user_seg[i].pg_count; in agp_create_segment()
276 struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) in agp_allocate_memory_wrap() argument
280 memory = agp_allocate_memory(agp_bridge, pg_count, type); in agp_allocate_memory_wrap()
886 memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); in agpioc_allocate_wrap()
compat_ioctl.c
124 ksegment[seg].pg_count = usegment[seg].pg_count; in compat_agpioc_reserve_wrap()
161 memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); in compat_agpioc_allocate_wrap()
intel-gtt.c
240 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) in alloc_agpphysmem_i8xx() argument
245 switch (pg_count) { in alloc_agpphysmem_i8xx()
259 new = agp_create_memory(pg_count); in alloc_agpphysmem_i8xx()
264 if (pg_count == 4) { in alloc_agpphysmem_i8xx()
270 new->page_count = pg_count; in alloc_agpphysmem_i8xx()
271 new->num_scratch_pages = pg_count; in alloc_agpphysmem_i8xx()
979 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, in intel_fake_agp_alloc_by_type() argument
985 if (pg_count != intel_private.num_dcache_entries) in intel_fake_agp_alloc_by_type()
993 new->page_count = pg_count; in intel_fake_agp_alloc_by_type()
999 return alloc_agpphysmem_i8xx(pg_count, typ in intel_fake_agp_alloc_by_type()
[all...]
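In both copies of frontend.c above, a byte size coming from userspace is first converted to a page count (pg_count = size / 4096) and then compared against the pg_count stored in each reserved segment. A minimal stand-alone sketch of that conversion and comparison follows; it is a model, not the AGP frontend itself, and the real agp_find_seg_in_client() also matches on other segment fields that these truncated hits do not show.

#include <stddef.h>
#include <stdio.h>

struct seg_model {
	size_t pg_start;	/* first page of the reserved segment */
	size_t pg_count;	/* number of pages, as in struct agp_segment */
};

/* Return the index of a segment whose page count matches a byte size, or -1. */
static int find_seg_by_size(const struct seg_model *seg, size_t nsegs, size_t size)
{
	size_t pg_count = size / 4096;		/* mirrors "pg_count = size / 4096" */

	for (size_t i = 0; i < nsegs; i++)
		if (seg[i].pg_count == pg_count)	/* mirrors "(seg[i].pg_count == pg_count)" */
			return (int)i;
	return -1;
}

int main(void)
{
	struct seg_model segs[] = { { 0, 4 }, { 4, 16 } };

	/* 64 KiB is 16 pages of 4096 bytes, so this matches segs[1]. */
	printf("segment index for 64 KiB: %d\n", find_seg_by_size(segs, 2, 64 * 1024));
	return 0;
}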
/kernel/linux/linux-5.10/arch/alpha/kernel/
pci_iommu.c
854 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) in iommu_reserve()
866 p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask); in iommu_reserve()
875 for (i = 0; i < pg_count; ++i) in iommu_reserve()
878 arena->next_entry = p + pg_count; in iommu_reserve()
885 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) in iommu_release()
895 for(i = pg_start; i < pg_start + pg_count; i++) in iommu_release()
899 iommu_arena_free(arena, pg_start, pg_count); in iommu_release()
904 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, in iommu_bind()
917 for(j = pg_start; j < pg_start + pg_count; j++) { in iommu_bind()
924 for(i = 0, j = pg_start; i < pg_count; in iommu_bind()
852 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) iommu_reserve() argument
883 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) iommu_release() argument
902 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, struct page **pages) iommu_bind() argument
931 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) iommu_unbind() argument
[all...]
core_titan.c
591 long pg_count; in titan_agp_setup()
607 aper->pg_count = alpha_agpgart_size / PAGE_SIZE; in titan_agp_setup()
608 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, in titan_agp_setup()
609 aper->pg_count - 1); in titan_agp_setup()
618 agp->aperture.size = aper->pg_count * PAGE_SIZE;
630 status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); in titan_agp_cleanup()
634 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); in titan_agp_cleanup()
636 aper->pg_count); in titan_agp_cleanup()
586 long pg_count; global() member
core_marvel.c
914 long pg_count; in marvel_agp_setup()
929 aper->pg_count = alpha_agpgart_size / PAGE_SIZE; in marvel_agp_setup()
930 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, in marvel_agp_setup()
931 aper->pg_count - 1); in marvel_agp_setup()
941 agp->aperture.size = aper->pg_count * PAGE_SIZE; in marvel_agp_cleanup()
953 status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); in marvel_agp_cleanup()
957 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
959 aper->pg_count); in marvel_agp_configure()
903 long pg_count; global() member
/kernel/linux/linux-6.6/arch/alpha/kernel/
pci_iommu.c
831 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) in iommu_reserve()
843 p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask); in iommu_reserve()
852 for (i = 0; i < pg_count; ++i) in iommu_reserve()
855 arena->next_entry = p + pg_count; in iommu_reserve()
862 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) in iommu_release()
872 for(i = pg_start; i < pg_start + pg_count; i++) in iommu_release()
876 iommu_arena_free(arena, pg_start, pg_count); in iommu_release()
881 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, in iommu_bind()
894 for(j = pg_start; j < pg_start + pg_count; j++) { in iommu_bind()
901 for(i = 0, j = pg_start; i < pg_count; in iommu_bind()
829 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) iommu_reserve() argument
860 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) iommu_release() argument
879 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, struct page **pages) iommu_bind() argument
908 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) iommu_unbind() argument
[all...]
core_titan.c
591 long pg_count; in titan_agp_setup()
607 aper->pg_count = alpha_agpgart_size / PAGE_SIZE; in titan_agp_setup()
608 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, in titan_agp_setup()
609 aper->pg_count - 1); in titan_agp_setup()
618 agp->aperture.size = aper->pg_count * PAGE_SIZE;
630 status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); in titan_agp_cleanup()
634 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); in titan_agp_cleanup()
636 aper->pg_count); in titan_agp_cleanup()
586 long pg_count; global() member
core_marvel.c
865 long pg_count; in marvel_agp_setup()
880 aper->pg_count = alpha_agpgart_size / PAGE_SIZE; in marvel_agp_setup()
881 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, in marvel_agp_setup()
882 aper->pg_count - 1); in marvel_agp_setup()
892 agp->aperture.size = aper->pg_count * PAGE_SIZE; in marvel_agp_cleanup()
904 status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); in marvel_agp_cleanup()
908 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); in marvel_agp_cleanup()
910 aper->pg_count);
856 long pg_count; global() member
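In the Alpha hits, pg_count sizes the AGP aperture in pages: core_titan.c and core_marvel.c compute pg_count = alpha_agpgart_size / PAGE_SIZE and hand it to iommu_reserve(), iommu_bind(), iommu_unbind() and iommu_release(). Below is a self-contained model of only the reserve/release bookkeeping visible above; the real pci_iommu.c goes through iommu_arena_find_pages() with an alignment mask and manages actual IOMMU PTEs, none of which is reproduced here. The 8 KiB page size and 16 MiB aperture are assumptions made for the example.

#include <stdio.h>

#define ARENA_PAGES	4096
#define PG_RESERVED	~0UL		/* marks a slot claimed by the reserve step */

static unsigned long ptes[ARENA_PAGES];	/* stand-in for the arena's PTE array */
static long next_entry;

static long iommu_reserve_model(long pg_count)
{
	long p = next_entry;		/* the real code searches via iommu_arena_find_pages() */

	if (p + pg_count > ARENA_PAGES)
		return -1;
	for (long i = 0; i < pg_count; ++i)	/* mirrors "for (i = 0; i < pg_count; ++i)" */
		ptes[p + i] = PG_RESERVED;
	next_entry = p + pg_count;		/* mirrors "arena->next_entry = p + pg_count" */
	return p;
}

static void iommu_release_model(long pg_start, long pg_count)
{
	for (long i = pg_start; i < pg_start + pg_count; i++)	/* mirrors the iommu_release() loop */
		ptes[i] = 0;
}

int main(void)
{
	const long page_size = 8192;			/* assumed Alpha PAGE_SIZE */
	const long agpgart_size = 16 * 1024 * 1024;	/* stand-in for alpha_agpgart_size */
	long pg_count = agpgart_size / page_size;	/* mirrors "pg_count = alpha_agpgart_size / PAGE_SIZE" */
	long pg_start = iommu_reserve_model(pg_count);

	printf("reserved %ld pages starting at slot %ld\n", pg_count, pg_start);
	iommu_release_model(pg_start, pg_count);
	return 0;
}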
/kernel/linux/linux-6.6/include/uapi/linux/
agpgart.h
81 __kernel_size_t pg_count; /* number of pages */ member
93 __kernel_size_t pg_count;/* number of pages */ member
/kernel/linux/patches/linux-6.6/prebuilts/usr/include/linux/
agpgart.h
61 __kernel_size_t pg_count; member
71 __kernel_size_t pg_count; member
/kernel/linux/patches/linux-5.10/prebuilts/usr/include/linux/
agpgart.h
61 __kernel_size_t pg_count; member
71 __kernel_size_t pg_count; member
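Every copy of agpgart.h above carries pg_count as the "number of pages" field of the AGP ioctl structures, and the frontend.c hits show agpioc_allocate_wrap() feeding alloc.pg_count straight into agp_allocate_memory_wrap(). A minimal userspace sketch of that path follows. It assumes a system that actually exposes /dev/agpgart with the agpgart driver loaded and sufficient privileges; error handling is abbreviated and only the allocate/deallocate round trip is shown.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/agpgart.h>

int main(void)
{
	int fd = open("/dev/agpgart", O_RDWR);
	if (fd < 0) {
		perror("open /dev/agpgart");
		return 1;
	}

	if (ioctl(fd, AGPIOC_ACQUIRE) < 0) {		/* become the AGP controller */
		perror("AGPIOC_ACQUIRE");
		close(fd);
		return 1;
	}

	agp_allocate alloc = {		/* userspace typedef from the UAPI header */
		.pg_count = 16,		/* number of pages to allocate */
		.type     = 0,		/* 0 == normal memory */
	};
	if (ioctl(fd, AGPIOC_ALLOCATE, &alloc) == 0) {	/* reaches agpioc_allocate_wrap() */
		printf("allocated %zu pages, key %d\n",
		       (size_t)alloc.pg_count, alloc.key);
		ioctl(fd, AGPIOC_DEALLOCATE, alloc.key);
	} else {
		perror("AGPIOC_ALLOCATE");
	}

	ioctl(fd, AGPIOC_RELEASE);
	close(fd);
	return 0;
}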
/kernel/linux/linux-6.6/drivers/infiniband/hw/bnxt_re/
qplib_res.c
70 for (i = 0; i < pbl->pg_count; i++) { in __free_pbl()
87 pbl->pg_count = 0; in __free_pbl()
100 pbl->pg_count++; in bnxt_qplib_fill_user_dma_pages()
131 pbl->pg_count = 0; in __alloc_pbl()
142 pbl->pg_count++; in __alloc_pbl()
263 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; in bnxt_qplib_alloc_init_hwq()
268 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; in bnxt_qplib_alloc_init_hwq()
286 for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) { in bnxt_qplib_alloc_init_hwq()
292 i = hwq->pbl[PBL_LVL_2].pg_count; in bnxt_qplib_alloc_init_hwq()
326 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; in bnxt_qplib_alloc_init_hwq()
427 int pg_count; bnxt_qplib_map_tqm_pgtbl() local
[all...]
/kernel/linux/linux-5.10/include/uapi/linux/
agpgart.h
82 __kernel_size_t pg_count; /* number of pages */ member
94 __kernel_size_t pg_count;/* number of pages */ member
/kernel/linux/patches/linux-4.19/prebuilts/usr/include/linux/
agpgart.h
49 __kernel_size_t pg_count; member
59 __kernel_size_t pg_count; member
/kernel/linux/linux-5.10/fs/nfs/
pagelist.c
68 hdr->good_bytes = mirror->pg_count; in nfs_pgheader_init()
641 if (mirror->pg_count > mirror->pg_bsize) { in nfs_generic_pg_test()
651 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * in nfs_generic_pg_test()
655 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); in nfs_generic_pg_test()
818 mirror->pg_count = 0; in nfs_pageio_mirror_init()
906 pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count); in nfs_generic_pgio()
947 nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo); in nfs_generic_pgio()
1103 mirror->pg_count = 0; in nfs_pageio_do_add_request()
1120 mirror->pg_count += req->wb_bytes; in nfs_pageio_do_add_request()
1136 mirror->pg_bytes_written += mirror->pg_count; in nfs_pageio_doio()
[all...]
/kernel/linux/linux-6.6/fs/nfs/
pagelist.c
106 hdr->good_bytes = mirror->pg_count; in nfs_pgheader_init()
731 if (mirror->pg_count > mirror->pg_bsize) { in nfs_generic_pg_test()
741 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * in nfs_generic_pg_test()
745 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); in nfs_generic_pg_test()
912 mirror->pg_count = 0; in nfs_pageio_mirror_init()
999 pagecount = nfs_page_array_len(pg_base, mirror->pg_count); in nfs_generic_pgio()
1050 nfs_pgio_rpcsetup(hdr, pg_base, mirror->pg_count, desc->pg_ioflags, in nfs_generic_pgio()
1221 mirror->pg_count = 0; in nfs_pageio_do_add_request()
1238 mirror->pg_count += req->wb_bytes; in nfs_pageio_do_add_request()
1254 mirror->pg_bytes_written += mirror->pg_count; in nfs_pageio_doio()
[all...]
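Unlike the AGP and IOMMU hits, in fs/nfs/pagelist.c pg_count appears to count bytes rather than pages: nfs_pageio_do_add_request() advances it by req->wb_bytes, and nfs_generic_pg_test() compares it against pg_bsize to decide how much of the next request can still be coalesced into the current I/O. The sketch below is a tiny stand-alone model of that size check only; struct mirror_model is an illustrative stand-in, not the NFS pageio mirror structure, and the field names are reused purely for readability.

#include <stddef.h>
#include <stdio.h>

struct mirror_model {
	size_t pg_count;	/* bytes already coalesced into this I/O */
	size_t pg_bsize;	/* I/O size limit for this mirror */
};

/* How many bytes of a wb_bytes-sized request can still be coalesced? */
static size_t pg_test_model(const struct mirror_model *m, size_t wb_bytes)
{
	if (m->pg_count > m->pg_bsize)		/* mirrors the overflow check in nfs_generic_pg_test() */
		return 0;

	size_t room = m->pg_bsize - m->pg_count;
	return room < wb_bytes ? room : wb_bytes;	/* mirrors min(pg_bsize - pg_count, wb_bytes) */
}

int main(void)
{
	struct mirror_model m = { .pg_count = 60 * 1024, .pg_bsize = 64 * 1024 };

	/* 4 KiB of headroom left, so only 4096 of the 8192 requested bytes fit. */
	printf("can add %zu of 8192 bytes\n", pg_test_model(&m, 8192));
	return 0;
}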

Completed in 22 milliseconds
