Searched refs:regions (Results 1 - 25 of 282) sorted by relevance

/kernel/linux/linux-5.10/arch/arm/boot/compressed/
kaslr.c
19 struct regions { struct
93 u32 end, struct regions *regions) in intersects_reserved_region()
106 if (regions->reserved_mem < 0) in intersects_reserved_region()
110 for (subnode = fdt_first_subnode(fdt, regions->reserved_mem); in intersects_reserved_region()
117 while (len >= (regions->reserved_mem_addr_cells + in intersects_reserved_region()
118 regions->reserved_mem_size_cells)) { in intersects_reserved_region()
121 if (regions->reserved_mem_addr_cells == 2) in intersects_reserved_region()
124 reg += regions->reserved_mem_addr_cells; in intersects_reserved_region()
125 len -= 4 * regions in intersects_reserved_region()
92 intersects_reserved_region(const void *fdt, u32 start, u32 end, struct regions *regions) intersects_reserved_region() argument
145 intersects_occupied_region(const void *fdt, u32 start, u32 end, struct regions *regions) intersects_occupied_region() argument
163 count_suitable_regions(const void *fdt, struct regions *regions, u32 *bitmap) count_suitable_regions() argument
330 struct regions regions; kaslr_early_init() local
[all...]
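The arm kaslr hits above (and the powerpc kaslr_booke hits below) share one loop shape: walk every subnode of /reserved-memory, read its "reg" property, and step through <address size> tuples of reserved_mem_addr_cells + reserved_mem_size_cells 32-bit words. A minimal standalone sketch of that walk, assuming libfdt is available; the helper name and the way the node offset is passed in are hypothetical, not the kernel's:

#include <libfdt.h>
#include <stdbool.h>
#include <stdint.h>

static bool overlaps_reserved(const void *fdt, int rsv_node,
			      int addr_cells, int size_cells,
			      uint32_t start, uint32_t end)
{
	int node, len;

	for (node = fdt_first_subnode(fdt, rsv_node); node >= 0;
	     node = fdt_next_subnode(fdt, node)) {
		const fdt32_t *reg = fdt_getprop(fdt, node, "reg", &len);

		if (!reg)
			continue;
		/* each tuple is addr_cells + size_cells 32-bit words */
		while (len >= 4 * (addr_cells + size_cells)) {
			/* take the low word of each field; enough for a
			 * 32-bit sketch even when addr_cells == 2 */
			uint32_t base = fdt32_to_cpu(reg[addr_cells - 1]);
			uint32_t size = fdt32_to_cpu(reg[addr_cells + size_cells - 1]);

			if (start < base + size && base < end)
				return true;	/* [start, end) intersects */
			reg += addr_cells + size_cells;
			len -= 4 * (addr_cells + size_cells);
		}
	}
	return false;
}

The kernel variants also bail out early when the /reserved-memory node was never found, which is what the `if (regions->reserved_mem < 0)` hits correspond to.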
/kernel/linux/linux-5.10/arch/powerpc/mm/nohash/
kaslr_booke.c
23 struct regions { struct
38 struct regions __initdata regions; variable
115 if (regions.reserved_mem < 0) in overlaps_reserved_region()
119 for (subnode = fdt_first_subnode(fdt, regions.reserved_mem); in overlaps_reserved_region()
127 while (len >= (regions.reserved_mem_addr_cells + in overlaps_reserved_region()
128 regions.reserved_mem_size_cells)) { in overlaps_reserved_region()
130 if (regions.reserved_mem_addr_cells == 2) in overlaps_reserved_region()
133 reg += regions.reserved_mem_addr_cells; in overlaps_reserved_region()
134 len -= 4 * regions in overlaps_reserved_region()
[all...]
/kernel/linux/linux-6.6/arch/powerpc/mm/nohash/
kaslr_booke.c
23 struct regions { struct
38 struct regions __initdata regions; variable
113 if (regions.reserved_mem < 0) in overlaps_reserved_region()
117 for (subnode = fdt_first_subnode(fdt, regions.reserved_mem); in overlaps_reserved_region()
125 while (len >= (regions.reserved_mem_addr_cells + in overlaps_reserved_region()
126 regions.reserved_mem_size_cells)) { in overlaps_reserved_region()
128 if (regions.reserved_mem_addr_cells == 2) in overlaps_reserved_region()
131 reg += regions.reserved_mem_addr_cells; in overlaps_reserved_region()
132 len -= 4 * regions in overlaps_reserved_region()
[all...]
/kernel/linux/linux-6.6/mm/damon/
vaddr-test.h
44 * discontiguous regions which cover every mapped areas. However, the three
45 * regions should not include the two biggest unmapped areas in the original
47 * heap and the mmap()-ed regions, and 2) the mmap()-ed regions and stack.
54 * three regions and returns. For more detail, refer to the comment of
60 * mapped. To cover every mappings, the three regions should start with 10,
63 * unmapped areas, and thus it should be converted to three regions of 10-25,
69 struct damon_addr_range regions[3] = {0,}; in damon_test_three_regions_in_vmas() local
84 __damon_va_three_regions(&mm, regions); in damon_test_three_regions_in_vmas()
86 KUNIT_EXPECT_EQ(test, 10ul, regions[ in damon_test_three_regions_in_vmas()
129 damon_do_test_apply_three_regions(struct kunit *test, unsigned long *regions, int nr_regions, struct damon_addr_range *three_regions, unsigned long *expected, int nr_expected) damon_do_test_apply_three_regions() argument
164 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions1() local
186 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions2() local
210 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions3() local
235 unsigned long regions[] = {10, 20, 20, 30, 50, 55, 55, 57, 57, 59, damon_test_apply_three_regions4() local
[all...]
vaddr.c
56 * Functions for the initial monitoring target regions construction
60 * Size-evenly split a region into 'nr_pieces' small regions
104 * Find three regions separated by two biggest unmapped regions
107 * regions an array of three address ranges that results will be saved
109 * This function receives an address space and finds three regions in it which
110 * separated by the two biggest unmapped regions in the space. Please refer to
117 struct damon_addr_range regions[3]) in __damon_va_three_regions()
158 regions[0].start = ALIGN(start, DAMON_MIN_REGION); in __damon_va_three_regions()
159 regions[ in __damon_va_three_regions()
116 __damon_va_three_regions(struct mm_struct *mm, struct damon_addr_range regions[3]) __damon_va_three_regions() argument
173 damon_va_three_regions(struct damon_target *t, struct damon_addr_range regions[3]) damon_va_three_regions() argument
238 struct damon_addr_range regions[3]; __damon_va_init_regions() local
[all...]
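Both DAMON files above revolve around the same construction: take the mapped ranges in address order, find the two biggest unmapped gaps, and keep the three spans between them. A standalone userspace sketch; struct range is a hypothetical stand-in for struct damon_addr_range, and the test data is the mapping set from the vaddr-test.h comment (it assumes at least three mappings, i.e. two gaps):

#include <stdio.h>

struct range { unsigned long start, end; };

static void three_regions(const struct range *maps, int n, struct range out[3])
{
	int big1 = -1, big2 = -1;	/* indices of the two largest gaps */
	unsigned long g, g1 = 0, g2 = 0;

	for (int i = 0; i < n - 1; i++) {
		g = maps[i + 1].start - maps[i].end;
		if (g > g1)      { g2 = g1; big2 = big1; g1 = g; big1 = i; }
		else if (g > g2) { g2 = g; big2 = i; }
	}
	if (big1 > big2) { int t = big1; big1 = big2; big2 = t; }
	out[0] = (struct range){ maps[0].start, maps[big1].end };
	out[1] = (struct range){ maps[big1 + 1].start, maps[big2].end };
	out[2] = (struct range){ maps[big2 + 1].start, maps[n - 1].end };
}

int main(void)
{
	/* mappings from the test comment: 10-25, 200-210, 220-300,
	 * 300-330, 4000-5000 -> expect 10-25, 200-330, 4000-5000 */
	struct range maps[] = { {10,25},{200,210},{220,300},{300,330},{4000,5000} };
	struct range out[3];

	three_regions(maps, 5, out);
	for (int i = 0; i < 3; i++)
		printf("%lu-%lu\n", out[i].start, out[i].end);
	return 0;
}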
/kernel/linux/linux-5.10/drivers/mtd/chips/
jedec_probe.c
274 const uint32_t regions[6]; member
306 .regions = {
318 .regions = {
333 .regions = {
348 .regions = {
363 .regions = {
378 .regions = {
394 .regions = {
411 .regions = {
428 .regions
[all...]
/kernel/linux/linux-6.6/drivers/mtd/chips/
jedec_probe.c
275 const uint32_t regions[6]; member
307 .regions = {
319 .regions = {
334 .regions = {
349 .regions = {
364 .regions = {
379 .regions = {
395 .regions = {
412 .regions = {
429 .regions
[all...]
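In both jedec_probe.c versions, regions[6] packs each chip's erase-region geometry into single u32 values through an ERASEINFO(size, blocks) macro. The packing below is a hedged reconstruction from memory, not a quote of the kernel macro, and should be checked against the source:

#include <stdint.h>
#include <stdio.h>

/* assumed layout: erase-block size above bit 8, (block count - 1) in
 * the low 8 bits */
#define ERASEINFO(size, blocks)	(((uint32_t)(size) << 8) | ((blocks) - 1))

int main(void)
{
	uint32_t r = ERASEINFO(0x10000, 64);	/* 64 x 64 KiB erase blocks */

	printf("size=%#x blocks=%u\n", r >> 8, (r & 0xff) + 1);
	return 0;
}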
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/nvfw/
acr.c
89 hdr->regions.no_regions); in flcn_acr_desc_dump()
91 for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { in flcn_acr_desc_dump()
94 hdr->regions.region_props[i].start_addr); in flcn_acr_desc_dump()
96 hdr->regions.region_props[i].end_addr); in flcn_acr_desc_dump()
98 hdr->regions.region_props[i].region_id); in flcn_acr_desc_dump()
100 hdr->regions.region_props[i].read_mask); in flcn_acr_desc_dump()
102 hdr->regions.region_props[i].write_mask); in flcn_acr_desc_dump()
104 hdr->regions.region_props[i].client_mask); in flcn_acr_desc_dump()
132 hdr->regions.no_regions); in flcn_acr_desc_v1_dump()
134 for (i = 0; i < ARRAY_SIZE(hdr->regions in flcn_acr_desc_v1_dump()
[all...]
/kernel/linux/linux-6.6/drivers/vfio/cdx/
main.c
19 vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region), in vfio_cdx_open_device()
21 if (!vdev->regions) in vfio_cdx_open_device()
27 vdev->regions[i].addr = res->start; in vfio_cdx_open_device()
28 vdev->regions[i].size = resource_size(res); in vfio_cdx_open_device()
29 vdev->regions[i].type = res->flags; in vfio_cdx_open_device()
31 * Only regions addressed with PAGE granularity may be in vfio_cdx_open_device()
34 if (!(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_cdx_open_device()
35 !(vdev->regions[i].size & ~PAGE_MASK)) in vfio_cdx_open_device()
36 vdev->regions[i].flags |= in vfio_cdx_open_device()
38 vdev->regions[ in vfio_cdx_open_device()
[all...]
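The alignment test in the cdx hits (and in the vfio platform and fsl-mc results further down) is identical everywhere: a region may be advertised as mmap-capable only if both its base address and its size are page aligned. A userspace restatement with a hypothetical 4 KiB page:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static bool region_mmapable(uint64_t addr, uint64_t size)
{
	/* any bits below the page boundary forbid mmap */
	return !(addr & ~PAGE_MASK) && !(size & ~PAGE_MASK);
}

In the kernel the true case sets VFIO_REGION_INFO_FLAG_MMAP on the region's flags, as the hits show.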
/kernel/linux/linux-5.10/mm/
memblock.c
35 * Memblock is a method of managing memory regions during the early
40 * regions. There are several types of these collections:
46 * * ``reserved`` - describes the regions that were allocated
54 * which contains an array of memory regions along with
62 * arrays during addition of new regions. This feature should be used
112 .memory.regions = memblock_memory_init_regions,
117 .reserved.regions = memblock_reserved_init_regions,
128 .regions = memblock_physmem_init_regions,
144 for (i = 0, rgn = &memblock_type->regions[0]; \
146 i++, rgn = &memblock_type->regions[
[all...]
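The memblock hits show the allocator's core shape: each collection ("memory", "reserved", "physmem") is a counted array of regions seeded from a static init array and walked with a for_each-style macro, matching the expanded loop visible at lines 144-146 above. A simplified standalone sketch with names shortened from the kernel's:

#include <stdio.h>

struct region { unsigned long base, size; };
struct region_type { unsigned long cnt; struct region *regions; };

static struct region init_regions[16];
static struct region_type memory = { .cnt = 0, .regions = init_regions };

#define for_each_region(type, i, rgn)					\
	for (i = 0, rgn = &(type)->regions[0];				\
	     i < (type)->cnt;						\
	     i++, rgn = &(type)->regions[i])

int main(void)
{
	unsigned long i;
	struct region *rgn;

	init_regions[0] = (struct region){ 0x1000, 0x4000 };
	memory.cnt = 1;
	for_each_region(&memory, i, rgn)
		printf("%#lx + %#lx\n", rgn->base, rgn->size);
	return 0;
}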
/kernel/linux/linux-5.10/drivers/vfio/platform/
vfio_platform_common.c
144 vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region), in vfio_platform_regions_init()
146 if (!vdev->regions) in vfio_platform_regions_init()
156 vdev->regions[i].addr = res->start; in vfio_platform_regions_init()
157 vdev->regions[i].size = resource_size(res); in vfio_platform_regions_init()
158 vdev->regions[i].flags = 0; in vfio_platform_regions_init()
162 vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO; in vfio_platform_regions_init()
163 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; in vfio_platform_regions_init()
165 vdev->regions[i].flags |= in vfio_platform_regions_init()
169 * Only regions addressed with PAGE granularity may be in vfio_platform_regions_init()
172 if (!(vdev->regions[ in vfio_platform_regions_init()
[all...]
/kernel/linux/linux-6.6/drivers/vfio/platform/
vfio_platform_common.c
144 vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region), in vfio_platform_regions_init()
146 if (!vdev->regions) in vfio_platform_regions_init()
153 vdev->regions[i].addr = res->start; in vfio_platform_regions_init()
154 vdev->regions[i].size = resource_size(res); in vfio_platform_regions_init()
155 vdev->regions[i].flags = 0; in vfio_platform_regions_init()
159 vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO; in vfio_platform_regions_init()
160 vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ; in vfio_platform_regions_init()
162 vdev->regions[i].flags |= in vfio_platform_regions_init()
166 * Only regions addressed with PAGE granularity may be in vfio_platform_regions_init()
169 if (!(vdev->regions[ in vfio_platform_regions_init()
[all...]
/kernel/linux/linux-6.6/tools/testing/memblock/tests/
alloc_nid_api.c
66 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_simple_check()
118 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_end_misaligned_check()
169 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_exact_address_generic_check()
221 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_top_down_narrow_range_check()
307 * Expect a merge of both regions. Only the region size gets updated.
311 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_min_reserved_generic_check()
359 * Expect a merge of regions. Only the region size gets updated.
363 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_nid_max_reserved_generic_check()
399 * there are two reserved regions at the borders, with a gap big enough to fit
416 struct memblock_region *rgn1 = &memblock.reserved.regions[ in alloc_nid_top_down_reserved_with_space_check()
[all...]
basic_api.c
17 ASSERT_NE(memblock.memory.regions, NULL); in memblock_initialization_check()
22 ASSERT_NE(memblock.reserved.regions, NULL); in memblock_initialization_check()
37 * and size to the collection of available memory regions (memblock.memory).
45 rgn = &memblock.memory.regions[0]; in memblock_add_simple_check()
70 * NUMA node and memory flags to the collection of available memory regions.
78 rgn = &memblock.memory.regions[0]; in memblock_add_node_simple_check()
114 * available memory regions (memblock.memory). The total size and
121 rgn1 = &memblock.memory.regions[0]; in memblock_add_disjoint_check()
122 rgn2 = &memblock.memory.regions[1]; in memblock_add_disjoint_check()
167 * and has size of two regions minu
[all...]
alloc_exact_nid_api.c
30 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_simple_check()
31 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_simple_check()
82 struct memblock_region *new_rgn = &memblock.reserved.regions[1]; in alloc_exact_nid_top_down_numa_part_reserved_check()
83 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_part_reserved_check()
143 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_split_range_low_check()
144 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_split_range_low_check()
200 struct memblock_region *new_rgn = &memblock.reserved.regions[0]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
201 struct memblock_region *req_node = &memblock.memory.regions[nid_req]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
202 struct memblock_region *node2 = &memblock.memory.regions[6]; in alloc_exact_nid_top_down_numa_no_overlap_split_check()
258 struct memblock_region *new_rgn = &memblock.reserved.regions[ in alloc_exact_nid_top_down_numa_no_overlap_low_check()
[all...]
alloc_helpers_api.c
20 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_simple_generic_check()
63 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_misaligned_generic_check()
110 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_high_addr_check()
149 * regions get merged into one.
153 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_no_space_above_check()
186 * Expect successful allocation and merge of both regions.
190 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_top_down_min_addr_cap_check()
236 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_bottom_up_high_addr_check()
278 struct memblock_region *rgn = &memblock.reserved.regions[0]; in alloc_from_bottom_up_no_space_above_check()
314 struct memblock_region *rgn = &memblock.reserved.regions[ in alloc_from_bottom_up_min_addr_cap_check()
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/nvfw/
acr.c
130 hdr->regions.no_regions); in flcn_acr_desc_dump()
132 for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) { in flcn_acr_desc_dump()
135 hdr->regions.region_props[i].start_addr); in flcn_acr_desc_dump()
137 hdr->regions.region_props[i].end_addr); in flcn_acr_desc_dump()
139 hdr->regions.region_props[i].region_id); in flcn_acr_desc_dump()
141 hdr->regions.region_props[i].read_mask); in flcn_acr_desc_dump()
143 hdr->regions.region_props[i].write_mask); in flcn_acr_desc_dump()
145 hdr->regions.region_props[i].client_mask); in flcn_acr_desc_dump()
173 hdr->regions.no_regions); in flcn_acr_desc_v1_dump()
175 for (i = 0; i < ARRAY_SIZE(hdr->regions in flcn_acr_desc_v1_dump()
[all...]
/kernel/linux/linux-6.6/drivers/md/
dm-bio-prison-v1.c
29 struct prison_region regions[]; member
47 prison = kzalloc(struct_size(prison, regions, num_locks), GFP_KERNEL); in dm_bio_prison_create()
53 spin_lock_init(&prison->regions[i].lock); in dm_bio_prison_create()
54 prison->regions[i].cell = RB_ROOT; in dm_bio_prison_create()
184 spin_lock_irq(&prison->regions[l].lock); in bio_detain()
185 r = __bio_detain(&prison->regions[l].cell, key, inmate, cell_prealloc, cell_result); in bio_detain()
186 spin_unlock_irq(&prison->regions[l].lock); in bio_detain()
232 spin_lock_irq(&prison->regions[l].lock); in dm_cell_release()
233 __cell_release(&prison->regions[l].cell, cell, bios); in dm_cell_release()
234 spin_unlock_irq(&prison->regions[ in dm_cell_release()
[all...]
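dm-bio-prison-v1.c spreads lock contention by giving every region its own lock plus rb-tree root, stored in a flexible array sized at creation time (struct_size() in the kernel). A userspace sketch with pthread mutexes standing in for spinlocks and an opaque pointer standing in for the rb-root:

#include <pthread.h>
#include <stdlib.h>

struct prison_region {
	pthread_mutex_t lock;
	void *cell_root;		/* rb-root in the kernel */
};

struct prison {
	unsigned num_locks;
	struct prison_region regions[];	/* flexible array member */
};

static struct prison *prison_create(unsigned num_locks)
{
	struct prison *p = calloc(1, sizeof(*p) +
				  num_locks * sizeof(struct prison_region));

	if (!p)
		return NULL;
	p->num_locks = num_locks;
	for (unsigned i = 0; i < num_locks; i++)
		pthread_mutex_init(&p->regions[i].lock, NULL);
	return p;
}

A key is then hashed to a region index, so bio_detain() and friends only take the one lock covering their cell, as the `&prison->regions[l].lock` hits show.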
/kernel/linux/linux-5.10/drivers/vfio/fsl-mc/
vfio_fsl_mc.c
100 vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region), in vfio_fsl_mc_regions_init()
102 if (!vdev->regions) in vfio_fsl_mc_regions_init()
106 struct resource *res = &mc_dev->regions[i]; in vfio_fsl_mc_regions_init()
109 vdev->regions[i].addr = res->start; in vfio_fsl_mc_regions_init()
110 vdev->regions[i].size = resource_size(res); in vfio_fsl_mc_regions_init()
111 vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS; in vfio_fsl_mc_regions_init()
113 * Only regions addressed with PAGE granularity may be in vfio_fsl_mc_regions_init()
116 if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_fsl_mc_regions_init()
117 !(vdev->regions[ in vfio_fsl_mc_regions_init()
[all...]
/kernel/linux/linux-6.6/drivers/vfio/fsl-mc/
vfio_fsl_mc.c
30 vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region), in vfio_fsl_mc_open_device()
32 if (!vdev->regions) in vfio_fsl_mc_open_device()
36 struct resource *res = &mc_dev->regions[i]; in vfio_fsl_mc_open_device()
39 vdev->regions[i].addr = res->start; in vfio_fsl_mc_open_device()
40 vdev->regions[i].size = resource_size(res); in vfio_fsl_mc_open_device()
41 vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS; in vfio_fsl_mc_open_device()
43 * Only regions addressed with PAGE granularity may be in vfio_fsl_mc_open_device()
46 if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) && in vfio_fsl_mc_open_device()
47 !(vdev->regions[ in vfio_fsl_mc_open_device()
[all...]
/kernel/linux/linux-6.6/drivers/virt/nitro_enclaves/
ne_misc_dev_test.c
23 * regions = {}
34 * regions = {}
45 * regions = {
58 * regions = {
72 * regions = {
87 * regions = {
102 * regions = {
117 phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS, in ne_misc_dev_test_merge_phys_contig_memory_regions()
118 sizeof(*phys_contig_mem_regions.regions), in ne_misc_dev_test_merge_phys_contig_memory_regions()
120 KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions); in ne_misc_dev_test_merge_phys_contig_memory_regions()
[all...]
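The ne_misc_dev_test cases above all exercise one merge rule: a new physical range is folded into the previous region when it starts exactly where that region ends, otherwise it opens a new region. A standalone sketch; phys_range is a hypothetical stand-in for the kernel's region type, and the caller is assumed to guarantee capacity for one more entry:

#include <stddef.h>

struct phys_range { unsigned long start, size; };

static size_t merge_contig(struct phys_range *r, size_t n,
			   unsigned long start, unsigned long size)
{
	if (n && r[n - 1].start + r[n - 1].size == start) {
		r[n - 1].size += size;	/* contiguous: grow in place */
		return n;
	}
	r[n] = (struct phys_range){ start, size };	/* new region */
	return n + 1;
}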
/kernel/linux/linux-6.6/drivers/net/dsa/sja1105/
sja1105_devlink.c
7 /* Since devlink regions have a fixed size and the static config has a variable
85 priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *), in sja1105_setup_devlink_regions()
87 if (!priv->regions) in sja1105_setup_devlink_regions()
97 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_setup_devlink_regions()
99 kfree(priv->regions); in sja1105_setup_devlink_regions()
103 priv->regions[i] = region; in sja1105_setup_devlink_regions()
115 dsa_devlink_region_destroy(priv->regions[i]); in sja1105_teardown_devlink_regions()
117 kfree(priv->regions); in sja1105_teardown_devlink_regions()
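The sja1105 hits show the usual unwind shape around a kcalloc'd array of region pointers: if creating region i fails, destroy the i regions already made, newest first, then free the array. A sketch with hypothetical region_create()/region_destroy() helpers and a single simplified error code:

#include <errno.h>
#include <stdlib.h>

struct region;				/* opaque, hypothetical */
extern struct region *region_create(int id);
extern void region_destroy(struct region *r);

struct priv { struct region **regions; };

static int setup_regions(struct priv *priv, int num)
{
	int i;

	priv->regions = calloc(num, sizeof(*priv->regions));
	if (!priv->regions)
		return -ENOMEM;
	for (i = 0; i < num; i++) {
		priv->regions[i] = region_create(i);
		if (!priv->regions[i])
			goto unwind;
	}
	return 0;
unwind:					/* tear down newest-first */
	while (--i >= 0)
		region_destroy(priv->regions[i]);
	free(priv->regions);
	priv->regions = NULL;
	return -ENOMEM;			/* simplification for the sketch */
}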
/kernel/linux/linux-6.6/drivers/soc/qcom/
smem.c
43 * the partition and holds properties for the two internal memory regions. The
44 * two regions are cached and non-cached memory respectively. Each region
273 * @num_regions: number of @regions
274 * @regions: list of the memory regions defining the shared memory
288 struct smem_region regions[]; member
442 header = smem->regions[0].virt_base; in qcom_smem_alloc_global()
529 header = smem->regions[0].virt_base; in qcom_smem_get_global()
537 region = &smem->regions[i]; in qcom_smem_get_global()
725 header = __smem->regions[ in qcom_smem_get_free_space()
[all...]
/kernel/linux/linux-6.6/mm/
memblock.c
39 * Memblock is a method of managing memory regions during the early
44 * regions. There are several types of these collections:
50 * * ``reserved`` - describes the regions that were allocated
58 * which contains an array of memory regions along with
66 * arrays during addition of new regions. This feature should be used
116 .memory.regions = memblock_memory_init_regions,
121 .reserved.regions = memblock_reserved_init_regions,
132 .regions = memblock_physmem_init_regions,
148 for (i = 0, rgn = &memblock_type->regions[0]; \
150 i++, rgn = &memblock_type->regions[
[all...]
/kernel/linux/linux-6.6/drivers/perf/
marvell_cn10k_tad_pmu.c
32 struct tad_region *regions; member
53 new += readq(tad_pmu->regions[i].base + in tad_pmu_event_counter_read()
71 writeq_relaxed(0, tad_pmu->regions[i].base + in tad_pmu_event_counter_stop()
92 writeq_relaxed(0, tad_pmu->regions[i].base + in tad_pmu_event_counter_start()
100 writeq_relaxed(reg_val, tad_pmu->regions[i].base + in tad_pmu_event_counter_start()
258 struct tad_region *regions; in tad_pmu_probe() local
299 regions = devm_kcalloc(&pdev->dev, tad_cnt, in tad_pmu_probe()
300 sizeof(*regions), GFP_KERNEL); in tad_pmu_probe()
301 if (!regions) in tad_pmu_probe()
304 /* ioremap the distributed TAD pmu regions */ in tad_pmu_probe()
[all...]
