Lines matching refs: size

65 				"#size-cells", NULL);
96 static void *unflatten_dt_alloc(void **mem, unsigned long size,
103 *mem += size;
282 * Return: The size of unflattened device tree or error code
368 void *(*dt_alloc)(u64 size, u64 align),
371 int size;
387 pr_debug("size: %08x\n", fdt_totalsize(blob));
395 /* First pass, scan for size */
396 size = unflatten_dt_nodes(blob, NULL, dad, NULL);
397 if (size <= 0)
400 size = ALIGN(size, 4);
401 pr_debug(" size is %d, allocating...\n", size);
404 mem = dt_alloc(size + 4, __alignof__(struct device_node));
408 memset(mem, 0, size);
410 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
417 if (be32_to_cpup(mem + size) != 0xdeadbeef)
419 be32_to_cpup(mem + size));
433 static void *kernel_tree_alloc(u64 size, u64 align)
435 return kzalloc(size, GFP_KERNEL);
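
The hits above show the two-pass unflatten idiom: pass 1 runs with a NULL buffer purely to measure, the result is 4-byte aligned, size + 4 bytes are allocated, and a big-endian 0xdeadbeef guard word is planted past the payload so pass 2 can be checked for overruns. A minimal userspace sketch of that pattern follows; fake_unflatten() is an invented stand-in for unflatten_dt_nodes(), and only the guard pattern itself is faithful.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for unflatten_dt_nodes(): a NULL buffer means "just measure". */
static long fake_unflatten(void *mem)
{
	long need = 24;				/* pretend the tree needs 24 bytes */

	if (mem)
		memset(mem, 0xab, need);	/* pass 2: populate */
	return need;
}

int main(void)
{
	static const unsigned char guard[4] = { 0xde, 0xad, 0xbe, 0xef };
	long size = fake_unflatten(NULL);	/* first pass, scan for size */

	if (size <= 0)
		return 1;
	size = (size + 3) & ~3L;		/* ALIGN(size, 4) */

	unsigned char *mem = malloc(size + 4);
	if (!mem)
		return 1;
	memset(mem, 0, size);
	memcpy(mem + size, guard, 4);		/* end-of-buffer marker */

	fake_unflatten(mem);			/* second pass, populate */

	if (memcmp(mem + size, guard, 4))
		fprintf(stderr, "End of tree marker overwritten\n");
	free(mem);
	return 0;
}
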
480 phys_addr_t size, bool nomap)
488 if (memblock_overlaps_region(&memblock.memory, base, size) &&
489 memblock_is_region_reserved(base, size))
492 return memblock_mark_nomap(base, size);
494 return memblock_reserve(base, size);
504 phys_addr_t base, size;
524 size = dt_mem_next_cell(dt_root_size_cells, &prop);
526 if (size &&
527 early_init_dt_reserve_memory(base, size, nomap) == 0)
528 pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
529 uname, &base, (unsigned long)(size / SZ_1M));
531 pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
532 uname, &base, (unsigned long)(size / SZ_1M));
536 fdt_reserved_mem_save_node(node, uname, base, size);
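
The reserved-memory hits above parse a "reg" property as a stream of big-endian cells, one <address size> pair per entry with dt_root_addr_cells / dt_root_size_cells words each. The sketch below models that walk under stated assumptions: next_cell() mirrors dt_mem_next_cell(), reserve() is a printf stand-in for the memblock_reserve()/memblock_mark_nomap() choice, and all values are invented.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t next_cell(int cells, const unsigned char **p)
{
	uint64_t v = 0;

	while (cells--) {
		v = (v << 32) | ((uint64_t)(*p)[0] << 24 | (uint64_t)(*p)[1] << 16 |
				 (uint64_t)(*p)[2] << 8 | (uint64_t)(*p)[3]);
		*p += 4;
	}
	return v;
}

static int reserve(uint64_t base, uint64_t size, int nomap)
{
	printf("reserve base=%#" PRIx64 " size=%" PRIu64 " MiB%s\n",
	       base, size >> 20, nomap ? " (nomap)" : "");
	return 0;
}

int main(void)
{
	/* One entry, 2 address cells + 1 size cell: base 0x80000000, 16 MiB. */
	static const unsigned char reg[] = {
		0x00, 0x00, 0x00, 0x00,  0x80, 0x00, 0x00, 0x00,
		0x01, 0x00, 0x00, 0x00,
	};
	const unsigned char *p = reg;
	const unsigned char *end = reg + sizeof(reg);

	while (end - p >= (2 + 1) * 4) {
		uint64_t base = next_cell(2, &p);
		uint64_t size = next_cell(1, &p);

		if (size)
			reserve(base, size, 0);
	}
	return 0;
}
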
544 * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
552 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
593 if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
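
The "size" lookup above hints at the static-vs-dynamic distinction for reserved-memory children: a node with "reg" is reserved in place, while one carrying only "size" is placed dynamically later. A tiny sketch of that decision; has_prop() and its crude substring matching are invented stand-ins for of_get_flat_dt_prop(), good enough only for this illustration.

#include <stdio.h>
#include <string.h>

static int has_prop(const char *props, const char *name)
{
	/* props is a fake comma-separated property list, sketch only */
	return strstr(props, name) != NULL;
}

int main(void)
{
	const char *node_props = "size,alignment,alloc-ranges";

	if (has_prop(node_props, "reg"))
		printf("static region: reserve it now\n");
	else if (has_prop(node_props, "size"))
		printf("dynamic region: allocate it later\n");
	return 0;
}
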
633 u64 base, size;
643 fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
644 if (!size)
646 memblock_reserve(base, size);
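
The loop above walks the /memreserve/ map: the flattened-tree header carries a table of {address, size} pairs terminated by a zero-size entry, and every live entry is handed to the early allocator. In this sketch the table and reserve() are invented stand-ins, not kernel or libfdt interfaces.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct mem_rsv {
	uint64_t base;
	uint64_t size;
};

static void reserve(uint64_t base, uint64_t size)
{
	printf("reserve %#" PRIx64 " + %#" PRIx64 "\n", base, size);
}

int main(void)
{
	static const struct mem_rsv rsvmap[] = {
		{ 0x40000000, 0x10000 },	/* e.g. firmware scratch */
		{ 0x48000000, 0x04000 },
		{ 0, 0 },			/* terminator: size == 0 */
	};

	for (int n = 0; rsvmap[n].size != 0; n++)
		reserve(rsvmap[n].base, rsvmap[n].size);
	return 0;
}
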
753 int *size)
755 return fdt_getprop(initial_boot_params, node, name, size);
867 int size;
871 prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
873 while (size > 0) {
875 size -= strlen(prop) + 1;
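
The root "compatible" scan above walks a list of NUL-separated strings, consuming strlen + 1 bytes per entry until the length reported by the flat property lookup is exhausted. A sketch of that walk with invented sample strings:

#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char prop[] = "vendor,board\0vendor,soc";
	int size = (int)sizeof(prop);	/* the property lookup reports this */
	const char *p = prop;

	while (size > 0) {
		printf("compatible: %s\n", p);
		size -= (int)strlen(p) + 1;
		p += strlen(p) + 1;
	}
	return 0;
}
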
998 rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
1000 pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
1001 i, &rgn[i].base, &rgn[i].size);
1004 memblock_cap_memory_range(rgn[0].base, rgn[0].size);
1005 for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
1006 memblock_add(rgn[i].base, rgn[i].size);
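
The capping logic above clamps all known memory to the first usable range, then adds any further ranges back on top. In this sketch cap() and add() are printf stand-ins for memblock_cap_memory_range() and memblock_add(), and the sample ranges are invented.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_USABLE_RANGES 2

struct range {
	uint64_t base;
	uint64_t size;
};

static void cap(uint64_t base, uint64_t size)
{
	printf("cap memory to %#" PRIx64 " + %#" PRIx64 "\n", base, size);
}

static void add(uint64_t base, uint64_t size)
{
	printf("add back      %#" PRIx64 " + %#" PRIx64 "\n", base, size);
}

int main(void)
{
	static const struct range rgn[MAX_USABLE_RANGES] = {
		{ 0x80000000, 0x10000000 },	/* e.g. crash kernel window */
		{ 0xa0000000, 0x00100000 },	/* e.g. elfcorehdr */
	};
	int i;

	cap(rgn[0].base, rgn[0].size);
	for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
		add(rgn[i].base, rgn[i].size);
	return 0;
}
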
1060 * early_init_dt_scan_root - fetch the top level address and size cells
1074 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
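
The root scan above reads #address-cells and #size-cells, falling back to the devicetree-spec defaults (2 address cells, 1 size cell) when a property is absent. A small sketch; cells_or_default() models the combination of of_get_flat_dt_prop() and be32_to_cpup().

#include <stdio.h>

static int cells_or_default(const unsigned char *prop, int def)
{
	if (!prop)
		return def;
	/* equivalent of be32_to_cpup() on a raw big-endian cell */
	return (prop[0] << 24) | (prop[1] << 16) | (prop[2] << 8) | prop[3];
}

int main(void)
{
	static const unsigned char one_be[4] = { 0, 0, 0, 1 };

	/* property missing: spec default */
	printf("#address-cells = %d\n", cells_or_default(NULL, 2));
	/* property present: decode the big-endian cell */
	printf("#size-cells    = %d\n", cells_or_default(one_be, 1));
	return 0;
}
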
1125 pr_debug("memory scan node %s, reg size %d,\n",
1129 u64 base, size;
1132 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1134 if (size == 0)
1136 pr_debug(" - %llx, %llx\n", base, size);
1138 early_init_dt_add_memory_arch(base, size);
1145 if (memblock_mark_hotplug(base, size))
1147 base, base + size);
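
After the reg cells of a memory node are decoded, the walk above skips zero-size banks, registers the rest, and marks hotpluggable banks (memblock_mark_hotplug() in the kernel). A sketch with an invented bank table:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct bank {
	uint64_t base;
	uint64_t size;
	int hotpluggable;
};

int main(void)
{
	static const struct bank banks[] = {
		{ 0x080000000, 0x40000000, 0 },
		{ 0x0c0000000, 0x0,        0 },	/* zero size: skipped */
		{ 0x100000000, 0x40000000, 1 },
	};
	size_t i;

	for (i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
		if (banks[i].size == 0)
			continue;
		printf(" - %" PRIx64 ", %" PRIx64 "%s\n", banks[i].base,
		       banks[i].size, banks[i].hotpluggable ? " (hotplug)" : "");
	}
	return 0;
}
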
1220 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1224 if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
1226 base, base + size);
1231 size -= PAGE_SIZE - (base & ~PAGE_MASK);
1234 size &= PAGE_MASK;
1238 base, base + size);
1242 if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
1244 ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
1245 size = MAX_MEMBLOCK_ADDR - base + 1;
1248 if (base + size < phys_offset) {
1250 base, base + size);
1256 size -= phys_offset - base;
1259 memblock_add(base, size);
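
The weak early_init_dt_add_memory_arch() above sanity-clamps each block: a block living entirely inside one partial page is dropped, a misaligned base is rounded up to a page with the size shrunk to match, the partial tail page is trimmed, the end is capped at a maximum address, and anything below phys_offset is cut off. A userspace sketch of that arithmetic; PAGE_SIZE, MAX_ADDR and the sample values are stand-ins.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	0x1000ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define MAX_ADDR	0xffffffffULL	/* stand-in for MAX_MEMBLOCK_ADDR */

static void add_memory(uint64_t phys_offset, uint64_t base, uint64_t size)
{
	/* whole block lives inside the partial page at base: ignore it */
	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
		printf("Ignoring memory block %#" PRIx64 "-%#" PRIx64 "\n",
		       base, base + size);
		return;
	}
	if (base & ~PAGE_MASK) {	/* align base up, shrink size */
		size -= PAGE_SIZE - (base & ~PAGE_MASK);
		base = (base + PAGE_SIZE - 1) & PAGE_MASK;
	}
	size &= PAGE_MASK;		/* drop the partial tail page */

	if (base > MAX_ADDR) {
		printf("Ignoring memory above %#" PRIx64 "\n", MAX_ADDR);
		return;
	}
	if (base + size - 1 > MAX_ADDR)	/* cap the end of the block */
		size = MAX_ADDR - base + 1;

	if (base + size < phys_offset) {
		printf("Ignoring memory below phys_offset\n");
		return;
	}
	if (base < phys_offset) {	/* trim the low end */
		size -= phys_offset - base;
		base = phys_offset;
	}
	printf("add %#" PRIx64 " + %#" PRIx64 "\n", base, size);
}

int main(void)
{
	add_memory(0, 0x80000800ULL, 0x3000ULL);  /* misaligned base gets rounded */
	return 0;
}
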
1262 static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1264 void *ptr = memblock_alloc(size, align);
1268 __func__, size, align);
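
The early allocator wrapper above treats failure as fatal. A userspace sketch of the same shape: C11 aligned_alloc() stands in for memblock_alloc(), and exit() mirrors the panic() in the kernel helper.

#include <stdio.h>
#include <stdlib.h>

static void *alloc_or_die(size_t size, size_t align)
{
	/* C11 aligned_alloc() wants size to be a multiple of align */
	size_t rounded = (size + align - 1) & ~(align - 1);
	void *ptr = aligned_alloc(align, rounded);

	if (!ptr) {
		fprintf(stderr, "%s: Failed to allocate %zu bytes align=0x%zx\n",
			__func__, size, align);
		exit(1);
	}
	return ptr;
}

int main(void)
{
	void *dt = alloc_or_die(1024, 8);

	free(dt);
	return 0;
}
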
1294 /* Initialize {size,address}-cells info */
1353 int size;
1361 size = fdt_totalsize(initial_boot_params);
1362 dt = early_init_dt_alloc_memory_arch(size,
1366 memcpy(dt, initial_boot_params, size);
1394 of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
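
The last hits copy the boot FDT aside and record its size for the raw sysfs attribute: fdt_totalsize() just reads the big-endian totalsize field at byte offset 4 of the FDT header, so the whole blob can be kept with a single allocation and memcpy before the early memory it lives in is recycled. A sketch of that step; the 16-byte "blob" below is a fabricated minimal header, not a valid tree.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static uint32_t totalsize(const unsigned char *fdt)
{
	return ((uint32_t)fdt[4] << 24) | ((uint32_t)fdt[5] << 16) |
	       ((uint32_t)fdt[6] << 8) | (uint32_t)fdt[7];
}

int main(void)
{
	/* magic 0xd00dfeed, totalsize 0x10 */
	static const unsigned char blob[16] = {
		0xd0, 0x0d, 0xfe, 0xed, 0x00, 0x00, 0x00, 0x10,
	};
	uint32_t size = totalsize(blob);
	void *dt = malloc(size);

	if (!dt)
		return 1;
	memcpy(dt, blob, size);
	printf("copied %u bytes\n", (unsigned)size);
	free(dt);
	return 0;
}
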