Lines matching refs: size

107  * @size:	size of the allocated space, 8 byte aligned
114 __le32 size;
142 * @size: size of the partition
151 __le32 size;
182 * @size: size of the partition
193 __le32 size;
204 * @size: size of partition
210 size_t size;
219 * @size: size of the data, including padding bytes
227 __le32 size; /* includes padding bytes */
237 * @size: size of the smem region
244 __le32 size;
256 * @size: size of the memory region
261 size_t size;
306 return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
331 le32_to_cpu(e->size);
339 return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
353 return p - le32_to_cpu(e->size);
376 size_t size)
385 p_end = (void *)phdr + part->size;
407 alloc_size = sizeof(*hdr) + ALIGN(size, 8);
415 hdr->size = cpu_to_le32(ALIGN(size, 8));
416 hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
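The lines above (407-416) round the requested size up to an 8-byte boundary and record the difference in padding_data. A minimal userspace sketch of that arithmetic, not driver code; the macro and variable names are invented, and the values are chosen only for illustration:

#include <stdio.h>

#define ALIGN8(x)	(((x) + 7u) & ~7u)	/* same rounding as ALIGN(size, 8) */

int main(void)
{
	unsigned int size = 13;			/* caller-requested bytes */
	unsigned int reserved = ALIGN8(size);	/* 16: bytes actually set aside */
	unsigned int padding = reserved - size;	/* 3: what padding_data would hold */

	printf("reserved=%u padding=%u\n", reserved, padding);
	return 0;
}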
437 size_t size)
447 size = ALIGN(size, 8);
448 if (WARN_ON(size > le32_to_cpu(header->available)))
452 entry->size = cpu_to_le32(size);
462 le32_add_cpu(&header->free_offset, size);
463 le32_add_cpu(&header->available, -size);
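Lines 447-463 show bump-style bookkeeping: the request is aligned, checked against the remaining space, then free_offset grows and available shrinks by the same amount. A small illustrative model of that logic, assuming nothing beyond the lines above; toy_heap and toy_alloc are invented names:

#include <stdint.h>

struct toy_heap {
	uint32_t free_offset;	/* offset of the first free byte */
	uint32_t available;	/* bytes still free */
};

static int toy_alloc(struct toy_heap *h, uint32_t size)
{
	size = (size + 7u) & ~7u;	/* ALIGN(size, 8), as on line 447 */
	if (size > h->available)	/* the check WARN_ONed on line 448 */
		return -1;
	h->free_offset += size;		/* line 462 */
	h->available -= size;		/* line 463 */
	return 0;
}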
472 * @size: number of bytes to be allocated
474 * Allocate space for a given smem item of size @size, given that the item is
477 int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
503 ret = qcom_smem_alloc_private(__smem, part, item, size);
506 ret = qcom_smem_alloc_private(__smem, part, item, size);
508 ret = qcom_smem_alloc_global(__smem, item, size);
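Lines 472-508 cover qcom_smem_alloc(), the exported allocation entry point with the signature shown on line 477. A hedged caller sketch, assuming the declaration from <linux/soc/qcom/smem.h>; EXAMPLE_HOST and EXAMPLE_ITEM are invented IDs, not values used by the driver:

#include <linux/errno.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_HOST	1	/* hypothetical remote processor ID */
#define EXAMPLE_ITEM	499	/* hypothetical smem item number */

static int example_reserve(void)
{
	int ret;

	/* Ask for 128 bytes; the driver rounds the size up to a multiple of 8. */
	ret = qcom_smem_alloc(EXAMPLE_HOST, EXAMPLE_ITEM, 128);
	if (ret < 0 && ret != -EEXIST)	/* -EEXIST: item already allocated */
		return ret;

	return 0;
}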
519 size_t *size)
540 e_size = le32_to_cpu(entry->size);
543 if (WARN_ON(e_size + entry_offset > region->size))
546 if (size != NULL)
547 *size = e_size;
559 size_t *size)
568 p_end = (void *)phdr + part->size;
578 if (size != NULL) {
579 e_size = le32_to_cpu(e->size);
582 if (WARN_ON(e_size > part->size || padding_data > e_size))
585 *size = e_size - padding_data;
614 if (size != NULL) {
615 e_size = le32_to_cpu(e->size);
618 if (WARN_ON(e_size > part->size || padding_data > e_size))
621 *size = e_size - padding_data;
647 * qcom_smem_get() - resolve ptr and size of a smem item
650 * @size: pointer to be filled out with size of the item
653 * item is returned in @size.
655 void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
676 ptr = qcom_smem_get_private(__smem, part, item, size);
679 ptr = qcom_smem_get_private(__smem, part, item, size);
681 ptr = qcom_smem_get_global(__smem, item, size);
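Lines 647-681 cover qcom_smem_get(), which resolves an item to a pointer and fills out @size. A companion sketch to the allocation example above; struct example_state and the IDs are invented, only qcom_smem_get() itself is assumed from the header:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_HOST	1	/* same invented IDs as the allocation sketch */
#define EXAMPLE_ITEM	499

struct example_state {		/* hypothetical layout of the shared item */
	u32 version;
	u32 flags;
};

static int example_read(u32 *version)
{
	struct example_state *state;
	size_t size;

	state = qcom_smem_get(EXAMPLE_HOST, EXAMPLE_ITEM, &size);
	if (IS_ERR(state))
		return PTR_ERR(state);

	/* @size is filled out with the usable size of the item. */
	if (size < sizeof(*state))
		return -EINVAL;

	*version = state->version;
	return 0;
}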
714 if (ret > le32_to_cpu(part->size))
722 if (ret > le32_to_cpu(part->size))
728 if (ret > __smem->regions[0].size)
736 static bool addr_in_range(void __iomem *base, size_t size, void *addr)
738 return base && ((void __iomem *)addr >= base && (void __iomem *)addr < base + size);
758 if (addr_in_range(part->virt_base, part->size, p)) {
767 if (addr_in_range(part->virt_base, part->size, p)) {
776 if (addr_in_range(area->virt_base, area->size, p)) {
865 u32 size;
868 header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
889 size = le32_to_cpu(header->size);
890 if (size != le32_to_cpu(entry->size)) {
891 dev_err(smem->dev, "bad partition size (%u != %u)\n",
892 size, le32_to_cpu(entry->size));
896 if (le32_to_cpu(header->offset_free_uncached) > size) {
898 le32_to_cpu(header->offset_free_uncached), size);
926 if (!le32_to_cpu(entry->size))
951 smem->global_partition.size = le32_to_cpu(entry->size);
975 if (!le32_to_cpu(entry->size))
1004 smem->partitions[remote_host].size = le32_to_cpu(entry->size);
1017 ptable_start = region->aux_base + region->size - SZ_4K;
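Line 1017 places the partition table in the last SZ_4K of the first memory region. Illustrative arithmetic only; the addresses below are invented:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K	0x1000u

int main(void)
{
	uint32_t aux_base = 0x86000000u;	/* invented region base */
	uint32_t size = 0x00200000u;		/* invented region size */
	uint32_t ptable_start = aux_base + size - SZ_4K;

	/* The table sits in the final 4 KiB: 0x861ff000 here. */
	printf("ptable at 0x%08x\n", (unsigned int)ptable_start);
	return 0;
}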
1027 static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
1033 smem->regions[0].size = size;
1034 smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
1062 region->size = resource_size(&r);
1076 u32 size;
1095 smem->regions[0].size = rmem->size;
1120 smem->regions[i].size);
1148 size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
1166 qcom_smem_map_global(smem, size);