Lines matching refs: cma
15 #define pr_fmt(fmt) "cma: " fmt
30 #include <linux/cma.h>
34 #include <trace/events/cma.h>
37 #include "cma.h"
39 struct cma cma_areas[MAX_CMA_AREAS];
43 phys_addr_t cma_get_base(const struct cma *cma)
45 return PFN_PHYS(cma->base_pfn);
48 unsigned long cma_get_size(const struct cma *cma)
50 return cma->count << PAGE_SHIFT;
53 const char *cma_get_name(const struct cma *cma)
55 return cma->name;
58 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
61 if (align_order <= cma->order_per_bit)
63 return (1UL << (align_order - cma->order_per_bit)) - 1;
70 static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
73 return (cma->base_pfn & ((1UL << align_order) - 1))
74 >> cma->order_per_bit;
77 static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
80 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
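Taken together, these helpers define how pages map onto bitmap bits: each bit covers 2^order_per_bit pages, the mask rounds a search to the requested alignment, and the offset compensates for a base_pfn that is not itself aligned. A minimal userspace sketch of the pages-to-bits conversion, with the kernel's ALIGN() macro reimplemented locally (the macro and the example values are illustrative, not taken from the kernel headers):

#include <stdio.h>

/* Local stand-in for the kernel's ALIGN() macro (power-of-two alignment). */
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

/* Each bitmap bit tracks 2^order_per_bit pages, so a request for `pages`
 * pages occupies this many bits; mirrors cma_bitmap_pages_to_bits(). */
static unsigned long pages_to_bits(unsigned long pages, unsigned int order_per_bit)
{
	return ALIGN(pages, 1UL << order_per_bit) >> order_per_bit;
}

int main(void)
{
	/* order_per_bit == 2: one bit per 4 pages, so 5 pages need 2 bits. */
	printf("%lu\n", pages_to_bits(5, 2));
	return 0;
}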
83 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
89 bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
90 bitmap_count = cma_bitmap_pages_to_bits(cma, count);
92 spin_lock_irqsave(&cma->lock, flags);
93 bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
94 spin_unlock_irqrestore(&cma->lock, flags);
97 static void __init cma_activate_area(struct cma *cma)
99 unsigned long base_pfn = cma->base_pfn, pfn;
102 cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
103 if (!cma->bitmap)
113 for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
119 for (pfn = base_pfn; pfn < base_pfn + cma->count;
123 spin_lock_init(&cma->lock);
126 INIT_HLIST_HEAD(&cma->mem_head);
127 spin_lock_init(&cma->mem_head_lock);
133 bitmap_free(cma->bitmap);
136 if (!cma->reserve_pages_on_error) {
137 for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
140 totalcma_pages -= cma->count;
141 cma->count = 0;
142 pr_err("CMA area %s could not be activated\n", cma->name);
157 void __init cma_reserve_pages_on_error(struct cma *cma)
159 cma->reserve_pages_on_error = true;
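cma_reserve_pages_on_error() lets an early-boot owner opt out of the default failure path shown above: if activation fails, the pages stay reserved instead of being released back with free_reserved_page(). A hedged sketch of a caller, where demo_cma is a placeholder for a region created by one of the reservation helpers listed below:

#include <linux/cma.h>
#include <linux/init.h>

/* demo_cma is an assumed pointer filled in by cma_declare_contiguous_nid()
 * or cma_init_reserved_mem(); the name is purely illustrative. */
static struct cma *demo_cma;

static void __init demo_keep_pages_reserved(void)
{
	if (demo_cma)
		cma_reserve_pages_on_error(demo_cma);
}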
170 * @res_cma: Pointer to store the created cma region.
177 struct cma **res_cma)
179 struct cma *cma;
202 cma = &cma_areas[cma_area_count];
205 snprintf(cma->name, CMA_MAX_NAME, name);
207 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
209 cma->base_pfn = PFN_DOWN(base);
210 cma->count = size >> PAGE_SHIFT;
211 cma->order_per_bit = order_per_bit;
212 *res_cma = cma;
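cma_init_reserved_mem() wires an already-reserved physical range into one of the cma_areas[] slots: it picks a name, records base_pfn, count and order_per_bit, and hands the region back through *res_cma. A hedged sketch of a caller, assuming the range was reserved earlier (for example via memblock) and that the "demo" name and order_per_bit of 0 are illustrative choices:

#include <linux/cma.h>
#include <linux/init.h>
#include <linux/types.h>

static struct cma *demo_cma;

/* Register a pre-reserved physical range as a CMA area named "demo";
 * order_per_bit == 0 means one bitmap bit per page. */
static int __init demo_register_reserved(phys_addr_t base, phys_addr_t size)
{
	return cma_init_reserved_mem(base, size, 0, "demo", &demo_cma);
}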
228 * @res_cma: Pointer to store the created cma region.
242 bool fixed, const char *name, struct cma **res_cma,
330 * It will place the new cma area close to the start of the node
332 * cma area and not into it.
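cma_declare_contiguous_nid() is the boot-time front end: it reserves the memory from memblock (bottom-up, close to the start of the node when possible, per the comment fragment above) and then registers the result via cma_init_reserved_mem(). A hedged sketch of a caller, where the 16 MiB size, the zero base/limit/alignment (meaning "anywhere, default alignment") and the "demo" name are assumptions:

#include <linux/cma.h>
#include <linux/init.h>
#include <linux/numa.h>
#include <linux/sizes.h>

static struct cma *demo_cma;

/* Reserve 16 MiB for a CMA area named "demo" on any node. */
static int __init demo_declare_cma(void)
{
	return cma_declare_contiguous_nid(0, SZ_16M, 0, 0, 0, false,
					  "demo", &demo_cma, NUMA_NO_NODE);
}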
391 static void cma_debug_show_areas(struct cma *cma)
396 unsigned long nbits = cma_bitmap_maxno(cma);
398 spin_lock_irq(&cma->lock);
401 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
404 next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
406 nr_part = nr_zero << cma->order_per_bit;
412 pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
413 spin_unlock_irq(&cma->lock);
416 static inline void cma_debug_show_areas(struct cma *cma) { }
421 * @cma: Contiguous memory region for which the allocation is performed.
429 struct page *cma_alloc(struct cma *cma, unsigned long count,
440 if (!cma || !cma->count || !cma->bitmap)
443 pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
444 (void *)cma, cma->name, count, align);
449 trace_cma_alloc_start(cma->name, count, align);
451 mask = cma_bitmap_aligned_mask(cma, align);
452 offset = cma_bitmap_aligned_offset(cma, align);
453 bitmap_maxno = cma_bitmap_maxno(cma);
454 bitmap_count = cma_bitmap_pages_to_bits(cma, count);
460 spin_lock_irq(&cma->lock);
461 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
465 spin_unlock_irq(&cma->lock);
468 bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
474 spin_unlock_irq(&cma->lock);
476 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
486 cma_clear_bitmap(cma, pfn, count);
493 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
499 trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);
513 __func__, cma->name, count, ret);
514 cma_debug_show_areas(cma);
521 cma_sysfs_account_success_pages(cma, count);
524 if (cma)
525 cma_sysfs_account_fail_pages(cma, count);
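On the consumer side, cma_alloc() returns `count` physically contiguous pages aligned to 2^align pages, or NULL on failure; the bitmap bookkeeping, busy-retry loop, tracepoints and sysfs accounting above are all internal. A hedged sketch of a caller, where the region pointer, the 16-page count and the order-0 alignment are assumptions:

#include <linux/cma.h>
#include <linux/mm.h>

/* Grab 16 contiguous pages from the given region; no_warn == false keeps
 * the allocation-failure warning enabled. Returns NULL on failure. */
static struct page *demo_grab_pages(struct cma *region)
{
	return cma_alloc(region, 16, 0, false);
}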
531 bool cma_pages_valid(struct cma *cma, const struct page *pages,
536 if (!cma || !pages)
541 if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
552 * @cma: Contiguous memory region for which the allocation is performed.
560 bool cma_release(struct cma *cma, const struct page *pages,
565 if (!cma_pages_valid(cma, pages, count))
572 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
575 cma_clear_bitmap(cma, pfn, count);
576 trace_cma_release(cma->name, pfn, pages, count);
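cma_pages_valid() and cma_release() form the matching teardown path: the former only checks that the pfn range falls inside the region, while the latter returns the pages and clears the corresponding bitmap bits. A hedged sketch of the release side for the allocation above (the 16-page count must match what was allocated; the demo_* name is an assumption):

#include <linux/cma.h>
#include <linux/printk.h>

static void demo_put_pages(struct cma *region, struct page *pages)
{
	/* cma_release() returns false if the pages are not part of the region. */
	if (!cma_release(region, pages, 16))
		pr_warn("demo: tried to release pages outside the CMA region\n");
}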
581 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
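Finally, cma_for_each_area() walks every registered entry of cma_areas[] and stops early if the callback returns non-zero, which pairs naturally with the accessors near the top of the file. A hedged sketch of a callback that just reports each region (the demo_* names are assumptions):

#include <linux/cma.h>
#include <linux/printk.h>
#include <linux/types.h>

static int demo_show_area(struct cma *cma, void *data)
{
	phys_addr_t base = cma_get_base(cma);

	/* Returning 0 keeps the iteration going; non-zero stops it. */
	pr_info("cma %s: base %pa, size %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));
	return 0;
}

static void demo_show_all_areas(void)
{
	cma_for_each_area(demo_show_area, NULL);
}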