Lines matching refs:alloc — cross-reference hits for the identifier "alloc" in the Android binder allocator (drivers/android/binder_alloc.c). Each entry below is prefixed with its line number in that source file.
60 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
63 if (list_is_last(&buffer->entry, &alloc->buffers))
64 return alloc->buffer + alloc->buffer_size - buffer->user_data;
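Lines 60-64 show that binder buffers carry no explicit size field: a buffer's size is the distance from its user_data to the next buffer's user_data, or to the end of the mapped region for the last buffer on the list. A minimal userspace sketch of the same scheme (names here are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    struct buf {
        unsigned char *user_data;  /* start of this buffer's payload */
        struct buf *next;          /* NULL for the last buffer in the region */
    };

    /* size is implicit: the gap to the next buffer, or to the region end */
    static size_t buf_size(unsigned char *region, size_t region_size,
                           const struct buf *b)
    {
        if (!b->next)
            return region + region_size - b->user_data;
        return (size_t)(b->next->user_data - b->user_data);
    }

    int main(void)
    {
        unsigned char region[4096];
        struct buf b2 = { region + 1024, NULL };
        struct buf b1 = { region, &b2 };

        printf("%zu %zu\n", buf_size(region, sizeof(region), &b1),
               buf_size(region, sizeof(region), &b2));  /* prints: 1024 3072 */
        return 0;
    }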
68 static void binder_insert_free_buffer(struct binder_alloc *alloc,
71 struct rb_node **p = &alloc->free_buffers.rb_node;
79 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
83 alloc->pid, new_buffer_size, new_buffer);
90 buffer_size = binder_alloc_buffer_size(alloc, buffer);
98 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
102 struct binder_alloc *alloc, struct binder_buffer *new_buffer)
104 struct rb_node **p = &alloc->allocated_buffers.rb_node;
123 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
127 struct binder_alloc *alloc,
130 struct rb_node *n = alloc->allocated_buffers.rb_node;
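Lines 68-130 maintain two indexes over the same buffers: free_buffers is a red-black tree keyed by buffer size, so allocation can do a best-fit search, while allocated_buffers is keyed by user address, so the pointer userspace hands back at free time can be looked up directly. A hedged userspace model of the two orderings, with sorted singly linked lists standing in for the kernel's rb-trees (O(n) instead of O(log n), but the comparison logic is the same):

    #include <stdint.h>
    #include <stddef.h>

    struct buffer {
        size_t size;               /* key for the by-size (free) index */
        void *user_data;           /* key for the by-address (allocated) index */
        struct buffer *free_next;
        struct buffer *alloc_next;
    };

    /* free index: smallest size first, which is what best-fit wants */
    static void insert_free(struct buffer **head, struct buffer *b)
    {
        while (*head && (*head)->size < b->size)
            head = &(*head)->free_next;
        b->free_next = *head;
        *head = b;
    }

    /* allocated index: ordered by user address for lookup on free */
    static void insert_allocated(struct buffer **head, struct buffer *b)
    {
        while (*head &&
               (uintptr_t)(*head)->user_data < (uintptr_t)b->user_data)
            head = &(*head)->alloc_next;
        b->alloc_next = *head;
        *head = b;
    }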
161 * @alloc: binder_alloc for this proc
170 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
175 mutex_lock(&alloc->mutex);
176 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
177 mutex_unlock(&alloc->mutex);
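The pairing at lines 170-177 illustrates the *_locked naming convention that recurs throughout this file (see also lines 566-578 and 706-723): the suffixed function assumes the caller already holds alloc->mutex, and the unsuffixed wrapper exists only to take and release it around the call. A pthreads sketch of the convention:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int counter;

    /* _locked variant: caller must already hold `lock` */
    static int counter_bump_locked(void)
    {
        return ++counter;
    }

    /* public wrapper: take the lock, delegate, release */
    static int counter_bump(void)
    {
        int v;

        pthread_mutex_lock(&lock);
        v = counter_bump_locked();
        pthread_mutex_unlock(&lock);
        return v;
    }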
181 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
192 "%d: %s pages %pK-%pK\n", alloc->pid,
198 trace_binder_update_page_range(alloc, allocate, start, end);
204 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
211 if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
212 mm = alloc->vma_vm_mm;
216 vma = alloc->vma;
222 alloc->pid);
231 index = (page_addr - alloc->buffer) / PAGE_SIZE;
232 page = &alloc->pages[index];
235 trace_binder_alloc_lru_start(alloc, index);
240 trace_binder_alloc_lru_end(alloc, index);
247 trace_binder_alloc_page_start(alloc, index);
253 alloc->pid, page_addr);
256 page->alloc = alloc;
263 alloc->pid, user_page_addr);
267 if (index + 1 > alloc->pages_high)
268 alloc->pages_high = index + 1;
270 trace_binder_alloc_page_end(alloc, index);
283 index = (page_addr - alloc->buffer) / PAGE_SIZE;
284 page = &alloc->pages[index];
286 trace_binder_free_lru_start(alloc, index);
291 trace_binder_free_lru_end(alloc, index);
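Lines 181-291 are binder_update_page_range(), which makes the physical pages behind an address range resident (allocate != 0) or releases them (allocate == 0). The pages[] array is indexed by (page_addr - alloc->buffer) / PAGE_SIZE, as lines 204, 231 and 283 show, and released pages actually go onto an LRU list for the shrinker (the trace_binder_free_lru_* calls) rather than being freed immediately. A simplified userspace walk over the same range, with calloc()/free() standing in for alloc_page() and the LRU put:

    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SZ 4096u

    struct range_alloc {
        uint8_t *buffer;   /* base of the mapped region */
        void   **pages;    /* one slot per page; NULL = not resident */
    };

    static int update_page_range(struct range_alloc *a, int allocate,
                                 uint8_t *start, uint8_t *end)
    {
        uint8_t *page_addr;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SZ) {
            size_t index = (size_t)(page_addr - a->buffer) / PAGE_SZ;

            if (allocate && !a->pages[index]) {
                a->pages[index] = calloc(1, PAGE_SZ); /* stands in for alloc_page() */
                if (!a->pages[index])
                    return -1;
            } else if (!allocate && a->pages[index]) {
                free(a->pages[index]); /* the kernel parks it on an LRU instead */
                a->pages[index] = NULL;
            }
        }
        return 0;
    }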
313 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
317 alloc->vma_vm_mm = vma->vm_mm;
319 * If we see alloc->vma is not NULL, buffer data structures set up
321 * We also want to guarantee new alloc->vma_vm_mm is always visible
322 * if alloc->vma is set.
325 alloc->vma = vma;
329 struct binder_alloc *alloc)
333 if (alloc->vma) {
336 vma = alloc->vma;
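The comment fragments at lines 319-325 describe a publish pattern: alloc->vma_vm_mm is written first, then alloc->vma is stored, with a barrier in between, so a reader that observes a non-NULL vma (lines 329-336) is guaranteed to also see the matching vma_vm_mm. The barrier calls themselves don't contain "alloc" and so don't appear in this listing; depending on kernel version they are explicit smp_* barriers or release/acquire accessors. A C11 sketch of the same guarantee using stdatomic:

    #include <stdatomic.h>
    #include <stddef.h>

    struct mm_stub { int dummy; };

    static struct mm_stub *vma_vm_mm;   /* plain field, written first */
    static _Atomic(void *) vma;         /* the published pointer */

    static void set_vma(void *v, struct mm_stub *mm)
    {
        vma_vm_mm = mm;                 /* 1: set up dependent state */
        atomic_store_explicit(&vma, v, memory_order_release); /* 2: publish */
    }

    static struct mm_stub *get_mm_if_mapped(void)
    {
        /* acquire pairs with the release above: seeing vma != NULL
         * guarantees vma_vm_mm is visible too */
        if (atomic_load_explicit(&vma, memory_order_acquire))
            return vma_vm_mm;
        return NULL;
    }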
341 static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
355 for (n = rb_first(&alloc->allocated_buffers); n != NULL;
362 total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
370 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
373 alloc->pid, pid, num_buffers, total_alloc_size);
378 struct binder_alloc *alloc,
385 struct rb_node *n = alloc->free_buffers.rb_node;
394 if (!binder_alloc_get_vma(alloc)) {
397 alloc->pid);
407 alloc->pid, data_size, offsets_size);
414 alloc->pid, extra_buffers_size);
421 if (is_async && alloc->free_async_space < size) {
424 alloc->pid, size);
431 buffer_size = binder_alloc_buffer_size(alloc, buffer);
451 for (n = rb_first(&alloc->allocated_buffers); n != NULL;
454 buffer_size = binder_alloc_buffer_size(alloc, buffer);
460 for (n = rb_first(&alloc->free_buffers); n != NULL;
463 buffer_size = binder_alloc_buffer_size(alloc, buffer);
471 alloc->pid, size);
481 buffer_size = binder_alloc_buffer_size(alloc, buffer);
486 alloc->pid, size, buffer, buffer_size);
495 ret = binder_update_page_range(alloc, 1, (void __user *)
505 pr_err("%s: %d failed to alloc new buffer struct\n",
506 __func__, alloc->pid);
512 binder_insert_free_buffer(alloc, new_buffer);
515 rb_erase(best_fit, &alloc->free_buffers);
518 binder_insert_allocated_buffer_locked(alloc, buffer);
521 alloc->pid, size, buffer);
528 alloc->free_async_space -= size;
531 alloc->pid, size, alloc->free_async_space);
532 if (alloc->free_async_space < alloc->buffer_size / 10) {
538 debug_low_async_space_locked(alloc, pid);
544 binder_update_page_range(alloc, 0, (void __user *)
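Lines 378-544 are binder_alloc_new_buf_locked(). The loop around line 431 walks the size-ordered free tree looking for the best fit: descend left when the current buffer is big enough (remembering it as the best candidate so far), descend right when it is too small, and stop on an exact match. The chosen buffer is then split, the remainder reinserted as free (line 512), and the pages backing the used part made resident (line 495); on failure the pages are released again (line 544). The descent, modeled on a plain binary search tree:

    #include <stddef.h>

    struct free_node {
        size_t size;
        struct free_node *left, *right;
    };

    static struct free_node *best_fit(struct free_node *n, size_t want)
    {
        struct free_node *best = NULL;

        while (n) {
            if (want < n->size) {
                best = n;      /* big enough; try to find a tighter fit */
                n = n->left;
            } else if (want > n->size) {
                n = n->right;  /* too small; only larger nodes can fit */
            } else {
                return n;      /* exact fit */
            }
        }
        return best;           /* NULL: no free buffer is large enough */
    }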
552 * @alloc: binder_alloc for this proc
566 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
575 mutex_lock(&alloc->mutex);
576 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
578 mutex_unlock(&alloc->mutex);
593 static void binder_delete_free_buffer(struct binder_alloc *alloc,
599 BUG_ON(alloc->buffers.next == &buffer->entry);
606 alloc->pid, buffer->user_data,
610 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
616 alloc->pid,
625 alloc->pid, buffer->user_data);
632 alloc->pid, buffer->user_data,
635 binder_update_page_range(alloc, 0, buffer_start_page(buffer),
642 static void binder_free_buf_locked(struct binder_alloc *alloc,
647 buffer_size = binder_alloc_buffer_size(alloc, buffer);
655 alloc->pid, buffer, size, buffer_size);
660 BUG_ON(buffer->user_data < alloc->buffer);
661 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
664 alloc->free_async_space += buffer_size;
667 alloc->pid, size, alloc->free_async_space);
670 binder_update_page_range(alloc, 0,
675 rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
677 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
681 rb_erase(&next->rb_node, &alloc->free_buffers);
682 binder_delete_free_buffer(alloc, next);
685 if (alloc->buffers.next != &buffer->entry) {
689 binder_delete_free_buffer(alloc, buffer);
690 rb_erase(&prev->rb_node, &alloc->free_buffers);
694 binder_insert_free_buffer(alloc, buffer);
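Lines 642-694 free a buffer and immediately coalesce it with free neighbours so the region does not fragment: if the next buffer on the address-ordered list is free it is absorbed (lines 677-682), and likewise the previous one (lines 685-690), before the merged result is reinserted into the free tree (line 694). A minimal sketch of that merge on a doubly linked list of address-adjacent buffers:

    #include <stdbool.h>

    struct buf {
        bool free;
        struct buf *prev, *next;   /* neighbours in address order */
    };

    static void free_and_coalesce(struct buf *b)
    {
        b->free = true;

        if (b->next && b->next->free) {    /* absorb the next buffer */
            struct buf *n = b->next;

            b->next = n->next;
            if (n->next)
                n->next->prev = b;
            /* the kernel kfree()s n's descriptor here,
             * via binder_delete_free_buffer() */
        }
        if (b->prev && b->prev->free) {    /* let the previous one absorb us */
            struct buf *p = b->prev;

            p->next = b->next;
            if (b->next)
                b->next->prev = p;
            /* ...and b's descriptor in this branch */
        }
    }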
697 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
701 * @alloc: binder_alloc for this proc
706 void binder_alloc_free_buf(struct binder_alloc *alloc,
713 * increase contention for the alloc mutex if clear_on_free
718 binder_alloc_clear_buf(alloc, buffer);
721 mutex_lock(&alloc->mutex);
722 binder_free_buf_locked(alloc, buffer);
723 mutex_unlock(&alloc->mutex);
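The comment fragment at line 713 explains why binder_alloc_free_buf() clears the buffer before taking alloc->mutex: the memset can be large, and doing it outside the lock keeps the critical section short. The shape of that ordering, sketched with pthreads:

    #include <pthread.h>
    #include <string.h>

    static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void free_buf(unsigned char *data, size_t len, int clear_on_free)
    {
        if (clear_on_free)
            memset(data, 0, len);   /* potentially large; done without the lock */

        pthread_mutex_lock(&alloc_mutex);
        /* ...unlink the buffer, coalesce, update accounting... */
        pthread_mutex_unlock(&alloc_mutex);
    }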
728 * @alloc: alloc structure for this proc
739 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
747 if (alloc->buffer_size) {
752 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
756 alloc->buffer = (void __user *)vma->vm_start;
758 alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
759 sizeof(alloc->pages[0]),
761 if (alloc->pages == NULL) {
763 failure_string = "alloc page array";
770 failure_string = "alloc buffer struct";
774 buffer->user_data = alloc->buffer;
775 list_add(&buffer->entry, &alloc->buffers);
777 binder_insert_free_buffer(alloc, buffer);
778 alloc->free_async_space = alloc->buffer_size / 2;
779 binder_alloc_set_vma(alloc, vma);
780 mmgrab(alloc->vma_vm_mm);
785 kfree(alloc->pages);
786 alloc->pages = NULL;
788 alloc->buffer = NULL;
790 alloc->buffer_size = 0;
795 alloc->pid, vma->vm_start, vma->vm_end,
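Lines 739-795 run once at mmap() time: reject a second mapping (line 747), clamp the region size (line 752), allocate one pages[] slot per page (lines 758-759), seed the buffer list with a single free buffer covering the whole region (lines 774-777), and reserve half the space for async transactions (line 778). A userspace model of that setup, with hypothetical names:

    #include <stdlib.h>

    #define PAGE_SZ 4096u

    struct region {
        unsigned char *buffer;
        size_t buffer_size;
        size_t free_async_space;
        void **pages;
    };

    static int region_init(struct region *r, unsigned char *base, size_t len)
    {
        if (r->buffer_size)
            return -1;                   /* already mapped once */

        r->buffer = base;
        r->buffer_size = len;
        r->pages = calloc(len / PAGE_SZ, sizeof(r->pages[0]));
        if (!r->pages)
            return -1;

        /* the whole region starts out as one free buffer; binder also
         * reserves half the space for async transactions (line 778) */
        r->free_async_space = len / 2;
        return 0;
    }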
801 void binder_alloc_deferred_release(struct binder_alloc *alloc)
808 mutex_lock(&alloc->mutex);
809 BUG_ON(alloc->vma);
811 while ((n = rb_first(&alloc->allocated_buffers))) {
818 binder_alloc_clear_buf(alloc, buffer);
821 binder_free_buf_locked(alloc, buffer);
825 while (!list_empty(&alloc->buffers)) {
826 buffer = list_first_entry(&alloc->buffers,
831 WARN_ON_ONCE(!list_empty(&alloc->buffers));
836 if (alloc->pages) {
839 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
843 if (!alloc->pages[i].page_ptr)
847 &alloc->pages[i].lru);
848 page_addr = alloc->buffer + i * PAGE_SIZE;
851 __func__, alloc->pid, i, page_addr,
853 __free_page(alloc->pages[i].page_ptr);
856 kfree(alloc->pages);
858 mutex_unlock(&alloc->mutex);
859 if (alloc->vma_vm_mm)
860 mmdrop(alloc->vma_vm_mm);
864 __func__, alloc->pid, buffers, page_count);
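Lines 801-864 tear everything down when the process exits: every still-allocated buffer is freed (clearing it first if requested, lines 811-821), leftover buffer descriptors are released (lines 825-831), any resident pages are pulled off the shrinker's LRU and freed (lines 836-856), and finally the mm reference taken at mmap time is dropped (lines 859-860). The page-array half of that teardown, sketched in userspace:

    #include <stdlib.h>

    /* free whatever pages are still resident, then the array itself;
     * free() stands in for __free_page() and kfree() */
    static void release_pages(void **pages, size_t npages)
    {
        size_t i;

        if (!pages)
            return;
        for (i = 0; i < npages; i++) {
            if (!pages[i])
                continue;   /* this page was never made resident */
            free(pages[i]);
        }
        free(pages);
    }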
880 * @alloc: binder_alloc for this proc
886 struct binder_alloc *alloc)
890 mutex_lock(&alloc->mutex);
891 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
894 mutex_unlock(&alloc->mutex);
900 * @alloc: binder_alloc for this proc
903 struct binder_alloc *alloc)
911 mutex_lock(&alloc->mutex);
916 if (binder_alloc_get_vma(alloc) != NULL) {
917 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
918 page = &alloc->pages[i];
927 mutex_unlock(&alloc->mutex);
929 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
934 * @alloc: binder_alloc for this proc
938 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
943 mutex_lock(&alloc->mutex);
944 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
946 mutex_unlock(&alloc->mutex);
953 * @alloc: binder_alloc for this proc
956 * Clears alloc->vma to prevent new incoming transactions from
959 void binder_alloc_vma_close(struct binder_alloc *alloc)
961 binder_alloc_set_vma(alloc, NULL);
983 struct binder_alloc *alloc;
988 alloc = page->alloc;
989 if (!mutex_trylock(&alloc->mutex))
995 index = page - alloc->pages;
996 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
998 mm = alloc->vma_vm_mm;
1004 if (vma && vma != binder_alloc_get_vma(alloc))
1011 trace_binder_unmap_user_start(alloc, index);
1015 trace_binder_unmap_user_end(alloc, index);
1020 trace_binder_unmap_kernel_start(alloc, index);
1025 trace_binder_unmap_kernel_end(alloc, index);
1028 mutex_unlock(&alloc->mutex);
1037 mutex_unlock(&alloc->mutex);
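Lines 983-1037 are the shrinker callback that reclaims one binder page under memory pressure. It uses mutex_trylock (line 989) rather than mutex_lock: reclaim can run while the allocation path holds alloc->mutex and is itself allocating memory, so blocking here could deadlock, whereas skipping the item is always safe. The pattern, with pthreads:

    #include <pthread.h>

    static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* returns 1 if a page was reclaimed, 0 if this item was skipped */
    static int try_reclaim_one(void)
    {
        if (pthread_mutex_trylock(&alloc_mutex) != 0)
            return 0;   /* contended: bail out, reclaim retries later */

        /* ...unmap the user mapping, free the physical page... */

        pthread_mutex_unlock(&alloc_mutex);
        return 1;
    }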
1067 * @alloc: binder_alloc for this proc
1072 void binder_alloc_init(struct binder_alloc *alloc)
1074 alloc->pid = current->group_leader->pid;
1075 mutex_init(&alloc->mutex);
1076 INIT_LIST_HEAD(&alloc->buffers);
1099 * @alloc: binder_alloc for this proc
1116 static inline bool check_buffer(struct binder_alloc *alloc,
1120 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1131 * @alloc: binder_alloc for this proc
1143 * guaranteed that the corresponding elements of @alloc->pages[]
1148 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
1154 (buffer->user_data - alloc->buffer);
1159 lru_page = &alloc->pages[index];
1166 * @alloc: binder_alloc for this proc
1171 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
1174 size_t bytes = binder_alloc_buffer_size(alloc, buffer);
1183 page = binder_alloc_get_page(alloc, buffer,
1196 * @alloc: binder_alloc for this proc
1207 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1213 if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1223 page = binder_alloc_get_page(alloc, buffer,
1238 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1246 if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1256 page = binder_alloc_get_page(alloc, buffer,
1278 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1284 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1288 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1294 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
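Lines 1196-1294 copy data in and out of a buffer whose backing pages are physically discontiguous, so every helper loops page by page: compute the page index and intra-page offset, copy min(bytes, PAGE_SIZE - pgoff), advance, repeat. binder_alloc_clear_buf() (lines 1171-1183) walks the same way with a memset. The loop, in userspace form over a pages[] array like the ones sketched earlier:

    #include <stddef.h>
    #include <string.h>

    #define PAGE_SZ 4096u

    static void copy_to_pages(void **pages, size_t buffer_offset,
                              const void *src, size_t bytes)
    {
        const unsigned char *from = src;

        while (bytes) {
            size_t index = buffer_offset / PAGE_SZ;
            size_t pgoff = buffer_offset % PAGE_SZ;
            /* never cross a page boundary in a single copy */
            size_t chunk = bytes < PAGE_SZ - pgoff ? bytes : PAGE_SZ - pgoff;

            memcpy((unsigned char *)pages[index] + pgoff, from, chunk);
            bytes -= chunk;
            buffer_offset += chunk;
            from += chunk;
        }
    }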