Lines matching refs: alloc (drivers/android/binder_alloc.c)
60 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
63 if (list_is_last(&buffer->entry, &alloc->buffers))
64 return alloc->buffer + alloc->buffer_size - buffer->user_data;
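
For context: a binder buffer's size is never stored; it is derived from the gap between the buffer's start and the next buffer's start, or the end of the mapped region for the last buffer. A minimal sketch of how the two matched lines at 63-64 fit together, assuming the usual binder_buffer_next() list helper for the branch that does not appear in the match output:

    static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                           struct binder_buffer *buffer)
    {
            /* last buffer: size runs to the end of the mmap'ed region */
            if (list_is_last(&buffer->entry, &alloc->buffers))
                    return alloc->buffer + alloc->buffer_size - buffer->user_data;
            /* otherwise: the gap up to the next buffer's start (assumed helper) */
            return binder_buffer_next(buffer)->user_data - buffer->user_data;
    }
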
68 static void binder_insert_free_buffer(struct binder_alloc *alloc,
71 struct rb_node **p = &alloc->free_buffers.rb_node;
79 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
83 alloc->pid, new_buffer_size, new_buffer);
90 buffer_size = binder_alloc_buffer_size(alloc, buffer);
98 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
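
Lines 68-98 come from the insert that keeps the alloc->free_buffers rbtree ordered by chunk size, which is what later lets the allocator do a best-fit lookup as a simple tree walk. A condensed sketch under that assumption; the descent step and the rb_link_node() call are reconstructed and not part of the match output:

    static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                          struct binder_buffer *new_buffer)
    {
            struct rb_node **p = &alloc->free_buffers.rb_node;
            struct rb_node *parent = NULL;
            struct binder_buffer *buffer;
            size_t buffer_size, new_buffer_size;

            new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

            while (*p) {
                    parent = *p;
                    buffer = rb_entry(parent, struct binder_buffer, rb_node);
                    buffer_size = binder_alloc_buffer_size(alloc, buffer);

                    /* keep the tree ordered by free-chunk size */
                    if (new_buffer_size < buffer_size)
                            p = &parent->rb_left;
                    else
                            p = &parent->rb_right;
            }
            rb_link_node(&new_buffer->rb_node, parent, p);
            rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
    }
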
102 struct binder_alloc *alloc, struct binder_buffer *new_buffer)
104 struct rb_node **p = &alloc->allocated_buffers.rb_node;
123 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
127 struct binder_alloc *alloc,
130 struct rb_node *n = alloc->allocated_buffers.rb_node;
161 * @alloc: binder_alloc for this proc
170 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
175 mutex_lock(&alloc->mutex);
176 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
177 mutex_unlock(&alloc->mutex);
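
Lines 170-177 illustrate the locking convention used throughout the file: a public entry point takes alloc->mutex and delegates to a *_locked helper. A sketch of the whole wrapper; the type of the second parameter is an assumption, since it is not visible in the match output:

    struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                       uintptr_t user_ptr)
    {
            struct binder_buffer *buffer;

            mutex_lock(&alloc->mutex);
            buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
            mutex_unlock(&alloc->mutex);
            return buffer;
    }
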
181 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
192 "%d: %s pages %pK-%pK\n", alloc->pid,
198 trace_binder_update_page_range(alloc, allocate, start, end);
204 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
211 if (need_mm && mmget_not_zero(alloc->mm))
212 mm = alloc->mm;
216 vma = alloc->vma;
222 alloc->pid);
231 index = (page_addr - alloc->buffer) / PAGE_SIZE;
232 page = &alloc->pages[index];
235 trace_binder_alloc_lru_start(alloc, index);
240 trace_binder_alloc_lru_end(alloc, index);
247 trace_binder_alloc_page_start(alloc, index);
253 alloc->pid, page_addr);
256 page->alloc = alloc;
263 alloc->pid, user_page_addr);
267 if (index + 1 > alloc->pages_high)
268 alloc->pages_high = index + 1;
270 trace_binder_alloc_page_end(alloc, index);
283 index = (page_addr - alloc->buffer) / PAGE_SIZE;
284 page = &alloc->pages[index];
286 trace_binder_free_lru_start(alloc, index);
291 trace_binder_free_lru_end(alloc, index);
312 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
316 smp_store_release(&alloc->vma, vma);
320 struct binder_alloc *alloc)
323 return smp_load_acquire(&alloc->vma);
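
Lines 312-323 are the paired accessors for alloc->vma: the store-release publishes the vma only after the mapping is fully set up, so a later lock-free load-acquire on the fast path is guaranteed to observe that state. A sketch of both helpers as the matched lines imply them:

    static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
                                            struct vm_area_struct *vma)
    {
            /* pairs with smp_load_acquire() in binder_alloc_get_vma() */
            smp_store_release(&alloc->vma, vma);
    }

    static inline struct vm_area_struct *binder_alloc_get_vma(
                    struct binder_alloc *alloc)
    {
            /* pairs with smp_store_release() in binder_alloc_set_vma() */
            return smp_load_acquire(&alloc->vma);
    }
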
326 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
340 for (n = rb_first(&alloc->allocated_buffers); n != NULL;
347 total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
356 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
359 alloc->pid, pid, num_buffers, total_alloc_size);
360 if (!alloc->oneway_spam_detected) {
361 alloc->oneway_spam_detected = true;
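
Lines 326-361 belong to the oneway-spam heuristic: walk the allocated_buffers tree, total up what the given sender pid has outstanding, and flag it once it holds more than 50 buffers or more than a quarter of the whole mapping (half of the async space). A condensed sketch; the per-buffer pid and async filtering is assumed, since those fields do not appear in the match output:

    static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
    {
            size_t total_alloc_size = 0, num_buffers = 0;
            struct binder_buffer *buffer;
            struct rb_node *n;

            for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                 n = rb_next(n)) {
                    buffer = rb_entry(n, struct binder_buffer, rb_node);
                    /* assumed: only async buffers owned by @pid are counted */
                    if (buffer->pid != pid || !buffer->async_transaction)
                            continue;
                    total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
                    num_buffers++;
            }

            /* more than 50 buffers, or more than 25% of the whole mapping */
            if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
                    if (!alloc->oneway_spam_detected) {
                            alloc->oneway_spam_detected = true;
                            return true;
                    }
            }
            return false;
    }
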
369 struct binder_alloc *alloc,
376 struct rb_node *n = alloc->free_buffers.rb_node;
386 if (!binder_alloc_get_vma(alloc)) {
389 alloc->pid);
399 alloc->pid, data_size, offsets_size);
406 alloc->pid, extra_buffers_size);
413 if (is_async && alloc->free_async_space < size) {
416 alloc->pid, size);
423 buffer_size = binder_alloc_buffer_size(alloc, buffer);
443 for (n = rb_first(&alloc->allocated_buffers); n != NULL;
446 buffer_size = binder_alloc_buffer_size(alloc, buffer);
452 for (n = rb_first(&alloc->free_buffers); n != NULL;
455 buffer_size = binder_alloc_buffer_size(alloc, buffer);
463 alloc->pid, size);
473 buffer_size = binder_alloc_buffer_size(alloc, buffer);
478 alloc->pid, size, buffer, buffer_size);
487 ret = binder_update_page_range(alloc, 1, (void __user *)
497 pr_err("%s: %d failed to alloc new buffer struct\n",
498 __func__, alloc->pid);
504 binder_insert_free_buffer(alloc, new_buffer);
507 rb_erase(best_fit, &alloc->free_buffers);
510 binder_insert_allocated_buffer_locked(alloc, buffer);
513 alloc->pid, size, buffer);
521 alloc->free_async_space -= size;
524 alloc->pid, size, alloc->free_async_space);
525 if (alloc->free_async_space < alloc->buffer_size / 10) {
531 buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
533 alloc->oneway_spam_detected = false;
539 binder_update_page_range(alloc, 0, (void __user *)
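
Lines 369-539 are the core of binder_alloc_new_buf_locked(): sanity-check the requested sizes, charge the async budget, find the smallest free chunk that fits via a best-fit walk of free_buffers, fault in the backing pages with binder_update_page_range(), then split off any remainder as a new free buffer. A sketch of just the best-fit descent, reconstructed around the rb_node lines shown above (size is the padded request size computed by the caller):

            struct rb_node *n = alloc->free_buffers.rb_node;
            struct rb_node *best_fit = NULL;
            struct binder_buffer *buffer;
            size_t buffer_size;

            while (n) {
                    buffer = rb_entry(n, struct binder_buffer, rb_node);
                    buffer_size = binder_alloc_buffer_size(alloc, buffer);

                    if (size < buffer_size) {
                            /* fits: remember it, but keep looking for a tighter fit */
                            best_fit = n;
                            n = n->rb_left;
                    } else if (size > buffer_size) {
                            n = n->rb_right;
                    } else {
                            /* exact fit */
                            best_fit = n;
                            break;
                    }
            }
            /* if best_fit is still NULL here, the allocator reports "no space" */
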
547 * @alloc: binder_alloc for this proc
561 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
570 mutex_lock(&alloc->mutex);
571 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
573 mutex_unlock(&alloc->mutex);
588 static void binder_delete_free_buffer(struct binder_alloc *alloc,
594 BUG_ON(alloc->buffers.next == &buffer->entry);
601 alloc->pid, buffer->user_data,
605 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
611 alloc->pid,
620 alloc->pid, buffer->user_data);
627 alloc->pid, buffer->user_data,
630 binder_update_page_range(alloc, 0, buffer_start_page(buffer),
637 static void binder_free_buf_locked(struct binder_alloc *alloc,
642 buffer_size = binder_alloc_buffer_size(alloc, buffer);
650 alloc->pid, buffer, size, buffer_size);
655 BUG_ON(buffer->user_data < alloc->buffer);
656 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
659 alloc->free_async_space += buffer_size;
662 alloc->pid, size, alloc->free_async_space);
665 binder_update_page_range(alloc, 0,
670 rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
672 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
676 rb_erase(&next->rb_node, &alloc->free_buffers);
677 binder_delete_free_buffer(alloc, next);
680 if (alloc->buffers.next != &buffer->entry) {
684 binder_delete_free_buffer(alloc, buffer);
685 rb_erase(&prev->rb_node, &alloc->free_buffers);
689 binder_insert_free_buffer(alloc, buffer);
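
Lines 637-689 free a buffer: give back its async budget, release the whole pages it covered, move it from allocated_buffers back to free_buffers, and coalesce it with a free neighbour on either side so the free list does not fragment. A sketch of the coalescing step, assuming binder_buffer_next()/binder_buffer_prev() list helpers and a buffer->free flag (none of which appear in the match output):

            rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
            buffer->free = 1;

            /* merge with the following buffer if it is also free */
            if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                    struct binder_buffer *next = binder_buffer_next(buffer);

                    if (next->free) {
                            rb_erase(&next->rb_node, &alloc->free_buffers);
                            binder_delete_free_buffer(alloc, next);
                    }
            }
            /* merge with the preceding buffer if it is free */
            if (alloc->buffers.next != &buffer->entry) {
                    struct binder_buffer *prev = binder_buffer_prev(buffer);

                    if (prev->free) {
                            binder_delete_free_buffer(alloc, buffer);
                            rb_erase(&prev->rb_node, &alloc->free_buffers);
                            buffer = prev;
                    }
            }
            binder_insert_free_buffer(alloc, buffer);
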
692 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
696 * @alloc: binder_alloc for this proc
701 void binder_alloc_free_buf(struct binder_alloc *alloc,
708 * increase contention for the alloc mutex if clear_on_free
713 binder_alloc_clear_buf(alloc, buffer);
716 mutex_lock(&alloc->mutex);
717 binder_free_buf_locked(alloc, buffer);
718 mutex_unlock(&alloc->mutex);
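
Lines 701-718 show why any clear-on-free scrubbing happens before alloc->mutex is taken: zeroing a large buffer under the mutex would only add contention, and (as the comment fragment at line 708 notes) the lock is not needed for correctness there. A sketch of the wrapper, assuming a buffer->clear_on_free flag that is not itself visible in the match output:

    void binder_alloc_free_buf(struct binder_alloc *alloc,
                               struct binder_buffer *buffer)
    {
            /* scrub outside the lock to keep mutex hold times short */
            if (buffer->clear_on_free) {
                    binder_alloc_clear_buf(alloc, buffer);
                    buffer->clear_on_free = false;
            }
            mutex_lock(&alloc->mutex);
            binder_free_buf_locked(alloc, buffer);
            mutex_unlock(&alloc->mutex);
    }
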
723 * @alloc: alloc structure for this proc
734 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
741 if (unlikely(vma->vm_mm != alloc->mm)) {
748 if (alloc->buffer_size) {
753 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
757 alloc->buffer = (void __user *)vma->vm_start;
759 alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
760 sizeof(alloc->pages[0]),
762 if (alloc->pages == NULL) {
764 failure_string = "alloc page array";
771 failure_string = "alloc buffer struct";
775 buffer->user_data = alloc->buffer;
776 list_add(&buffer->entry, &alloc->buffers);
778 binder_insert_free_buffer(alloc, buffer);
779 alloc->free_async_space = alloc->buffer_size / 2;
782 binder_alloc_set_vma(alloc, vma);
787 kfree(alloc->pages);
788 alloc->pages = NULL;
790 alloc->buffer = NULL;
792 alloc->buffer_size = 0;
798 alloc->pid, vma->vm_start, vma->vm_end,
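
Lines 734-798 are the mmap handler that builds the allocator's initial state from the userspace vma: cap the mapping at 4 MB, record its start, allocate the per-page bookkeeping array, seed the buffer list with a single free buffer spanning the whole range, reserve half of it for async transactions, and only then publish the vma. A condensed happy-path sketch under those assumptions (locking, error handling and the buffer->free flag are reconstructed or omitted):

            alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
                                       SZ_4M);
            alloc->buffer = (void __user *)vma->vm_start;

            /* one bookkeeping slot per page of the mapping */
            alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                                   sizeof(alloc->pages[0]), GFP_KERNEL);

            /* a single free buffer initially covers the whole mapping */
            buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
            buffer->user_data = alloc->buffer;
            list_add(&buffer->entry, &alloc->buffers);
            buffer->free = 1;
            binder_insert_free_buffer(alloc, buffer);

            /* half of the space is reserved for oneway (async) transactions */
            alloc->free_async_space = alloc->buffer_size / 2;

            /* publish the vma last, so lock-free readers see initialized state */
            binder_alloc_set_vma(alloc, vma);
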
804 void binder_alloc_deferred_release(struct binder_alloc *alloc)
811 mutex_lock(&alloc->mutex);
812 BUG_ON(alloc->vma);
814 while ((n = rb_first(&alloc->allocated_buffers))) {
821 binder_alloc_clear_buf(alloc, buffer);
824 binder_free_buf_locked(alloc, buffer);
828 while (!list_empty(&alloc->buffers)) {
829 buffer = list_first_entry(&alloc->buffers,
834 WARN_ON_ONCE(!list_empty(&alloc->buffers));
839 if (alloc->pages) {
842 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
846 if (!alloc->pages[i].page_ptr)
850 &alloc->pages[i].lru);
851 page_addr = alloc->buffer + i * PAGE_SIZE;
854 __func__, alloc->pid, i, page_addr,
856 __free_page(alloc->pages[i].page_ptr);
859 kfree(alloc->pages);
861 mutex_unlock(&alloc->mutex);
862 if (alloc->mm)
863 mmdrop(alloc->mm);
867 __func__, alloc->pid, buffers, page_count);
883 * @alloc: binder_alloc for this proc
889 struct binder_alloc *alloc)
893 mutex_lock(&alloc->mutex);
894 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
897 mutex_unlock(&alloc->mutex);
903 * @alloc: binder_alloc for this proc
906 struct binder_alloc *alloc)
914 mutex_lock(&alloc->mutex);
919 if (binder_alloc_get_vma(alloc) != NULL) {
920 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
921 page = &alloc->pages[i];
930 mutex_unlock(&alloc->mutex);
932 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
937 * @alloc: binder_alloc for this proc
941 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
946 mutex_lock(&alloc->mutex);
947 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
949 mutex_unlock(&alloc->mutex);
956 * @alloc: binder_alloc for this proc
959 * Clears alloc->vma to prevent new incoming transactions from
962 void binder_alloc_vma_close(struct binder_alloc *alloc)
964 binder_alloc_set_vma(alloc, NULL);
986 struct binder_alloc *alloc;
991 alloc = page->alloc;
992 if (!mutex_trylock(&alloc->mutex))
998 index = page - alloc->pages;
999 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
1001 mm = alloc->mm;
1007 if (vma && vma != binder_alloc_get_vma(alloc))
1014 trace_binder_unmap_user_start(alloc, index);
1018 trace_binder_unmap_user_end(alloc, index);
1023 trace_binder_unmap_kernel_start(alloc, index);
1028 trace_binder_unmap_kernel_end(alloc, index);
1031 mutex_unlock(&alloc->mutex);
1040 mutex_unlock(&alloc->mutex);
1066 * @alloc: binder_alloc for this proc
1071 void binder_alloc_init(struct binder_alloc *alloc)
1073 alloc->pid = current->group_leader->pid;
1074 alloc->mm = current->mm;
1075 mmgrab(alloc->mm);
1076 mutex_init(&alloc->mutex);
1077 INIT_LIST_HEAD(&alloc->buffers);
1100 * @alloc: binder_alloc for this proc
1117 static inline bool check_buffer(struct binder_alloc *alloc,
1121 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1132 * @alloc: binder_alloc for this proc
1144 * guaranteed that the corresponding elements of @alloc->pages[]
1149 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
1155 (buffer->user_data - alloc->buffer);
1160 lru_page = &alloc->pages[index];
1167 * @alloc: binder_alloc for this proc
1172 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
1175 size_t bytes = binder_alloc_buffer_size(alloc, buffer);
1183 page = binder_alloc_get_page(alloc, buffer,
1194 * @alloc: binder_alloc for this proc
1205 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1211 if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1221 page = binder_alloc_get_page(alloc, buffer,
1236 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1244 if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1252 page = binder_alloc_get_page(alloc, buffer,
1267 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1273 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1277 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1283 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
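
Finally, lines 1267-1283 show that the two public copy helpers are thin wrappers around binder_alloc_do_buffer_copy(), with a bool selecting the copy direction. A sketch of both; the src/dest parameters and their ordering are assumptions, since only the forwarding calls appear in the match output:

    int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
                                    struct binder_buffer *buffer,
                                    binder_size_t buffer_offset,
                                    void *src, size_t bytes)
    {
            /* to_buffer == true: copy from kernel memory into the buffer */
            return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
                                               src, bytes);
    }

    int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
                                      void *dest,
                                      struct binder_buffer *buffer,
                                      binder_size_t buffer_offset,
                                      size_t bytes)
    {
            /* to_buffer == false: copy out of the buffer into kernel memory */
            return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
                                               dest, bytes);
    }
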