Lines Matching refs:cb
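
Every match below dereferences the same struct hl_cb. For orientation, here is a hedged reconstruction of that struct assembled purely from the fields these lines touch; field order and exact types are guesses, and the real definition in the habanalabs driver headers carries additional members not referenced here:

struct hl_cb {
	struct kref		refcount;	/* released through cb_release() */
	struct hl_device	*hdev;
	struct hl_ctx		*ctx;
	struct list_head	pool_list;	/* linkage on hdev->cb_pool */
	struct list_head	va_block_list;	/* device-MMU mapping blocks */
	u64			id;		/* IDR handle handed to userspace */
	spinlock_t		lock;		/* protects the mmap state below */
	void			*kernel_address;
	dma_addr_t		bus_address;
	u32			size;
	u32			mmap_size;	/* bytes still mmapped by userspace */
	u8			mmap;		/* currently mmapped */
	u8			is_pool;
	u8			is_internal;
	u8			is_mmu_mapped;
};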

16 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
39 INIT_LIST_HEAD(&cb->va_block_list);
41 for (bus_addr = cb->bus_address;
42 bus_addr < cb->bus_address + cb->size;
63 list_add_tail(&va_block->node, &cb->va_block_list);
68 bus_addr = cb->bus_address;
70 list_for_each_entry(va_block, &cb->va_block_list, node) {
73 &cb->va_block_list));
88 cb->is_mmu_mapped = true;
93 list_for_each_entry(va_block, &cb->va_block_list, node) {
106 list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
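
Read together, the cb_map_mem() matches (file lines 16-106) show a build-then-unwind pattern: one block descriptor is appended to cb->va_block_list per chunk of the buffer, the list is then walked to program the device MMU, and the error path undoes only the work already done. A minimal sketch of that pattern; struct cb_va_block, reserve_va(), mmu_map() and mmu_unmap() are hypothetical stand-ins for the driver's own block type and MMU helpers, and PAGE_SIZE stands in for the device MMU page size:

struct cb_va_block {			/* hypothetical, for this sketch only */
	struct list_head node;
	u64 start;			/* device virtual address */
	u64 size;
};

static int cb_map_mem_sketch(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct cb_va_block *va_block, *tmp, *mapped;
	dma_addr_t bus_addr;
	int rc;

	INIT_LIST_HEAD(&cb->va_block_list);

	/* One block per page of the command buffer. */
	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += PAGE_SIZE) {
		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			goto free_blocks;
		}
		/* VA reservation failure handling omitted for brevity. */
		va_block->start = reserve_va(ctx, PAGE_SIZE);	/* hypothetical */
		va_block->size = PAGE_SIZE;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	/* Map every block; stop and unwind on the first failure. */
	bus_addr = cb->bus_address;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = mmu_map(ctx, va_block->start, bus_addr, va_block->size);	/* hypothetical */
		if (rc)
			goto unmap_blocks;
		bus_addr += va_block->size;
	}

	cb->is_mmu_mapped = true;
	return 0;

unmap_blocks:
	list_for_each_entry(mapped, &cb->va_block_list, node) {
		if (mapped == va_block)
			break;		/* everything past this point was never mapped */
		mmu_unmap(ctx, mapped->start, mapped->size);	/* hypothetical */
	}
free_blocks:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
	return rc;
}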
115 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
122 list_for_each_entry(va_block, &cb->va_block_list, node)
125 &cb->va_block_list)))
134 list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
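
cb_unmap_mem() (file lines 115-134) is the teardown half of the same pattern: every block is unmapped, then the list is emptied with the _safe iterator because entries are freed while walking. A sketch reusing the hypothetical helpers above:

static void cb_unmap_mem_sketch(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct cb_va_block *va_block, *tmp;

	/* Undo the device-MMU mappings created by cb_map_mem(). */
	list_for_each_entry(va_block, &cb->va_block_list, node)
		mmu_unmap(ctx, va_block->start, va_block->size);	/* hypothetical */

	/* The _safe variant is needed because entries are deleted mid-walk. */
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}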
141 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
143 if (cb->is_internal)
145 (uintptr_t)cb->kernel_address, cb->size);
147 hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
148 cb->kernel_address, cb->bus_address);
150 kfree(cb);
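
cb_fini() (file lines 141-150) shows the two backing stores a CB can have: an internal CB was carved out of a gen_pool and goes back via gen_pool_free(), while a regular CB is DMA-coherent memory released through the ASIC's free hook; the hl_cb struct itself is then kfree()d. A sketch of that split; the pool field name hdev->internal_cb_pool is an assumption, everything else follows the matches:

static void cb_fini_sketch(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		/* Internal CBs live inside a device-internal gen_pool. */
		gen_pool_free(hdev->internal_cb_pool,		/* field name assumed */
				(uintptr_t)cb->kernel_address, cb->size);
	else
		/* Regular CBs are DMA-coherent allocations. */
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}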
153 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
155 if (cb->is_pool) {
157 list_add(&cb->pool_list, &hdev->cb_pool);
160 cb_fini(hdev, cb);
167 struct hl_cb *cb;
169 cb = container_of(ref, struct hl_cb, refcount);
170 hdev = cb->hdev;
172 hl_debugfs_remove_cb(cb);
174 if (cb->is_mmu_mapped)
175 cb_unmap_mem(cb->ctx, cb);
177 hl_ctx_put(cb->ctx);
179 cb_do_release(hdev, cb);
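
cb_do_release() and cb_release() (file lines 153-179) form the kref release path: the callback recovers the hl_cb from its embedded refcount with container_of(), tears down any MMU mapping, drops the context reference, and then either parks a pool CB back on hdev->cb_pool or frees it. A condensed sketch with the locking around the pool list omitted:

static void cb_release_sketch(struct kref *ref)
{
	struct hl_cb *cb = container_of(ref, struct hl_cb, refcount);
	struct hl_device *hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	if (cb->is_pool)
		/* Pool CBs are recycled instead of freed (pool lock omitted). */
		list_add(&cb->pool_list, &hdev->cb_pool);
	else
		cb_fini(hdev, cb);
}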
185 struct hl_cb *cb;
198 cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
200 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
202 if (!cb)
208 kfree(cb);
213 cb->is_internal = true;
214 cb->bus_address = hdev->internal_cb_va_base + cb_offset;
217 &cb->bus_address, GFP_ATOMIC);
220 &cb->bus_address,
228 kfree(cb);
232 cb->kernel_address = p;
233 cb->size = cb_size;
235 return cb;
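
hl_cb_alloc() (file lines 185-235) chooses the backing store at allocation time: an internal CB takes its kernel address from a gen_pool and derives its device address from hdev->internal_cb_va_base, while a regular CB is allocated through the DMA-coherent counterpart of the asic_dma_free_coherent() hook seen in cb_fini(). A sketch with most error reporting trimmed; HL_KERNEL_ASID_ID, the pool field name, the allocation hook name and internal_pool_base() are assumptions or hypothetical stand-ins, and the GFP choice mirrors the GFP_ATOMIC/GFP_KERNEL split at file lines 198-200:

static struct hl_cb *hl_cb_alloc_sketch(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	void *p;

	/* Kernel-context allocations may be called from atomic context. */
	cb = kzalloc(sizeof(*cb),
			ctx_id == HL_KERNEL_ASID_ID ? GFP_ATOMIC : GFP_KERNEL);
	if (!cb)
		return NULL;

	if (internal_cb) {
		/* The pool hands out kernel virtual addresses, matching the
		 * gen_pool_free((uintptr_t)cb->kernel_address, ...) in cb_fini().
		 */
		p = (void *)gen_pool_alloc(hdev->internal_cb_pool, cb_size);	/* field assumed */
		if (!p) {
			kfree(cb);
			return NULL;
		}
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base +
				((uintptr_t)p - internal_pool_base(hdev));	/* hypothetical */
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
				&cb->bus_address, GFP_KERNEL);	/* hook name assumed */
		if (!p) {
			kfree(cb);
			return NULL;
		}
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}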
242 struct hl_cb *cb;
275 cb = list_first_entry(&hdev->cb_pool,
276 typeof(*cb), pool_list);
277 list_del(&cb->pool_list);
288 cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
289 if (!cb) {
295 cb->hdev = hdev;
296 cb->ctx = ctx;
297 hl_ctx_get(hdev, cb->ctx);
307 rc = cb_map_mem(ctx, cb);
313 rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
321 cb->id = (u64) rc;
323 kref_init(&cb->refcount);
324 spin_lock_init(&cb->lock);
330 *handle = cb->id | HL_MMAP_TYPE_CB;
333 hl_debugfs_add_cb(cb);
338 if (cb->is_mmu_mapped)
339 cb_unmap_mem(cb->ctx, cb);
341 hl_ctx_put(cb->ctx);
342 cb_do_release(hdev, cb);
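
hl_cb_create() (file lines 242-342) ties the pieces together: reuse a CB from hdev->cb_pool or allocate a fresh one, pin the context, optionally map the buffer through the device MMU, then publish it in the manager's IDR and return the ID to userspace tagged with HL_MMAP_TYPE_CB. A condensed sketch of the publish-and-unwind tail end; IDR locking and the full parameter list are omitted, and the hl_cb_mgr type name is assumed from the cb_handles field above:

static int cb_publish_sketch(struct hl_cb_mgr *mgr, struct hl_ctx *ctx,
				struct hl_cb *cb, u64 *handle)
{
	int rc;

	cb->ctx = ctx;
	hl_ctx_get(cb->hdev, cb->ctx);

	/* Handles start at 1 so that 0 can mean "no CB". */
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	if (rc < 0)
		goto release_cb;

	cb->id = (u64)rc;
	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/* The type bit lets the mmap entry point route the offset to CB code. */
	*handle = cb->id | HL_MMAP_TYPE_CB;

	return 0;

release_cb:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
	hl_ctx_put(cb->ctx);
	cb_do_release(cb->hdev, cb);

	return rc;
}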
351 struct hl_cb *cb;
364 cb = idr_find(&mgr->cb_handles, handle);
365 if (cb) {
368 kref_put(&cb->refcount, cb_release);
426 struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
429 new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
432 cb->mmap_size = new_mmap_size;
436 spin_lock(&cb->lock);
437 cb->mmap = false;
438 spin_unlock(&cb->lock);
440 hl_cb_put(cb);
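
The VMA close handler (file lines 426-440) shows how partial munmap() is tracked: cb->mmap_size shrinks by the size of the region being closed, and only once the whole mapping is gone does the handler clear cb->mmap and drop the reference that hl_cb_mmap() transferred to the VMA. A sketch:

static void cb_vm_close_sketch(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *)vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
	if (new_mmap_size > 0) {
		/* Partial munmap: remember how much is still mapped. */
		cb->mmap_size = new_mmap_size;
		return;
	}

	/* Last piece unmapped: allow a new mmap and drop the VMA's reference. */
	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}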
451 struct hl_cb *cb;
462 cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
463 if (!cb) {
471 if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
473 "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
474 vma->vm_end - vma->vm_start, cb->size);
489 spin_lock(&cb->lock);
491 if (cb->mmap) {
498 cb->mmap = true;
500 spin_unlock(&cb->lock);
505 * Note: We're transferring the cb reference to
509 vma->vm_private_data = cb;
511 rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
512 cb->bus_address, cb->size);
514 spin_lock(&cb->lock);
515 cb->mmap = false;
519 cb->mmap_size = cb->size;
524 spin_unlock(&cb->lock);
526 hl_cb_put(cb);
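
hl_cb_mmap() (file lines 451-526) looks the CB up by handle (which takes a reference), insists that the user maps exactly ALIGN(cb->size, PAGE_SIZE) bytes, rejects a second concurrent mapping under cb->lock, and then hands the reference to the VMA before calling the ASIC's cb_mmap hook. A condensed sketch with a simplified signature (the real handle is decoded from the mmap offset) and the hpriv->hdev field assumed:

static int hl_cb_mmap_sketch(struct hl_fpriv *hpriv, struct vm_area_struct *vma,
				u32 handle)
{
	struct hl_device *hdev = hpriv->hdev;		/* field assumed */
	unsigned long user_cb_size = vma->vm_end - vma->vm_start;
	struct hl_cb *cb;
	int rc;

	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);	/* takes a reference */
	if (!cb)
		return -EINVAL;

	/* The mapping must cover the whole page-aligned CB, no more, no less. */
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);
	if (cb->mmap) {				/* only one live mapping per CB */
		spin_unlock(&cb->lock);
		rc = -EINVAL;
		goto put_cb;
	}
	cb->mmap = true;
	spin_unlock(&cb->lock);

	/* The reference taken by hl_cb_get() now belongs to the VMA and is
	 * dropped in the close handler sketched above.
	 */
	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		spin_unlock(&cb->lock);
		vma->vm_private_data = NULL;
		goto put_cb;
	}

	cb->mmap_size = cb->size;

	return 0;

put_cb:
	hl_cb_put(cb);
	return rc;
}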
533 struct hl_cb *cb;
536 cb = idr_find(&mgr->cb_handles, handle);
538 if (!cb) {
545 kref_get(&cb->refcount);
549 return cb;
553 void hl_cb_put(struct hl_cb *cb)
555 kref_put(&cb->refcount, cb_release);
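
hl_cb_get() and hl_cb_put() (file lines 533-555) are the standard lookup-plus-refcount pair: the handle is resolved through the manager's IDR, a kref reference is taken before the pointer is returned, and the matching put releases the CB through cb_release() when the last reference drops. A sketch with the IDR lock omitted and an illustrative warning text:

struct hl_cb *hl_cb_get_sketch(struct hl_device *hdev, struct hl_cb_mgr *mgr,
				u32 handle)
{
	struct hl_cb *cb;

	cb = idr_find(&mgr->cb_handles, handle);
	if (!cb) {
		dev_warn(hdev->dev, "CB get failed, no CB with handle %u\n",
				handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	return cb;
}

void hl_cb_put_sketch(struct hl_cb *cb)
{
	/* cb_release() tears the CB down once the last reference is gone. */
	kref_put(&cb->refcount, cb_release);
}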
566 struct hl_cb *cb;
572 idr_for_each_entry(idp, cb, id) {
573 if (kref_put(&cb->refcount, cb_release) != 1)
576 id, cb->ctx->asid);
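
hl_cb_mgr_fini() (file lines 566-576) sweeps the IDR when a file descriptor is closed, dropping the creation reference of every CB the user never destroyed; if that put is not the final one the CB is still in flight somewhere and the driver complains. A sketch; the exact error message is illustrative:

void hl_cb_mgr_fini_sketch(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct idr *idp = &mgr->cb_handles;
	struct hl_cb *cb;
	u32 id;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for context %d is still in use\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}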
586 struct hl_cb *cb;
598 cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
600 WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
601 if (!cb)
604 return cb;
614 struct hl_cb *cb;
621 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
623 if (cb) {
624 cb->is_pool = true;
625 list_add(&cb->pool_list, &hdev->cb_pool);
637 struct hl_cb *cb, *tmp;
639 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
640 list_del(&cb->pool_list);
641 cb_fini(hdev, cb);
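
hl_cb_pool_init() and hl_cb_pool_fini() (file lines 614-641) pre-allocate a set of CBs that are recycled through hdev->cb_pool instead of being freed, and tear that set down with the _safe iterator because cb_fini() frees each entry. A sketch; the pool count field name, the HL_KERNEL_ASID_ID/false arguments and the locking around the list are assumptions or omissions:

int hl_cb_pool_init_sketch(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	for (i = 0; i < hdev->asic_prop.cb_pool_cb_cnt; i++) {	/* count field assumed */
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);	/* args assumed */
		if (!cb)
			return -ENOMEM;	/* partial pool is reclaimed by _fini */

		cb->is_pool = true;
		list_add(&cb->pool_list, &hdev->cb_pool);
	}

	return 0;
}

int hl_cb_pool_fini_sketch(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	/* _safe iteration because cb_fini() frees the entry being removed. */
	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}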