Lines matching refs: cb (habanalabs command buffer code; each entry is a source line number followed by the matching line)
17 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
30 if (cb->is_mmu_mapped)
33 cb->roundup_size = roundup(cb->size, page_size);
35 cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
36 if (!cb->virtual_addr) {
43 rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
45 dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
55 cb->is_mmu_mapped = true;
60 hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
63 gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
68 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
73 hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
77 gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
80 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
82 if (cb->is_internal)
84 (uintptr_t)cb->kernel_address, cb->size);
86 hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);
88 kfree(cb);
91 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
93 if (cb->is_pool) {
94 atomic_set(&cb->is_handle_destroyed, 0);
96 list_add(&cb->pool_list, &hdev->cb_pool);
99 cb_fini(hdev, cb);
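cb_fini() (line 80) frees the CB's backing memory, returning it to the device-internal pool for internal CBs or via hl_asic_dma_free_coherent() otherwise, and then frees the descriptor; cb_do_release() (line 91) instead recycles pooled CBs onto hdev->cb_pool. A rough userspace model of that split, with a plain singly linked list standing in for the kernel list and free() standing in for the DMA/pool free helpers:

#include <stdbool.h>
#include <stdlib.h>

struct cb_model {
        struct cb_model *next;          /* stand-in for the pool_list link */
        void  *kernel_address;
        bool   is_internal;
        bool   is_pool;
        int    is_handle_destroyed;
};

static struct cb_model *cb_pool;        /* stand-in for hdev->cb_pool */

static void cb_fini_model(struct cb_model *cb)
{
        if (cb->is_internal) {
                /* an internal CB would go back to the device-internal pool
                 * here (the gen_pool_free() continuation at line 84) */
        } else {
                /* stands in for hl_asic_dma_free_coherent() */
                free(cb->kernel_address);
        }
        free(cb);                       /* kfree(cb) */
}

static void cb_do_release_model(struct cb_model *cb)
{
        if (cb->is_pool) {
                /* recycle: clear the destroy flag so the handle can be
                 * reused, then park the CB back on the pool list */
                cb->is_handle_destroyed = 0;
                cb->next = cb_pool;
                cb_pool = cb;
        } else {
                cb_fini_model(cb);
        }
}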
106 struct hl_cb *cb = NULL;
119 cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
121 if (!cb)
122 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
124 if (!cb)
130 kfree(cb);
135 cb->is_internal = true;
136 cb->bus_address = hdev->internal_cb_va_base + cb_offset;
138 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
140 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
142 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
150 kfree(cb);
154 cb->kernel_address = p;
155 cb->size = cb_size;
157 return cb;
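The CB allocator referenced across lines 106-157 (called as hl_cb_alloc() at lines 212 and 483) allocates the descriptor with a GFP_ATOMIC attempt and a GFP_KERNEL fallback (lines 119-122), then allocates the buffer itself and frees the descriptor if that fails (line 150); internal CBs instead take their storage from an internal pool and derive bus_address from internal_cb_va_base (lines 135-136), which the sketch below omits. alloc_atomic()/alloc_blocking() are hypothetical stand-ins for the two GFP variants, and the sketch collapses the buffer allocation into a single call since the conditions selecting the flags at lines 138-142 are not visible in the listing.

#include <stddef.h>
#include <stdlib.h>

struct cb_model {
        void  *kernel_address;
        size_t size;
};

/* Hypothetical stand-ins: alloc_atomic() models a GFP_ATOMIC allocation
 * that may fail fast, alloc_blocking() a GFP_KERNEL one that may sleep. */
static void *alloc_atomic(size_t n)   { return calloc(1, n); }
static void *alloc_blocking(size_t n) { return calloc(1, n); }

static struct cb_model *cb_alloc_model(size_t cb_size)
{
        struct cb_model *cb;
        void *p;

        /* descriptor: non-sleeping attempt first, sleeping fallback */
        cb = alloc_atomic(sizeof(*cb));
        if (!cb)
                cb = alloc_blocking(sizeof(*cb));
        if (!cb)
                return NULL;

        /* buffer allocation; the driver chooses GFP flags per path */
        p = alloc_blocking(cb_size);
        if (!p) {
                free(cb);               /* unwind the descriptor (kfree(cb)) */
                return NULL;
        }

        cb->kernel_address = p;
        cb->size = cb_size;
        return cb;
}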
170 struct hl_cb *cb = buf->private;
172 hl_debugfs_remove_cb(cb);
174 if (cb->is_mmu_mapped)
175 cb_unmap_mem(cb->ctx, cb);
177 hl_ctx_put(cb->ctx);
179 cb_do_release(cb->hdev, cb);
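The buffer release path at lines 170-179 tears things down in a fixed order: debugfs entry first, then the device-MMU mapping if one exists, then the context reference, and finally the CB itself. A compact sketch of that ordering, with every helper a hypothetical stand-in for hl_debugfs_remove_cb(), cb_unmap_mem(), hl_ctx_put() and cb_do_release():

#include <stdbool.h>

struct ctx_model { int refcount; };

struct cb_model {
        struct ctx_model *ctx;
        bool is_mmu_mapped;
};

/* Hypothetical stand-ins for the driver helpers named above. */
static void debugfs_remove_model(struct cb_model *cb) { (void)cb; }
static void cb_unmap_mem_model(struct cb_model *cb)   { (void)cb; }
static void ctx_put_model(struct ctx_model *ctx)      { ctx->refcount--; }
static void cb_do_release_model(struct cb_model *cb)  { (void)cb; }

static void cb_buf_release_model(struct cb_model *cb)
{
        debugfs_remove_model(cb);       /* drop the debugfs entry */
        if (cb->is_mmu_mapped)
                cb_unmap_mem_model(cb); /* unmap only if a mapping exists */
        ctx_put_model(cb->ctx);         /* release the context reference */
        cb_do_release_model(cb);        /* recycle or free the CB */
}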
185 struct hl_cb *cb;
199 cb = list_first_entry(&cb_args->hdev->cb_pool,
200 typeof(*cb), pool_list);
201 list_del(&cb->pool_list);
212 cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb);
213 if (!cb)
217 cb->hdev = cb_args->hdev;
218 cb->ctx = cb_args->ctx;
219 cb->buf = buf;
220 cb->buf->mappable_size = cb->size;
221 cb->buf->private = cb;
223 hl_ctx_get(cb->ctx);
233 rc = cb_map_mem(cb_args->ctx, cb);
238 hl_debugfs_add_cb(cb);
243 hl_ctx_put(cb->ctx);
244 cb_do_release(cb_args->hdev, cb);
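The allocation path at lines 185-244 prefers recycling a CB from hdev->cb_pool (lines 199-201) and only calls hl_cb_alloc() when none is available (line 212); it then wires the CB to the mmap buffer and context, optionally maps it, and on a later failure drops the context reference and releases the CB again (lines 243-244). The sketch below models only the pool-or-allocate decision; whatever locking and eligibility checks the driver applies around the pool are not visible in the listing and are left out.

#include <stddef.h>
#include <stdlib.h>

struct cb_model {
        struct cb_model *next;          /* stand-in for the pool_list link */
        size_t size;
};

static struct cb_model *cb_pool;        /* stand-in for hdev->cb_pool */

static struct cb_model *cb_get_or_alloc_model(size_t cb_size)
{
        struct cb_model *cb;

        if (cb_pool) {
                /* list_first_entry() + list_del() in the listing */
                cb = cb_pool;
                cb_pool = cb->next;
                return cb;
        }

        /* no pooled CB available: fall back to a fresh allocation,
         * the role hl_cb_alloc() plays at line 212 */
        cb = calloc(1, sizeof(*cb));
        if (!cb)
                return NULL;
        cb->size = cb_size;
        return cb;
}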
252 struct hl_cb *cb = buf->private;
254 return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address,
255 cb->bus_address, cb->size);
305 struct hl_cb *cb;
308 cb = hl_cb_get(mmg, cb_handle);
309 if (!cb) {
316 rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
317 hl_cb_put(cb);
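The destroy path at lines 305-317 guards against a handle being destroyed twice with atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1): only the caller that observes 0 proceeds, and the reference taken by hl_cb_get() is dropped either way. The same guard expressed with C11 atomics, as a standalone sketch:

#include <stdatomic.h>
#include <stdbool.h>

struct cb_model {
        atomic_int is_handle_destroyed;
};

/* Only the caller that flips 0 -> 1 wins and may go on to destroy the
 * handle; a concurrent or repeated destroy sees the flag already set. */
static bool cb_try_destroy_model(struct cb_model *cb)
{
        int expected = 0;

        return atomic_compare_exchange_strong(&cb->is_handle_destroyed,
                                              &expected, 1);
}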
337 struct hl_cb *cb;
340 cb = hl_cb_get(mmg, handle);
341 if (!cb) {
348 if (cb->is_mmu_mapped) {
349 *device_va = cb->virtual_addr;
356 *usage_cnt = atomic_read(&cb->cs_cnt);
360 hl_cb_put(cb);
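The info query at lines 337-360 reports the device VA for a CB that is mapped through the device MMU and otherwise reads the command-submission usage count, then drops the lookup reference with hl_cb_put(). The sketch below assumes, since the surrounding conditions are not visible in the listing, that the choice between the two outputs hinges solely on is_mmu_mapped.

#include <stdbool.h>
#include <stdint.h>

struct cb_model {
        bool     is_mmu_mapped;
        uint64_t virtual_addr;
        int      cs_cnt;
};

static void cb_info_model(const struct cb_model *cb, uint64_t *device_va,
                          uint32_t *usage_cnt)
{
        if (cb->is_mmu_mapped)
                *device_va = cb->virtual_addr;          /* report the device VA */
        else
                *usage_cnt = (uint32_t)cb->cs_cnt;      /* report the CS usage count */
}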
438 void hl_cb_put(struct hl_cb *cb)
440 hl_mmap_mem_buf_put(cb->buf);
447 struct hl_cb *cb;
458 cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle);
460 if (!cb) {
466 return cb;
476 struct hl_cb *cb;
483 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
485 if (cb) {
486 cb->is_pool = true;
487 list_add(&cb->pool_list, &hdev->cb_pool);
499 struct hl_cb *cb, *tmp;
501 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
502 list_del(&cb->pool_list);
503 cb_fini(hdev, cb);
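Pool setup (lines 476-487) pre-allocates CBs of cb_pool_cb_size, marks them is_pool and chains them onto hdev->cb_pool; pool teardown (lines 499-503) walks that list with list_for_each_entry_safe() because cb_fini() frees each node while iterating. The same "remember the next node before freeing the current one" rule, sketched with a plain singly linked list:

#include <stdlib.h>

struct cb_model {
        struct cb_model *next;          /* stand-in for the pool_list link */
};

static void cb_pool_fini_model(struct cb_model *pool)
{
        struct cb_model *cb = pool, *tmp;

        while (cb) {
                tmp = cb->next;         /* grab the next node first ... */
                free(cb);               /* ... because this one is freed now */
                cb = tmp;
        }
}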