Lines Matching defs:hdev

29 static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size)
31 struct asic_fixed_properties *prop = &hdev->asic_prop;
42 dev_err(hdev->dev, "user page size (%#llx) is not a power of 2\n", psize);
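
The set_alloc_page_size() lines above show the driver rejecting a user page size that is not a power of two before falling back to the ASIC default. A minimal standalone sketch of that validation; check_user_page_size() and the -1 return are illustrative, not the driver's code:

    #include <stdint.h>

    /* A power of two has exactly one bit set, so x & (x - 1) clears
     * that bit and must yield zero. Zero is rejected separately. */
    static int check_user_page_size(uint64_t psize)
    {
        if (psize == 0 || (psize & (psize - 1)) != 0)
            return -1;  /* the driver logs the dev_err above and fails */
        return 0;
    }

In-kernel code would typically express the same test with is_power_of_2() from <linux/log2.h>.
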
90 struct hl_device *hdev = ctx->hdev;
91 struct hl_vm *vm = &hdev->vm;
100 rc = set_alloc_page_size(hdev, args, &page_size);
108 dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
121 dev_err(hdev->dev,
163 dev_err(hdev->dev,
179 dev_err(hdev->dev, "Failed to get handle for page\n");
190 atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
215 * @hdev: habanalabs device structure.
225 static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
237 rc = hl_pin_host_memory(hdev, addr, size, userptr);
247 rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
249 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
256 hl_unpin_host_memory(hdev, userptr);
266 * @hdev: habanalabs device structure.
273 static void dma_unmap_host_va(struct hl_device *hdev,
276 hl_unpin_host_memory(hdev, userptr);
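
dma_map_host_va() acquires two resources in order, pinning the user memory (hl_pin_host_memory at line 237) and then mapping the scatter-gather table (line 247); line 256 is the unwind, unpinning when the map fails. dma_unmap_host_va() releases in the same reverse order. A standalone sketch of that pattern with hypothetical helpers:

    struct buf { int pinned, mapped; };

    static int  pin_pages(struct buf *b)   { b->pinned = 1; return 0; }
    static void unpin_pages(struct buf *b) { b->pinned = 0; }
    static int  dma_map(struct buf *b)     { b->mapped = 1; return 0; }

    /* Acquire in order, release in reverse on failure. */
    static int map_host_buffer(struct buf *b)
    {
        int rc;

        rc = pin_pages(b);          /* stage 1: pin user memory */
        if (rc)
            return rc;

        rc = dma_map(b);            /* stage 2: map for DMA */
        if (rc)
            unpin_pages(b);         /* undo stage 1 before returning */

        return rc;
    }
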
303 * @hdev: habanalabs device structure.
312 static void free_phys_pg_pack(struct hl_device *hdev,
315 struct hl_vm *vm = &hdev->vm;
355 struct hl_device *hdev = ctx->hdev;
356 struct hl_vm *vm = &hdev->vm;
364 dev_err(hdev->dev, "free device memory failed, no match for handle %u\n", handle);
370 dev_err(hdev->dev, "handle %u is mapped, cannot free\n", handle);
381 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
383 free_phys_pg_pack(hdev, phys_pg_pack);
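
The free path above (lines 355–383) looks the handle up in the VM structure, refuses to free memory that is still mapped, and only then releases the dram_used_mem accounting. A standalone sketch of that guard ordering; the types, the -EBUSY choice, and free_device_memory() here are illustrative rather than the driver's exact code:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdint.h>

    struct pg_pack {
        uint32_t handle;
        int      mapping_cnt;   /* >0 while any VA still points here */
        uint64_t total_size;
    };

    static atomic_uint_fast64_t dram_used_mem;

    static int free_device_memory(struct pg_pack *pack, uint32_t handle)
    {
        if (!pack || pack->handle != handle)
            return -EINVAL;     /* "no match for handle %u" */
        if (pack->mapping_cnt > 0)
            return -EBUSY;      /* "handle %u is mapped, cannot free" */

        atomic_fetch_sub(&dram_used_mem, pack->total_size);
        return 0;
    }
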
390 * @hdev: habanalabs device structure.
398 static void clear_va_list_locked(struct hl_device *hdev,
411 * @hdev: habanalabs device structure.
419 static void print_va_list_locked(struct hl_device *hdev,
425 dev_dbg(hdev->dev, "print va list:\n");
428 dev_dbg(hdev->dev,
436 * @hdev: pointer to the habanalabs device structure.
446 static void merge_va_blocks_locked(struct hl_device *hdev,
471 * @hdev: pointer to the habanalabs device structure.
482 static int add_va_block_locked(struct hl_device *hdev,
488 print_va_list_locked(hdev, va_list);
494 dev_err(hdev->dev,
517 merge_va_blocks_locked(hdev, va_list, va_block);
519 print_va_list_locked(hdev, va_list);
526 * @hdev: pointer to the habanalabs device structure.
534 static inline int add_va_block(struct hl_device *hdev,
540 rc = add_va_block_locked(hdev, &va_range->list, start, end);
579 * @hdev: pointer to the habanalabs device structure.
593 static u64 get_va_block(struct hl_device *hdev,
600 struct asic_fixed_properties *prop = &hdev->asic_prop;
606 bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
630 dev_err(hdev->dev,
636 dev_dbg(hdev->dev,
644 print_va_list_locked(hdev, &va_range->list);
688 dev_err(hdev->dev, "no available va block for size %llu\n",
697 dev_err(hdev->dev,
727 rc = add_va_block_locked(hdev, &va_range->list, prev_start, prev_end);
734 print_va_list_locked(hdev, &va_range->list);
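
Lines 398–734 are the device VA allocator: get_va_block() takes a first-fit cut out of a sorted free list (optionally steered by a hint address), while add_va_block_locked() and merge_va_blocks_locked() return a range and coalesce it with adjacent blocks. A compressed standalone sketch of such a free list; it omits the driver's hints, alignment, and locking, and every name is a simplification:

    #include <stdint.h>
    #include <stdlib.h>

    struct va_block { uint64_t start, end; struct va_block *next; };

    /* Insert [start, end] into an address-sorted list and merge it
     * with any free block it overlaps or abuts. */
    static void add_va_block(struct va_block **list, uint64_t start, uint64_t end)
    {
        struct va_block **pp = list, *b;

        while (*pp && (*pp)->end + 1 < start)
            pp = &(*pp)->next;

        if (*pp && (*pp)->start <= end + 1) {   /* touches: extend in place */
            if (start < (*pp)->start)
                (*pp)->start = start;
            if (end > (*pp)->end)
                (*pp)->end = end;
            b = (*pp)->next;                    /* grown block may now touch
                                                 * its successor too */
            if (b && (*pp)->end + 1 >= b->start) {
                (*pp)->end = b->end;
                (*pp)->next = b->next;
                free(b);
            }
            return;
        }

        b = malloc(sizeof(*b));
        if (!b)
            return;                             /* sketch: drop range on OOM */
        b->start = start;
        b->end = end;
        b->next = *pp;
        *pp = b;
    }

    /* First-fit carve from the front of the first large-enough block. */
    static uint64_t get_va_block(struct va_block **list, uint64_t size)
    {
        struct va_block **pp, *b;

        for (pp = list; (b = *pp); pp = &b->next) {
            if (b->end - b->start + 1 < size)
                continue;
            uint64_t addr = b->start;
            b->start += size;
            if (b->start > b->end) {            /* block fully consumed */
                *pp = b->next;
                free(b);
            }
            return addr;
        }
        return 0;                               /* "no available va block" */
    }
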
743 * @hdev: pointer to the habanalabs device structure.
756 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
759 return get_va_block(hdev, ctx->va_range[type], size, 0,
792 * @hdev: pointer to the habanalabs device structure
800 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
808 dev_err(hdev->dev,
814 rc = add_va_block(hdev, ctx->va_range[type], start_addr,
817 dev_warn(hdev->dev,
845 huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
946 struct hl_device *hdev = ctx->hdev;
958 dev_err(hdev->dev,
972 is_host_addr = !hl_is_dram_va(hdev, vaddr);
978 dev_warn_ratelimited(hdev->dev,
993 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
1009 struct hl_device *hdev = ctx->hdev;
1014 is_host_addr = !hl_is_dram_va(hdev, vaddr);
1021 dev_warn_ratelimited(hdev->dev,
1032 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
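
Both unmap loops above (lines 946–1032) throttle a slow-path action: on Palladium (hdev->pldm) it runs every iteration, otherwise only for host addresses and only once every 0x8000 pages, which is what the (i & 0x7FFF) == 0 mask selects. A standalone sketch of that power-of-two throttling idiom:

    #include <stdint.h>

    static void slow_path(void) { /* e.g. sleep or flush */ }

    static void unmap_many(uint64_t npages)
    {
        for (uint64_t i = 0; i < npages; i++) {
            /* ... unmap page i ... */

            /* The mask selects every 0x8000th iteration; the trick
             * only works because the period is a power of two. */
            if ((i & 0x7FFF) == 0)
                slow_path();
        }
    }
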
1054 struct hl_device *hdev = ctx->hdev;
1058 struct hl_vm *vm = &hdev->vm;
1067 do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);
1075 u32 page_size = hdev->asic_prop.pmmu.page_size,
1076 huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
1078 rc = dma_map_host_va(hdev, addr, size, &userptr);
1085 dev_err(hdev->dev,
1123 dev_err(hdev->dev,
1140 va_block_align = hdev->asic_prop.dmmu.page_size;
1149 dev_err(hdev->dev,
1165 dev_err(hdev->dev,
1171 dev_dbg(hdev->dev,
1176 ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
1180 dev_err(hdev->dev, "no available va block for handle %u\n",
1186 mutex_lock(&hdev->mmu_lock);
1190 dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
1191 mutex_unlock(&hdev->mmu_lock);
1195 rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
1197 mutex_unlock(&hdev->mmu_lock);
1225 free_phys_pg_pack(hdev, phys_pg_pack);
1230 if (add_va_block(hdev, va_range, ret_vaddr,
1232 dev_warn(hdev->dev,
1242 free_phys_pg_pack(hdev, phys_pg_pack);
1245 dma_unmap_host_va(hdev, userptr);
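
The map path (lines 1054–1245) acquires resources in sequence: a DMA mapping or page pack, a VA block, and the MMU mapping taken under mmu_lock; lines 1225–1245 are the unwind labels that release them in reverse when a later stage fails. A minimal standalone sketch of that goto-based unwinding convention, with hypothetical stage names:

    static int  dma_map_stage(void)     { return 0; }
    static void dma_unmap_stage(void)   { }
    static int  va_block_stage(void)    { return 0; }
    static void va_block_release(void)  { }
    static int  mmu_map_stage(void)     { return 0; }

    static int map_device_va(void)
    {
        int rc;

        rc = dma_map_stage();
        if (rc)
            return rc;

        rc = va_block_stage();
        if (rc)
            goto unmap_dma;

        rc = mmu_map_stage();
        if (rc)
            goto release_va_block;

        return 0;

    release_va_block:
        va_block_release();     /* undo stage 2 */
    unmap_dma:
        dma_unmap_stage();      /* undo stage 1 */
        return rc;
    }
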
1278 struct hl_device *hdev = ctx->hdev;
1286 prop = &hdev->asic_prop;
1293 dev_err(hdev->dev, "unmap failed, no mem hnode for vaddr 0x%llx\n", vaddr);
1299 dev_err(hdev->dev, "failed to unmap %#llx, memory is exported\n", vaddr);
1315 dev_err(hdev->dev,
1322 hdev->asic_prop.pmmu.page_size)
1331 dev_warn(hdev->dev,
1339 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1352 mutex_lock(&hdev->mmu_lock);
1362 rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
1365 mutex_unlock(&hdev->mmu_lock);
1376 tmp_rc = add_va_block(hdev, va_range, vaddr,
1379 dev_warn(hdev->dev,
1391 free_phys_pg_pack(hdev, phys_pg_pack);
1392 dma_unmap_host_va(hdev, userptr);
1399 free_phys_pg_pack(hdev, phys_pg_pack);
1408 static int map_block(struct hl_device *hdev, u64 address, u64 *handle, u32 *size)
1417 rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
1463 struct hl_device *hdev = hpriv->hdev;
1478 dev_err(hdev->dev,
1489 rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
1534 static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
1544 prop = &hdev->asic_prop;
1554 dev_err_ratelimited(hdev->dev,
1616 bar_address = hdev->dram_pci_bar_start + cur_device_address;
1643 bar_address = hdev->dram_pci_bar_start +
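
alloc_sgt_from_device_pages() (lines 1534–1643) builds scatter-gather entries from device pages and, on lines 1616 and 1643, turns a device DRAM address into a CPU-visible address by offsetting into the PCI BAR. A standalone sketch of the two ideas, coalescing contiguous pages into extents and then applying the BAR offset; the names and the translation rule are simplifications of the driver's logic:

    #include <stdint.h>
    #include <stdio.h>

    static void pages_to_extents(const uint64_t *pages, uint64_t npages,
                                 uint64_t page_size, uint64_t bar_start,
                                 uint64_t dram_base)
    {
        if (npages == 0)
            return;

        uint64_t run_start = pages[0], run_len = page_size;

        for (uint64_t i = 1; i <= npages; i++) {
            if (i < npages && pages[i] == pages[i - 1] + page_size) {
                run_len += page_size;   /* still contiguous: grow the run */
                continue;
            }
            printf("extent: bar=%#llx len=%#llx\n",
                   (unsigned long long)(bar_start + (run_start - dram_base)),
                   (unsigned long long)run_len);
            if (i < npages) {
                run_start = pages[i];   /* start a new run */
                run_len = page_size;
            }
        }
    }

Coalescing matters because each scatter-gather entry has per-entry overhead; contiguous DRAM pages collapse into one large entry.
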
1687 struct hl_device *hdev;
1691 hdev = hl_dmabuf->ctx->hdev;
1693 rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true);
1706 struct hl_device *hdev;
1710 hdev = hl_dmabuf->ctx->hdev;
1714 dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
1719 sgt = alloc_sgt_from_device_pages(hdev,
1727 sgt = alloc_sgt_from_device_pages(hdev,
1736 dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
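
The dma-buf attach callback (lines 1687–1693) asks the PCI core whether the importing device has a usable peer-to-peer path to this device, and the map callback (lines 1706–1736) refuses to build an sg_table when it does not. A kernel-style fragment of that gate, simplified from the listing rather than the complete callback:

    /* pci_p2pdma_distance() returns a negative value when no P2P
     * path exists between the exporter's PCI device and the
     * importer; the map callback then bails out with the dev_dbg
     * on line 1714. */
    rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true /* verbose */);
    if (rc < 0)
        attachment->peer2peer = false;  /* importer cannot reach our BAR */
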
1768 struct hl_device *hdev = ctx->hdev;
1776 dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
1782 dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
1817 atomic_dec(&ctx->hdev->dmabuf_export_cnt);
1838 struct hl_device *hdev = ctx->hdev;
1848 dev_err(hdev->dev, "failed to export dma-buf\n");
1854 dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
1861 atomic_inc(&ctx->hdev->dmabuf_export_cnt);
1879 static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
1882 dev_dbg(hdev->dev,
1889 dev_dbg(hdev->dev,
1898 static int validate_export_params_no_mmu(struct hl_device *hdev, u64 device_addr, u64 size)
1900 struct asic_fixed_properties *prop = &hdev->asic_prop;
1904 rc = validate_export_params_common(hdev, device_addr, size);
1911 dev_dbg(hdev->dev,
1917 bar_address = hdev->dram_pci_bar_start + (device_addr - prop->dram_base_address);
1919 if ((bar_address + size) > (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
1921 dev_dbg(hdev->dev,
1930 static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 size, u64 offset,
1933 struct asic_fixed_properties *prop = &hdev->asic_prop;
1937 rc = validate_export_params_common(hdev, device_addr, size);
1942 dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
1949 bar_address = hdev->dram_pci_bar_start +
1953 (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
1955 dev_dbg(hdev->dev,
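
validate_export_params_common(), validate_export_params_no_mmu(), and validate_export_params() (lines 1879–1955) bound-check the exported span against DRAM and the PCI BAR; the trailing || on line 1919 continues, in the full source, into an explicit wrap-around check on bar_address + size. A standalone sketch of the same validation written the rearranged way, which cannot wrap at all (names hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    /* "addr + size <= win_start + win_size" rewritten so neither
     * side can overflow: both subtractions are of a smaller value
     * from a larger one. */
    static bool span_in_window(uint64_t addr, uint64_t size,
                               uint64_t win_start, uint64_t win_size)
    {
        if (addr < win_start)
            return false;
        if (size > win_size)
            return false;
        return addr - win_start <= win_size - size;
    }

The driver's form (add, then compare, then test for wrap) and this form are equivalent; the rearranged version simply avoids needing the extra wrap clause.
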
1967 static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev,
1971 struct hl_vm *vm = &hdev->vm;
1977 dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) hnode->handle);
1984 dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", hnode->handle);
2014 struct hl_device *hdev;
2018 hdev = ctx->hdev;
2019 prop = &hdev->asic_prop;
2023 dev_dbg(hdev->dev, "offset is not allowed in device without virtual memory\n");
2039 phys_pg_pack = get_phys_pg_pack_from_hash_node(hdev, hnode);
2044 rc = validate_export_params(hdev, export_addr, size, offset, phys_pg_pack);
2052 rc = validate_export_params_no_mmu(hdev, export_addr, size);
2178 struct hl_device *hdev = hpriv->hdev;
2184 if (!hl_device_operational(hdev, &status)) {
2185 dev_dbg_ratelimited(hdev->dev,
2187 hdev->status[status]);
2194 dev_err(hdev->dev,
2208 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2212 &hdev->dram_used_mem);
2214 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2237 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2241 &hdev->dram_used_mem);
2243 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2264 rc = map_block(hdev, args->in.map_block.block_addr,
2285 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2294 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
2301 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
2313 dev_err(hdev->dev,
2328 dev_err(hdev->dev, "failed to create SG table from pages\n");
2343 * @hdev: pointer to the habanalabs device structure.
2352 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
2360 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
2370 dev_err(hdev->dev,
2391 rc = get_user_memory(hdev, addr, size, npages, start, offset,
2394 dev_err(hdev->dev,
2400 hl_debugfs_add_userptr(hdev, userptr);
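
hl_pin_host_memory() (lines 2352–2400) rejects an invalid size and then hands get_user_memory() the page count covering the user span. A standalone sketch of that page arithmetic for an unaligned span, assuming 4 KiB pages (the kernel derives these constants from PAGE_SHIFT):

    #include <stdint.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Pages spanned by [addr, addr + size): round the start down and
     * the end up to page boundaries, then count. E.g. addr = 0x1fff,
     * size = 2 touches pages 0x1000 and 0x2000, so the result is 2. */
    static uint64_t span_npages(uint64_t addr, uint64_t size)
    {
        uint64_t start = addr & PAGE_MASK;
        uint64_t end = (addr + size + PAGE_SIZE - 1) & PAGE_MASK;

        return (end - start) / PAGE_SIZE;
    }
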
2411 * @hdev: pointer to the habanalabs device structure
2418 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
2420 hl_debugfs_remove_userptr(hdev, userptr);
2423 hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
2436 * @hdev: pointer to the habanalabs device structure.
2443 void hl_userptr_delete_list(struct hl_device *hdev,
2449 hl_unpin_host_memory(hdev, userptr);
2458 * @hdev: pointer to the habanalabs device structure.
2468 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
2482 * @hdev: pointer to the habanalabs device structure.
2493 static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
2521 dev_err(hdev->dev, "too small vm range for va list\n");
2525 rc = add_va_block(hdev, va_range, start, end);
2528 dev_err(hdev->dev, "Failed to init host va list\n");
2541 * @hdev: pointer to the habanalabs structure.
2547 static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
2550 clear_va_list_locked(hdev, &va_range->list);
2588 struct hl_device *hdev = ctx->hdev;
2602 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
2611 rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_HOST,
2614 dev_err(hdev->dev, "failed to init host vm range\n");
2618 if (hdev->pmmu_huge_range) {
2621 rc = va_range_init(hdev,
2626 dev_err(hdev->dev,
2638 rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_DRAM,
2641 dev_err(hdev->dev, "failed to init dram vm range\n");
2645 hl_debugfs_add_ctx_mem_hash(hdev, ctx);
2652 if (hdev->pmmu_huge_range) {
2654 clear_va_list_locked(hdev,
2659 if (hdev->pmmu_huge_range)
2662 clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
2677 struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
2688 if (ctx->hdev->mmu_disable)
2730 struct hl_device *hdev = ctx->hdev;
2732 struct hl_vm *vm = &hdev->vm;
2738 if (hdev->mmu_disable)
2741 hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
2747 if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
2748 dev_dbg(hdev->dev,
2752 dev_dbg(hdev->dev,
2759 mutex_lock(&hdev->mmu_lock);
2762 hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
2763 hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
2765 mutex_unlock(&hdev->mmu_lock);
2772 dev_dbg(hdev->dev,
2776 atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
2783 free_phys_pg_pack(hdev, phys_pg_list);
2785 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
2786 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
2788 if (hdev->pmmu_huge_range)
2789 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2799 !hdev->asic_prop.dram_supports_virtual_memory)
2800 atomic64_set(&hdev->dram_used_mem, 0);
2805 * @hdev: pointer to the habanalabs device structure.
2812 int hl_vm_init(struct hl_device *hdev)
2814 struct asic_fixed_properties *prop = &hdev->asic_prop;
2815 struct hl_vm *vm = &hdev->vm;
2826 dev_err(hdev->dev, "Failed to create dram page pool\n");
2837 dev_err(hdev->dev,
2845 atomic64_set(&hdev->dram_used_mem, 0);
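
hl_vm_init() (lines 2812–2845) creates the DRAM page pool that the allocation path from line 90 onward draws from; in the driver this is a genalloc pool keyed to the DRAM page size. A hedged kernel-style sketch of setting up such a pool; the variable names are illustrative:

    #include <linux/genalloc.h>

    /* Minimum allocation order = log2 of the DRAM page size (page
     * sizes are powers of two, so __ffs() gives log2), meaning the
     * pool never hands out fragments smaller than one DRAM page. */
    pool = gen_pool_create(__ffs(dram_page_size), -1 /* any NUMA node */);
    if (!pool)
        return -ENOMEM;

    rc = gen_pool_add(pool, dram_user_base, dram_user_size, -1);
    if (rc) {
        gen_pool_destroy(pool);
        return rc;
    }

The dev_warn at line 2878 is the teardown-side counterpart: gen_pool_destroy() on a pool with outstanding allocations indicates leaked DRAM.
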
2859 * @hdev: pointer to the habanalabs device structure.
2866 void hl_vm_fini(struct hl_device *hdev)
2868 struct hl_vm *vm = &hdev->vm;
2878 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
2909 dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");