// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <linux/slab.h>

#include "habanalabs.h"

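/* Check whether the given virtual address resides in the device's DRAM VA range */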
static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
					prop->dmmu.start_addr,
					prop->dmmu.end_addr);
}

/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
	if (hdev->mmu_enable)
		return hdev->mmu_func.init(hdev);

	return 0;
}

/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
	if (hdev->mmu_enable)
		hdev->mmu_func.fini(hdev);
}

/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow and a hash to hold
 * all the page table hops related to this context.
 *
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_enable)
		return hdev->mmu_func.ctx_init(ctx);

	return 0;
}

/**
 * hl_mmu_ctx_fini() - disable a ctx from using the MMU module.
 * @ctx: pointer to the context structure.
 *
 * This function does the following:
 * - Free any pgts which were not freed yet.
 * - Free the mutex.
 * - Free DRAM default page mapping hops.
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_enable)
		hdev->mmu_func.ctx_fini(ctx);
}

/**
 * hl_mmu_unmap() - unmap a virtual address.
 * @ctx: pointer to the context structure.
 * @virt_addr: virt addr to unmap.
 * @page_size: size of the page to unmap.
 * @flush_pte: whether to do a PCI flush.
 *
 * This function does the following:
 * - Check that the virt addr is mapped.
 * - Unmap the virt addr and free pgts if possible.
 *
 * Because this function changes the page tables in the device and changes the
 * MMU hash, it must be protected by a lock. However, because it unmaps only a
 * single page at a time, the lock should be taken at a higher level that
 * protects the entire mapping of the memory area.
 *
 * For optimization reasons, a PCI flush may be requested once after unmapping
 * a large area.
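 *
 * For example, a caller tearing down a large region might defer the flush to
 * the last call (illustrative sketch; va, size and page_size are
 * caller-provided values, not part of this API):
 *
 *	for (off = 0 ; off < size ; off += page_size)
 *		hl_mmu_unmap(ctx, va + off, page_size,
 *				off + page_size >= size);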
 *
 * Return: 0 on success, -EINVAL if the given addr is not mapped.
 */
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr;
	u32 real_page_size, npages;
	int i, rc = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = is_dram_va(hdev, virt_addr);

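	/*
	 * Select the matching MMU properties: DRAM MMU for device VAs,
	 * huge-page or regular host MMU for host VAs.
	 */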
	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it into sub-pages and unmap them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't unmap\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr);
		if (rc)
			break;

		real_virt_addr += real_page_size;
	}

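	/* Callers unmapping a large area may defer the flush to the last call */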
	if (flush_pte)
		hdev->mmu_func.flush(ctx);

	return rc;
}

/**
 * hl_mmu_map() - map a virtual address to a physical address.
 * @ctx: pointer to the context structure.
 * @virt_addr: virt addr to map from.
 * @phys_addr: phys addr to map to.
 * @page_size: physical page size.
 * @flush_pte: whether to do a PCI flush.
 *
 * This function does the following:
 * - Check that the virt addr is not mapped.
 * - Allocate pgts as necessary in order to map the virt addr to the phys addr.
 *
 * Because this function changes the page tables in the device and changes the
 * MMU hash, it must be protected by a lock. However, because it maps only a
 * single page at a time, the lock should be taken at a higher level that
 * protects the entire mapping of the memory area.
 *
 * For optimization reasons, a PCI flush may be requested once after mapping
 * a large area.
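 *
 * For example, a caller mapping a large region might defer the flush to the
 * last call (illustrative sketch; va, pa, size and page_size are
 * caller-provided values, not part of this API):
 *
 *	for (off = 0 ; off < size ; off += page_size)
 *		hl_mmu_map(ctx, va + off, pa + off, page_size,
 *				off + page_size >= size);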
 *
 * Return: 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 */
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
		bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr, real_phys_addr;
	u32 real_page_size, npages;
	int i, rc, mapped_cnt = 0;
	bool is_dram_addr;

	if (!hdev->mmu_enable)
		return 0;

	is_dram_addr = is_dram_va(hdev, virt_addr);

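	/*
	 * Select the matching MMU properties: DRAM MMU for device VAs,
	 * huge-page or regular host MMU for host VAs.
	 */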
	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it into sub-pages and map them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		real_page_size = mmu_prop->page_size;
	} else {
		dev_err(hdev->dev,
			"page size of %u is not %uKB aligned, can't map\n",
			page_size, mmu_prop->page_size >> 10);

		return -EFAULT;
	}

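	/* Warn (once) if the physical address is not aligned to the real page size */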
	WARN_ONCE((phys_addr & (real_page_size - 1)),
		"Mapping 0x%llx with page size of 0x%x is erroneous! Address must be divisible by page size",
		phys_addr, real_page_size);

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;
	real_phys_addr = phys_addr;

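	/*
	 * Map each H/W-sized sub-page, counting successes so that a failure
	 * can be rolled back.
	 */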
	for (i = 0 ; i < npages ; i++) {
		rc = hdev->mmu_func.map(ctx, real_virt_addr, real_phys_addr,
				real_page_size, is_dram_addr);
		if (rc)
			goto err;

		real_virt_addr += real_page_size;
		real_phys_addr += real_page_size;
		mapped_cnt++;
	}

	if (flush_pte)
		hdev->mmu_func.flush(ctx);

	return 0;

err:
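	/* Roll back: unmap whatever was mapped before the failure */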
	real_virt_addr = virt_addr;
	for (i = 0 ; i < mapped_cnt ; i++) {
		if (hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va: 0x%llx\n", real_virt_addr);

		real_virt_addr += real_page_size;
	}

	hdev->mmu_func.flush(ctx);

	return rc;
}

/**
 * hl_mmu_swap_out() - mark all mappings of the given ctx as swapped out.
 * @ctx: pointer to the context structure.
 */
void hl_mmu_swap_out(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_enable)
		hdev->mmu_func.swap_out(ctx);
}

/**
 * hl_mmu_swap_in() - mark all mappings of the given ctx as swapped in.
 * @ctx: pointer to the context structure.
 */
void hl_mmu_swap_in(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_enable)
		hdev->mmu_func.swap_in(ctx);
}

int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
	if (!hdev->mmu_enable)
		return 0;

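	/* GOYA and GAUDI both use the v1 MMU page table implementation */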
	switch (hdev->asic_type) {
	case ASIC_GOYA:
	case ASIC_GAUDI:
		hl_mmu_v1_set_funcs(hdev);
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EOPNOTSUPP;
	}

	return 0;
}