// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>

#define MMU_ADDR_BUF_SIZE	40
#define MMU_ASID_BUF_SIZE	10
#define MMU_KBUF_SIZE		(MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)

static struct dentry *hl_debug_root;

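/*
 * hl_debugfs_i2c_read() - read a value from an I2C register through the
 * device CPU. Builds a CPUCP_PACKET_I2C_RD packet, sends it via the ASIC's
 * send_cpu_message() callback and returns the result in @val.
 * Returns -EBUSY if the device is disabled or in reset.
 */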
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, long *val)
{
	struct cpucp_packet pkt;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev))
		return -EBUSY;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, val);

	if (rc)
		dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);

	return rc;
}

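/*
 * hl_debugfs_i2c_write() - write a value to an I2C register through the
 * device CPU, using a CPUCP_PACKET_I2C_WR packet.
 */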
static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u32 val)
{
	struct cpucp_packet pkt;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev))
		return -EBUSY;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.value = cpu_to_le64(val);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);

	if (rc)
		dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);

	return rc;
}

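/*
 * hl_debugfs_led_set() - set the state of one of the device LEDs by sending
 * a CPUCP_PACKET_LED_SET packet to the device CPU. Failures are only
 * logged, since the function returns void.
 */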
static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
	struct cpucp_packet pkt;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev))
		return;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.led_index = cpu_to_le32(led);
	pkt.value = cpu_to_le64(state);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);

	if (rc)
		dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}

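/*
 * command_buffers_show() - seq_file show callback for the "command_buffers"
 * debugfs entry. Dumps one table row per command buffer on the device's CB
 * list, under the CB spinlock.
 */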
static int command_buffers_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cb *cb;
	bool first = true;

	spin_lock(&dev_entry->cb_spinlock);

	list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CB ID   CTX ID   CB size    CB RefCnt    mmap?   CS counter\n");
			seq_puts(s, "---------------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %03llu        %d    0x%08x      %d          %d          %d\n",
			cb->id, cb->ctx->asid, cb->size,
			kref_read(&cb->refcount),
			cb->mmap, cb->cs_cnt);
	}

	spin_unlock(&dev_entry->cb_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

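/*
 * command_submission_show() - seq_file show callback for the
 * "command_submission" debugfs entry. Dumps the tracked command submissions
 * with their sequence number, context ASID, refcount and state.
 */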
static int command_submission_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs *cs;
	bool first = true;

	spin_lock(&dev_entry->cs_spinlock);

	list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CS ID   CTX ASID   CS RefCnt   Submitted    Completed\n");
			seq_puts(s, "------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %llu       %d          %d           %d            %d\n",
			cs->sequence, cs->ctx->asid,
			kref_read(&cs->refcount),
			cs->submitted, cs->completed);
	}

	spin_unlock(&dev_entry->cs_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

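/*
 * command_submission_jobs_show() - seq_file show callback for the
 * "command_submission_jobs" debugfs entry. Jobs that have no parent CS are
 * printed with sequence 0 and HL_KERNEL_ASID_ID as the context ASID.
 */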
static int command_submission_jobs_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs_job *job;
	bool first = true;

	spin_lock(&dev_entry->cs_job_spinlock);

	list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " JOB ID   CS ID    CTX ASID   H/W Queue\n");
			seq_puts(s, "---------------------------------------\n");
		}
		if (job->cs)
			seq_printf(s,
				"    %02d       %llu         %d         %d\n",
				job->id, job->cs->sequence, job->cs->ctx->asid,
				job->hw_queue_id);
		else
			seq_printf(s,
				"    %02d       0         %d         %d\n",
				job->id, HL_KERNEL_ASID_ID, job->hw_queue_id);
	}

	spin_unlock(&dev_entry->cs_job_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

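/*
 * userptr_show() - seq_file show callback for the "userptr" debugfs entry.
 * Dumps the user virtual address, size and DMA direction of every pinned
 * host memory region on the device's userptr list.
 */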
static int userptr_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_userptr *userptr;
	char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				"DMA_FROM_DEVICE", "DMA_NONE"};
	bool first = true;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " user virtual address     size             dma dir\n");
			seq_puts(s, "----------------------------------------------------------\n");
		}
		seq_printf(s,
			"    0x%-14llx      %-10u    %-30s\n",
			userptr->addr, userptr->size, dma_dir[userptr->dir]);
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

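/*
 * vm_show() - seq_file show callback for the "vm" debugfs entry. For every
 * context on the debugfs list, dumps its MMU mappings (from the context's
 * mem_hash) and its physical page-pack allocations (from the device's IDR).
 * Does nothing when the MMU is disabled.
 */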
static int vm_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_ctx *ctx;
	struct hl_vm *vm;
	struct hl_vm_hash_node *hnode;
	struct hl_userptr *userptr;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	enum vm_type_t *vm_type;
	bool once = true;
	u64 j;
	int i;

	if (!dev_entry->hdev->mmu_enable)
		return 0;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);

	list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
		once = false;
		seq_puts(s, "\n\n----------------------------------------------------");
		seq_puts(s, "\n----------------------------------------------------\n\n");
		seq_printf(s, "ctx asid: %u\n", ctx->asid);

		seq_puts(s, "\nmappings:\n\n");
		seq_puts(s, " virtual address        size          handle\n");
		seq_puts(s, "----------------------------------------------------\n");
		mutex_lock(&ctx->mem_hash_lock);
		hash_for_each(ctx->mem_hash, i, hnode, node) {
			vm_type = hnode->ptr;

			if (*vm_type == VM_TYPE_USERPTR) {
				userptr = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10u\n",
					hnode->vaddr, userptr->size);
			} else {
				phys_pg_pack = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu       %-4u\n",
					hnode->vaddr, phys_pg_pack->total_size,
					phys_pg_pack->handle);
			}
		}
		mutex_unlock(&ctx->mem_hash_lock);

		vm = &ctx->hdev->vm;
		spin_lock(&vm->idr_lock);

		if (!idr_is_empty(&vm->phys_pg_pack_handles))
			seq_puts(s, "\n\nallocations:\n");

		idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
			if (phys_pg_pack->asid != ctx->asid)
				continue;

			seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
			seq_printf(s, "page size: %u\n\n",
						phys_pg_pack->page_size);
			seq_puts(s, "   physical address\n");
			seq_puts(s, "---------------------\n");
			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
				seq_printf(s, "    0x%-14llx\n",
						phys_pg_pack->pages[j]);
			}
		}
		spin_unlock(&vm->idr_lock);

	}

	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);

	if (!once)
		seq_puts(s, "\n");

	return 0;
}

/* these inline functions are copied from mmu.c */
static inline u64 get_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}

static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
					u64 virt_addr, u64 mask, u64 shift)
{
	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
			((virt_addr & mask) >> shift);
}

static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_specs,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
					mmu_specs->hop0_shift);
}

static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_specs,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
					mmu_specs->hop1_shift);
}

static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_specs,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
					mmu_specs->hop2_shift);
}

static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_specs,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
					mmu_specs->hop3_shift);
}

static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_specs,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
					mmu_specs->hop4_shift);
}

static inline u64 get_hop5_pte_addr(struct hl_ctx *ctx,
					struct hl_mmu_properties *mmu_specs,
					u64 hop_addr, u64 vaddr)
{
	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop5_mask,
					mmu_specs->hop5_shift);
}

static inline u64 get_next_hop_addr(u64 curr_pte)
{
	if (curr_pte & PAGE_PRESENT_MASK)
		return curr_pte & HOP_PHYS_ADDR_MASK;
	else
		return ULLONG_MAX;
}

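/*
 * mmu_show() - seq_file show callback for the "mmu" debugfs entry. Walks
 * the page tables for the ASID and virtual address previously written to
 * the entry (see mmu_asid_va_write() below) and dumps the hop addresses
 * and PTEs of the translation, following the same lookup logic as unmap()
 * in mmu.c.
 */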
static int mmu_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	struct hl_ctx *ctx;
	bool is_dram_addr;

	u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
		hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
		hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
		hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
		hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
		hop5_addr = 0, hop5_pte_addr = 0, hop5_pte = 0,
		virt_addr = dev_entry->mmu_addr;

	if (!hdev->mmu_enable)
		return 0;

	if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
		ctx = hdev->kernel_ctx;
	else
		ctx = hdev->compute_ctx;

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return 0;
	}

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);

	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	mutex_lock(&ctx->mmu_lock);

	/* the following lookup is copied from unmap() in mmu.c */

	hop0_addr = get_hop0_addr(ctx);
	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
	hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
	hop1_addr = get_next_hop_addr(hop0_pte);

	if (hop1_addr == ULLONG_MAX)
		goto not_mapped;

	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
	hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
	hop2_addr = get_next_hop_addr(hop1_pte);

	if (hop2_addr == ULLONG_MAX)
		goto not_mapped;

	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
	hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
	hop3_addr = get_next_hop_addr(hop2_pte);

	if (hop3_addr == ULLONG_MAX)
		goto not_mapped;

	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
	hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);

	if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
		if (!(hop3_pte & LAST_MASK)) {
			hop4_addr = get_next_hop_addr(hop3_pte);

			if (hop4_addr == ULLONG_MAX)
				goto not_mapped;

			hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
							hop4_addr, virt_addr);
			hop4_pte = hdev->asic_funcs->read_pte(hdev,
								hop4_pte_addr);
			if (!(hop4_pte & PAGE_PRESENT_MASK))
				goto not_mapped;
		} else {
			if (!(hop3_pte & PAGE_PRESENT_MASK))
				goto not_mapped;
		}
	} else {
		hop4_addr = get_next_hop_addr(hop3_pte);

		if (hop4_addr == ULLONG_MAX)
			goto not_mapped;

		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
						hop4_addr, virt_addr);
		hop4_pte = hdev->asic_funcs->read_pte(hdev,
							hop4_pte_addr);
		if (!(hop4_pte & LAST_MASK)) {
			hop5_addr = get_next_hop_addr(hop4_pte);

			if (hop5_addr == ULLONG_MAX)
				goto not_mapped;

			hop5_pte_addr = get_hop5_pte_addr(ctx, mmu_prop,
							hop5_addr, virt_addr);
			hop5_pte = hdev->asic_funcs->read_pte(hdev,
								hop5_pte_addr);
			if (!(hop5_pte & PAGE_PRESENT_MASK))
				goto not_mapped;
		} else {
			if (!(hop4_pte & PAGE_PRESENT_MASK))
				goto not_mapped;
		}
	}

	seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
			dev_entry->mmu_asid, dev_entry->mmu_addr);

	seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr);
	seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr);
	seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte);

	seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr);
	seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr);
	seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte);

	seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr);
	seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr);
	seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte);

	seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr);
	seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
	seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);

	if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
		if (!(hop3_pte & LAST_MASK)) {
			seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
			seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
			seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
		}
	} else {
		seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
		seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
		seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);

		if (!(hop4_pte & LAST_MASK)) {
			seq_printf(s, "hop5_addr: 0x%llx\n", hop5_addr);
			seq_printf(s, "hop5_pte_addr: 0x%llx\n", hop5_pte_addr);
			seq_printf(s, "hop5_pte: 0x%llx\n", hop5_pte);
		}
	}

	goto out;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
			virt_addr);
out:
	mutex_unlock(&ctx->mmu_lock);

	return 0;
}

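/*
 * mmu_asid_va_write() - parse an "<asid> <0xaddr>" string written to the
 * "mmu" debugfs entry and store the values for the next read, e.g.:
 *
 *	echo "0 0x1001000000" > /sys/kernel/debug/habanalabs/hl0/mmu
 *
 * (the "hl0" directory name and the address are examples; the per-device
 * directory is created from dev_name() in hl_debugfs_add_device()).
 */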
static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	char kbuf[MMU_KBUF_SIZE];
	char *c;
	ssize_t rc;

	if (!hdev->mmu_enable)
		return count;

	if (count > sizeof(kbuf) - 1)
		goto err;
	if (copy_from_user(kbuf, buf, count))
		goto err;
	kbuf[count] = 0;

	c = strchr(kbuf, ' ');
	if (!c)
		goto err;
	*c = '\0';

	rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
	if (rc)
		goto err;

	if (strncmp(c+1, "0x", 2))
		goto err;
	rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
	if (rc)
		goto err;

	return count;

err:
	dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");

	return -EINVAL;
}

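/*
 * engines_show() - seq_file show callback for the "engines" debugfs entry.
 * Delegates to the ASIC's is_device_idle() callback, which prints the
 * engine status into the seq_file. Skipped while the device is in reset.
 */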
static int engines_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev,
				"Can't check device idle during reset\n");
		return 0;
	}

	hdev->asic_funcs->is_device_idle(hdev, NULL, s);

	return 0;
}

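/*
 * hl_is_device_va() - return true if @addr falls inside one of the device's
 * virtual address ranges: the DRAM MMU range (when virtual DRAM memory is
 * supported), the PMMU range, or the huge-page PMMU range. Always false
 * when the MMU is disabled.
 */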
static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (!hdev->mmu_enable)
		goto out;

	if (hdev->dram_supports_virtual_memory &&
		(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
		return true;

	if (addr >= prop->pmmu.start_addr &&
		addr < prop->pmmu.end_addr)
		return true;

	if (addr >= prop->pmmu_huge.start_addr &&
		addr < prop->pmmu_huge.end_addr)
		return true;
out:
	return false;
}

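/*
 * device_va_to_pa() - translate a device virtual address to a physical
 * address by walking the page tables of the current compute context, in
 * the same way as the mmu_show() walk above. If hop 3's PTE is marked as
 * last (a huge page), the walk stops there and the wider offset mask
 * (HOP4_MASK | FLAGS_MASK) is applied to form the physical address.
 */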
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
				u64 *phys_addr)
{
	struct hl_ctx *ctx = hdev->compute_ctx;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	u64 hop_addr, hop_pte_addr, hop_pte;
	u64 offset_mask = HOP4_MASK | FLAGS_MASK;
	int rc = 0;
	bool is_dram_addr;

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return -EINVAL;
	}

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);

	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;

	mutex_lock(&ctx->mmu_lock);

	/* hop 0 */
	hop_addr = get_hop0_addr(ctx);
	hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

	/* hop 1 */
	hop_addr = get_next_hop_addr(hop_pte);
	if (hop_addr == ULLONG_MAX)
		goto not_mapped;
	hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

	/* hop 2 */
	hop_addr = get_next_hop_addr(hop_pte);
	if (hop_addr == ULLONG_MAX)
		goto not_mapped;
	hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

	/* hop 3 */
	hop_addr = get_next_hop_addr(hop_pte);
	if (hop_addr == ULLONG_MAX)
		goto not_mapped;
	hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

	if (!(hop_pte & LAST_MASK)) {
		/* hop 4 */
		hop_addr = get_next_hop_addr(hop_pte);
		if (hop_addr == ULLONG_MAX)
			goto not_mapped;
		hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
							virt_addr);
		hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);

		offset_mask = FLAGS_MASK;
	}

	if (!(hop_pte & PAGE_PRESENT_MASK))
		goto not_mapped;

	*phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask);

	goto out;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
			virt_addr);
	rc = -EINVAL;
out:
	mutex_unlock(&ctx->mmu_lock);
	return rc;
}

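/*
 * hl_data_read32()/hl_data_write32() and the 64-bit variants below back the
 * "data32"/"data64" debugfs entries: they access the device address that
 * was last written to the "addr" entry, translating it from a device
 * virtual address to a physical one first when needed.
 */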
static ssize_t hl_data_read32(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	u64 addr = entry->addr;
	u32 val;
	ssize_t rc;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
		return 0;
	}

	if (*ppos)
		return 0;

	if (hl_is_device_va(hdev, addr)) {
		rc = device_va_to_pa(hdev, addr, &addr);
		if (rc)
			return rc;
	}

	rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
	if (rc) {
		dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
		return rc;
	}

	sprintf(tmp_buf, "0x%08x\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_data_write32(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	u32 value;
	ssize_t rc;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	if (hl_is_device_va(hdev, addr)) {
		rc = device_va_to_pa(hdev, addr, &addr);
		if (rc)
			return rc;
	}

	rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
	if (rc) {
		dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
			value, addr);
		return rc;
	}

	return count;
}

static ssize_t hl_data_read64(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	u64 addr = entry->addr;
	u64 val;
	ssize_t rc;

	if (*ppos)
		return 0;

	if (hl_is_device_va(hdev, addr)) {
		rc = device_va_to_pa(hdev, addr, &addr);
		if (rc)
			return rc;
	}

	rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
	if (rc) {
		dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
		return rc;
	}

	sprintf(tmp_buf, "0x%016llx\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_data_write64(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	u64 value;
	ssize_t rc;

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	if (hl_is_device_va(hdev, addr)) {
		rc = device_va_to_pa(hdev, addr, &addr);
		if (rc)
			return rc;
	}

	rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
	if (rc) {
		dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
			value, addr);
		return rc;
	}

	return count;
}

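/*
 * hl_get_power_state()/hl_set_power_state() - report and change the PCI
 * power state of the device through the "set_power_state" debugfs entry:
 * writing 1 moves the device to D0, writing 2 to D3hot.
 */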
static ssize_t hl_get_power_state(struct file *f, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	int i;

	if (*ppos)
		return 0;

	if (hdev->pdev->current_state == PCI_D0)
		i = 1;
	else if (hdev->pdev->current_state == PCI_D3hot)
		i = 2;
	else
		i = 3;

	sprintf(tmp_buf,
		"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	if (value == 1) {
		pci_set_power_state(hdev->pdev, PCI_D0);
		pci_restore_state(hdev->pdev);
		rc = pci_enable_device(hdev->pdev);
		if (rc < 0)
			return rc;
	} else if (value == 2) {
		pci_save_state(hdev->pdev);
		pci_disable_device(hdev->pdev);
		pci_set_power_state(hdev->pdev, PCI_D3hot);
	} else {
		dev_dbg(hdev->dev, "invalid power state value %u\n", value);
		return -EINVAL;
	}

	return count;
}

static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	long val;
	ssize_t rc;

	if (*ppos)
		return 0;

	rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, &val);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to read from I2C bus %d, addr %d, reg %d\n",
			entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
		return rc;
	}

	sprintf(tmp_buf, "0x%02lx\n", val);
	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));

	return rc;
}

static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, value);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
			value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
		return rc;
	}

	return count;
}

static ssize_t hl_led0_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 0, value);

	return count;
}

static ssize_t hl_led1_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 1, value);

	return count;
}

static ssize_t hl_led2_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 2, value);

	return count;
}

static ssize_t hl_device_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	static const char *help =
		"Valid values: disable, enable, suspend, resume, cpu_timeout\n";
	return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}

static ssize_t hl_device_write(struct file *f, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char data[30] = {0};

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;

	simple_write_to_buffer(data, 29, ppos, buf, count);

	if (strncmp("disable", data, strlen("disable")) == 0) {
		hdev->disabled = true;
	} else if (strncmp("enable", data, strlen("enable")) == 0) {
		hdev->disabled = false;
	} else if (strncmp("suspend", data, strlen("suspend")) == 0) {
		hdev->asic_funcs->suspend(hdev);
	} else if (strncmp("resume", data, strlen("resume")) == 0) {
		hdev->asic_funcs->resume(hdev);
	} else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
		hdev->device_cpu_disabled = true;
	} else {
		dev_err(hdev->dev,
			"Valid values: disable, enable, suspend, resume, cpu_timeout\n");
		count = -EINVAL;
	}

	return count;
}

static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}

static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value;
	ssize_t rc;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev,
				"Can't change clock gating during reset\n");
		return 0;
	}

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	hdev->clock_gating_mask = value;
	hdev->asic_funcs->set_clock_gating(hdev);

	return count;
}

static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}

static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev,
				"Can't change stop on error during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	hdev->stop_on_err = value ? 1 : 0;

	hl_device_reset(hdev, false, false);

	return count;
}

static const struct file_operations hl_data32b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read32,
	.write = hl_data_write32
};

static const struct file_operations hl_data64b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read64,
	.write = hl_data_write64
};

static const struct file_operations hl_i2c_data_fops = {
	.owner = THIS_MODULE,
	.read = hl_i2c_data_read,
	.write = hl_i2c_data_write
};

static const struct file_operations hl_power_fops = {
	.owner = THIS_MODULE,
	.read = hl_get_power_state,
	.write = hl_set_power_state
};

static const struct file_operations hl_led0_fops = {
	.owner = THIS_MODULE,
	.write = hl_led0_write
};

static const struct file_operations hl_led1_fops = {
	.owner = THIS_MODULE,
	.write = hl_led1_write
};

static const struct file_operations hl_led2_fops = {
	.owner = THIS_MODULE,
	.write = hl_led2_write
};

static const struct file_operations hl_device_fops = {
	.owner = THIS_MODULE,
	.read = hl_device_read,
	.write = hl_device_write
};

static const struct file_operations hl_clk_gate_fops = {
	.owner = THIS_MODULE,
	.read = hl_clk_gate_read,
	.write = hl_clk_gate_write
};

static const struct file_operations hl_stop_on_err_fops = {
	.owner = THIS_MODULE,
	.read = hl_stop_on_err_read,
	.write = hl_stop_on_err_write
};

static const struct hl_info_list hl_debugfs_list[] = {
	{"command_buffers", command_buffers_show, NULL},
	{"command_submission", command_submission_show, NULL},
	{"command_submission_jobs", command_submission_jobs_show, NULL},
	{"userptr", userptr_show, NULL},
	{"vm", vm_show, NULL},
	{"mmu", mmu_show, mmu_asid_va_write},
	{"engines", engines_show, NULL}
};

static int hl_debugfs_open(struct inode *inode, struct file *file)
{
	struct hl_debugfs_entry *node = inode->i_private;

	return single_open(file, node->info_ent->show, node);
}

static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct hl_debugfs_entry *node = file->f_inode->i_private;

	if (node->info_ent->write)
		return node->info_ent->write(file, buf, count, f_pos);
	else
		return -EINVAL;

}

static const struct file_operations hl_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = hl_debugfs_open,
	.read = seq_read,
	.write = hl_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};

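/*
 * hl_debugfs_add_device() - create the per-device debugfs directory (named
 * after dev_name()) and populate it: the raw access entries (addr, data32,
 * data64), power, I2C and LED controls, device control entries, and one
 * file per entry in hl_debugfs_list[].
 */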
void hl_debugfs_add_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
	int count = ARRAY_SIZE(hl_debugfs_list);
	struct hl_debugfs_entry *entry;
	struct dentry *ent;
	int i;

	dev_entry->hdev = hdev;
	dev_entry->entry_arr = kmalloc_array(count,
					sizeof(struct hl_debugfs_entry),
					GFP_KERNEL);
	if (!dev_entry->entry_arr)
		return;

	INIT_LIST_HEAD(&dev_entry->file_list);
	INIT_LIST_HEAD(&dev_entry->cb_list);
	INIT_LIST_HEAD(&dev_entry->cs_list);
	INIT_LIST_HEAD(&dev_entry->cs_job_list);
	INIT_LIST_HEAD(&dev_entry->userptr_list);
	INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
	mutex_init(&dev_entry->file_mutex);
	spin_lock_init(&dev_entry->cb_spinlock);
	spin_lock_init(&dev_entry->cs_spinlock);
	spin_lock_init(&dev_entry->cs_job_spinlock);
	spin_lock_init(&dev_entry->userptr_spinlock);
	spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);

	dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
						hl_debug_root);

	debugfs_create_x64("addr",
				0644,
				dev_entry->root,
				&dev_entry->addr);

	debugfs_create_file("data32",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_data32b_fops);

	debugfs_create_file("data64",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_data64b_fops);

	debugfs_create_file("set_power_state",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_power_fops);

	debugfs_create_u8("i2c_bus",
				0644,
				dev_entry->root,
				&dev_entry->i2c_bus);

	debugfs_create_u8("i2c_addr",
				0644,
				dev_entry->root,
				&dev_entry->i2c_addr);

	debugfs_create_u8("i2c_reg",
				0644,
				dev_entry->root,
				&dev_entry->i2c_reg);

	debugfs_create_file("i2c_data",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_i2c_data_fops);

	debugfs_create_file("led0",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led0_fops);

	debugfs_create_file("led1",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led1_fops);

	debugfs_create_file("led2",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led2_fops);

	debugfs_create_file("device",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_device_fops);

	debugfs_create_file("clk_gate",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_clk_gate_fops);

	debugfs_create_file("stop_on_err",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_stop_on_err_fops);

	for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {

		ent = debugfs_create_file(hl_debugfs_list[i].name,
					0444,
					dev_entry->root,
					entry,
					&hl_debugfs_fops);
		entry->dent = ent;
		entry->info_ent = &hl_debugfs_list[i];
		entry->dev_entry = dev_entry;
	}
}

void hl_debugfs_remove_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;

	debugfs_remove_recursive(entry->root);

	mutex_destroy(&entry->file_mutex);
	kfree(entry->entry_arr);
}

void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_add(&hpriv->debugfs_list, &dev_entry->file_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_del(&hpriv->debugfs_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_add_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_add(&cb->debugfs_list, &dev_entry->cb_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_remove_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_del(&cb->debugfs_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_add_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_add(&cs->debugfs_list, &dev_entry->cs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_remove_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_del(&cs->debugfs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_add(&job->debugfs_list, &dev_entry->cs_job_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_del(&job->debugfs_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_remove_userptr(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_del(&userptr->debugfs_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
	list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}

void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
	list_del(&ctx->debugfs_list);
	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}

void __init hl_debugfs_init(void)
{
	hl_debug_root = debugfs_create_dir("habanalabs", NULL);
}

void hl_debugfs_fini(void)
{
	debugfs_remove_recursive(hl_debug_root);
}