// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 */

#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>

#define MMU_ADDR_BUF_SIZE	40
#define MMU_ASID_BUF_SIZE	10
#define MMU_KBUF_SIZE		(MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
static struct dentry *hl_debug_root;
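/*
 * Helpers that proxy debugfs requests to the device CPU-CP firmware:
 * I2C reads/writes and LED control are sent as cpucp packets through the
 * ASIC's send_cpu_message() callback.
 */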
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, long *val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, val);
	if (rc)
		dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);

	return rc;
}
static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u32 val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.value = cpu_to_le64(val);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);
	if (rc)
		dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);

	return rc;
}
static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.led_index = cpu_to_le32(led);
	pkt.value = cpu_to_le64(state);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);
	if (rc)
		dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}
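/*
 * seq_file show callbacks: each one dumps a driver bookkeeping list
 * (command buffers, command submissions, jobs, user pointers, VM mappings,
 * MMU translations) under the per-device debugfs directory.
 */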
static int command_buffers_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	bool first = true;
	struct hl_cb *cb;

	spin_lock(&dev_entry->cb_spinlock);

	list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CB ID   CTX ID   CB size   CB RefCnt   mmap?   CS counter\n");
			seq_puts(s, "---------------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %03llu      %d    0x%08x     %d        %d        %d\n",
			cb->id, cb->ctx->asid, cb->size,
			kref_read(&cb->refcount),
			cb->mmap, atomic_read(&cb->cs_cnt));
	}

	spin_unlock(&dev_entry->cb_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}
static int command_submission_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	bool first = true;
	struct hl_cs *cs;

	spin_lock(&dev_entry->cs_spinlock);

	list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CS ID   CTX ASID   CS RefCnt   Submitted   Completed\n");
			seq_puts(s, "------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %llu       %d         %d          %d           %d\n",
			cs->sequence, cs->ctx->asid,
			kref_read(&cs->refcount),
			cs->submitted, cs->completed);
	}

	spin_unlock(&dev_entry->cs_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}
static int command_submission_jobs_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs_job *job;
	bool first = true;

	spin_lock(&dev_entry->cs_job_spinlock);

	list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " JOB ID   CS ID   CTX ASID   JOB RefCnt   H/W Queue\n");
			seq_puts(s, "----------------------------------------------------\n");
		}
		if (job->cs)
			seq_printf(s,
				"   %02d     %llu       %d          %d           %d\n",
				job->id, job->cs->sequence, job->cs->ctx->asid,
				kref_read(&job->refcount), job->hw_queue_id);
		else
			seq_printf(s,
				"   %02d     0       %d          %d           %d\n",
				job->id, HL_KERNEL_ASID_ID,
				kref_read(&job->refcount), job->hw_queue_id);
	}

	spin_unlock(&dev_entry->cs_job_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}
static int userptr_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_userptr *userptr;
	char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				"DMA_FROM_DEVICE", "DMA_NONE"};
	bool first = true;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " user virtual address     size     dma dir\n");
			seq_puts(s, "----------------------------------------------------------\n");
		}
		seq_printf(s,
			"    0x%-14llx      %-10u    %-30s\n",
			userptr->addr, userptr->size, dma_dir[userptr->dir]);
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}
static int vm_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_ctx *ctx;
	struct hl_vm *vm;
	struct hl_vm_hash_node *hnode;
	struct hl_userptr *userptr;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	enum vm_type_t *vm_type;
	bool once = true;
	u64 j;
	int i;

	if (!dev_entry->hdev->mmu_enable)
		return 0;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);

	list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
		once = false;
		seq_puts(s, "\n\n----------------------------------------------------");
		seq_puts(s, "\n----------------------------------------------------\n\n");
		seq_printf(s, "ctx asid: %u\n", ctx->asid);

		seq_puts(s, "\nmappings:\n\n");
		seq_puts(s, "    virtual address        size          handle\n");
		seq_puts(s, "----------------------------------------------------\n");
		mutex_lock(&ctx->mem_hash_lock);
		hash_for_each(ctx->mem_hash, i, hnode, node) {
			vm_type = hnode->ptr;

			if (*vm_type == VM_TYPE_USERPTR) {
				userptr = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10u\n",
					hnode->vaddr, userptr->size);
			} else {
				phys_pg_pack = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu       %-4u\n",
					hnode->vaddr, phys_pg_pack->total_size,
					phys_pg_pack->handle);
			}
		}
		mutex_unlock(&ctx->mem_hash_lock);

		vm = &ctx->hdev->vm;
		spin_lock(&vm->idr_lock);

		if (!idr_is_empty(&vm->phys_pg_pack_handles))
			seq_puts(s, "\n\nallocations:\n");

		idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
			if (phys_pg_pack->asid != ctx->asid)
				continue;

			seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
			seq_printf(s, "page size: %u\n\n",
						phys_pg_pack->page_size);
			seq_puts(s, "   physical address\n");
			seq_puts(s, "---------------------\n");
			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
				seq_printf(s, "    0x%-14llx\n",
						phys_pg_pack->pages[j]);
			}
		}
		spin_unlock(&vm->idr_lock);
	}

	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);

	if (!once)
		seq_puts(s, "\n");

	return 0;
}
static int mmu_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	struct hl_ctx *ctx;
	struct hl_mmu_hop_info hops_info;
	u64 virt_addr = dev_entry->mmu_addr;
	int i;

	if (!hdev->mmu_enable)
		return 0;

	if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
		ctx = hdev->kernel_ctx;
	else
		ctx = hdev->compute_ctx;

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return 0;
	}

	if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
		dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
				virt_addr);
		return 0;
	}

	seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
			dev_entry->mmu_asid, dev_entry->mmu_addr);

	for (i = 0 ; i < hops_info.used_hops ; i++) {
		seq_printf(s, "hop%d_addr: 0x%llx\n",
				i, hops_info.hop_info[i].hop_addr);
		seq_printf(s, "hop%d_pte_addr: 0x%llx\n",
				i, hops_info.hop_info[i].hop_pte_addr);
		seq_printf(s, "hop%d_pte: 0x%llx\n",
				i, hops_info.hop_info[i].hop_pte_val);
	}

	return 0;
}
static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	char kbuf[MMU_KBUF_SIZE];
	char *c;
	ssize_t rc;

	if (!hdev->mmu_enable)
		return count;

	if (count > sizeof(kbuf) - 1)
		goto err;
	if (copy_from_user(kbuf, buf, count))
		goto err;
	kbuf[count] = 0;

	c = strchr(kbuf, ' ');
	if (!c)
		goto err;
	*c = '\0';

	rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
	if (rc)
		goto err;

	if (strncmp(c+1, "0x", 2))
		goto err;
	rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
	if (rc)
		goto err;

	return count;

err:
	dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");

	return -EINVAL;
}
static int engines_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev,
				"Can't check device idle during reset\n");
		return 0;
	}

	hdev->asic_funcs->is_device_idle(hdev, NULL, s);

	return 0;
}
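/*
 * Address helpers for the data32/data64 nodes: decide whether an address is
 * a device virtual address and, if so, translate it to a physical address
 * through the MMU before accessing it.
 */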
static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (!hdev->mmu_enable)
		return false;

	if (prop->dram_supports_virtual_memory &&
		(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
		return true;

	if (addr >= prop->pmmu.start_addr &&
		addr < prop->pmmu.end_addr)
		return true;

	if (addr >= prop->pmmu_huge.start_addr &&
		addr < prop->pmmu_huge.end_addr)
		return true;

	return false;
}
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
				u64 *phys_addr)
{
	struct hl_ctx *ctx = hdev->compute_ctx;
	int rc;

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return -EINVAL;
	}

	rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
	if (rc)
		dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
				virt_addr);

	return rc;
}
static ssize_t hl_data_read32(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	u64 addr = entry->addr;
	u32 val;
	ssize_t rc;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
		return 0;
	}

	if (*ppos)
		return 0;

	if (hl_is_device_va(hdev, addr)) {
		rc = device_va_to_pa(hdev, addr, &addr);
		if (rc)
			return rc;
	}

	rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
	if (rc) {
		dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
		return rc;
	}

	sprintf(tmp_buf, "0x%08x\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}
static ssize_t hl_data_write32(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	u32 value;
	ssize_t rc;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	if (hl_is_device_va(hdev, addr)) {
		rc = device_va_to_pa(hdev, addr, &addr);
		if (rc)
			return rc;
	}

	rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
	if (rc) {
		dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
			value, addr);
		return rc;
	}

	return count;
}
static ssize_t hl_data_read64(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	u64 addr = entry->addr;
	u64 val;
	ssize_t rc;

	if (*ppos)
		return 0;

	if (hl_is_device_va(hdev, addr)) {
		rc = device_va_to_pa(hdev, addr, &addr);
		if (rc)
			return rc;
	}

	rc = hdev->asic_funcs->debugfs_read64(hdev, addr, &val);
	if (rc) {
		dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
		return rc;
	}

	sprintf(tmp_buf, "0x%016llx\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}
static ssize_t hl_data_write64(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	u64 value;
	ssize_t rc;

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	if (hl_is_device_va(hdev, addr)) {
		rc = device_va_to_pa(hdev, addr, &addr);
		if (rc)
			return rc;
	}

	rc = hdev->asic_funcs->debugfs_write64(hdev, addr, value);
	if (rc) {
		dev_err(hdev->dev, "Failed to write 0x%016llx to 0x%010llx\n",
			value, addr);
		return rc;
	}

	return count;
}
static ssize_t hl_get_power_state(struct file *f, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	int i;

	if (*ppos)
		return 0;

	if (hdev->pdev->current_state == PCI_D0)
		i = 1;
	else if (hdev->pdev->current_state == PCI_D3hot)
		i = 2;
	else
		i = 3;

	sprintf(tmp_buf,
		"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}
static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	if (value == 1) {
		pci_set_power_state(hdev->pdev, PCI_D0);
		pci_restore_state(hdev->pdev);
		rc = pci_enable_device(hdev->pdev);
	} else if (value == 2) {
		pci_save_state(hdev->pdev);
		pci_disable_device(hdev->pdev);
		pci_set_power_state(hdev->pdev, PCI_D3hot);
	} else {
		dev_dbg(hdev->dev, "invalid power state value %u\n", value);
		return -EINVAL;
	}

	return count;
}
static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	long val;
	ssize_t rc;

	if (*ppos)
		return 0;

	rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, &val);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to read from I2C bus %d, addr %d, reg %d\n",
			entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
		return rc;
	}

	sprintf(tmp_buf, "0x%02lx\n", val);
	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));

	return rc;
}
static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, value);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
			value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
		return rc;
	}

	return count;
}
static ssize_t hl_led0_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 0, value);

	return count;
}
static ssize_t hl_led1_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 1, value);

	return count;
}
static ssize_t hl_led2_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 2, value);

	return count;
}
static ssize_t hl_device_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	static const char *help =
		"Valid values: disable, enable, suspend, resume, cpu_timeout\n";
	return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}
static ssize_t hl_device_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char data[30] = {0};

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;

	simple_write_to_buffer(data, 29, ppos, buf, count);

	if (strncmp("disable", data, strlen("disable")) == 0) {
		hdev->disabled = true;
	} else if (strncmp("enable", data, strlen("enable")) == 0) {
		hdev->disabled = false;
	} else if (strncmp("suspend", data, strlen("suspend")) == 0) {
		hdev->asic_funcs->suspend(hdev);
	} else if (strncmp("resume", data, strlen("resume")) == 0) {
		hdev->asic_funcs->resume(hdev);
	} else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
		hdev->device_cpu_disabled = true;
	} else {
		dev_err(hdev->dev,
			"Valid values: disable, enable, suspend, resume, cpu_timeout\n");
		count = -EINVAL;
	}

	return count;
}
static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}
static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value;
	ssize_t rc;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev,
				"Can't change clock gating during reset\n");
		return 0;
	}

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	hdev->clock_gating_mask = value;
	hdev->asic_funcs->set_clock_gating(hdev);

	return count;
}
static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}
static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	if (atomic_read(&hdev->in_reset)) {
		dev_warn_ratelimited(hdev->dev,
				"Can't change stop on error during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	hdev->stop_on_err = value ? 1 : 0;

	hl_device_reset(hdev, false, false);

	return count;
}
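/* file_operations tables for the per-device debugfs nodes */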
static const struct file_operations hl_data32b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read32,
	.write = hl_data_write32
};

static const struct file_operations hl_data64b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read64,
	.write = hl_data_write64
};

static const struct file_operations hl_i2c_data_fops = {
	.owner = THIS_MODULE,
	.read = hl_i2c_data_read,
	.write = hl_i2c_data_write
};

static const struct file_operations hl_power_fops = {
	.owner = THIS_MODULE,
	.read = hl_get_power_state,
	.write = hl_set_power_state
};

static const struct file_operations hl_led0_fops = {
	.owner = THIS_MODULE,
	.write = hl_led0_write
};

static const struct file_operations hl_led1_fops = {
	.owner = THIS_MODULE,
	.write = hl_led1_write
};

static const struct file_operations hl_led2_fops = {
	.owner = THIS_MODULE,
	.write = hl_led2_write
};

static const struct file_operations hl_device_fops = {
	.owner = THIS_MODULE,
	.read = hl_device_read,
	.write = hl_device_write
};

static const struct file_operations hl_clk_gate_fops = {
	.owner = THIS_MODULE,
	.read = hl_clk_gate_read,
	.write = hl_clk_gate_write
};

static const struct file_operations hl_stop_on_err_fops = {
	.owner = THIS_MODULE,
	.read = hl_stop_on_err_read,
	.write = hl_stop_on_err_write
};
static const struct hl_info_list hl_debugfs_list[] = {
	{"command_buffers", command_buffers_show, NULL},
	{"command_submission", command_submission_show, NULL},
	{"command_submission_jobs", command_submission_jobs_show, NULL},
	{"userptr", userptr_show, NULL},
	{"vm", vm_show, NULL},
	{"mmu", mmu_show, mmu_asid_va_write},
	{"engines", engines_show, NULL}
};
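/*
 * Common open/write wrappers: each hl_info_list entry is exposed via
 * single_open() on its show callback, with writes routed to the entry's
 * optional write callback.
 */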
static int hl_debugfs_open(struct inode *inode, struct file *file)
{
	struct hl_debugfs_entry *node = inode->i_private;

	return single_open(file, node->info_ent->show, node);
}
static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct hl_debugfs_entry *node = file->f_inode->i_private;

	if (node->info_ent->write)
		return node->info_ent->write(file, buf, count, f_pos);
	else
		return -EINVAL;
}
static const struct file_operations hl_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = hl_debugfs_open,
	.read = seq_read,
	.write = hl_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};
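/*
 * Create the per-device debugfs directory and populate it with the register
 * access, power, I2C, LED and info nodes defined above.
 */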
void hl_debugfs_add_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
	int count = ARRAY_SIZE(hl_debugfs_list);
	struct hl_debugfs_entry *entry;
	struct dentry *ent;
	int i;

	dev_entry->hdev = hdev;
	dev_entry->entry_arr = kmalloc_array(count,
					sizeof(struct hl_debugfs_entry),
					GFP_KERNEL);
	if (!dev_entry->entry_arr)
		return;

	INIT_LIST_HEAD(&dev_entry->file_list);
	INIT_LIST_HEAD(&dev_entry->cb_list);
	INIT_LIST_HEAD(&dev_entry->cs_list);
	INIT_LIST_HEAD(&dev_entry->cs_job_list);
	INIT_LIST_HEAD(&dev_entry->userptr_list);
	INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
	mutex_init(&dev_entry->file_mutex);
	spin_lock_init(&dev_entry->cb_spinlock);
	spin_lock_init(&dev_entry->cs_spinlock);
	spin_lock_init(&dev_entry->cs_job_spinlock);
	spin_lock_init(&dev_entry->userptr_spinlock);
	spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);

	dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
						hl_debug_root);

	debugfs_create_x64("addr", 0644, dev_entry->root, &dev_entry->addr);

	debugfs_create_file("data32", 0644, dev_entry->root, dev_entry,
				&hl_data32b_fops);

	debugfs_create_file("data64", 0644, dev_entry->root, dev_entry,
				&hl_data64b_fops);

	debugfs_create_file("set_power_state", 0200, dev_entry->root, dev_entry,
				&hl_power_fops);

	debugfs_create_u8("i2c_bus", 0644, dev_entry->root,
				&dev_entry->i2c_bus);

	debugfs_create_u8("i2c_addr", 0644, dev_entry->root,
				&dev_entry->i2c_addr);

	debugfs_create_u8("i2c_reg", 0644, dev_entry->root,
				&dev_entry->i2c_reg);

	debugfs_create_file("i2c_data", 0644, dev_entry->root, dev_entry,
				&hl_i2c_data_fops);

	debugfs_create_file("led0", 0200, dev_entry->root, dev_entry,
				&hl_led0_fops);

	debugfs_create_file("led1", 0200, dev_entry->root, dev_entry,
				&hl_led1_fops);

	debugfs_create_file("led2", 0200, dev_entry->root, dev_entry,
				&hl_led2_fops);

	debugfs_create_file("device", 0200, dev_entry->root, dev_entry,
				&hl_device_fops);

	debugfs_create_file("clk_gate", 0200, dev_entry->root, dev_entry,
				&hl_clk_gate_fops);

	debugfs_create_file("stop_on_err", 0644, dev_entry->root, dev_entry,
				&hl_stop_on_err_fops);

	for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
		ent = debugfs_create_file(hl_debugfs_list[i].name,
					0444,
					dev_entry->root,
					entry,
					&hl_debugfs_fops);
		entry->dent = ent;
		entry->info_ent = &hl_debugfs_list[i];
		entry->dev_entry = dev_entry;
	}
}
void hl_debugfs_remove_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;

	debugfs_remove_recursive(entry->root);

	mutex_destroy(&entry->file_mutex);
	kfree(entry->entry_arr);
}
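/*
 * The helpers below are called from the rest of the driver to track objects
 * (open files, CBs, CSs, jobs, userptrs, contexts) in the debugfs lists
 * that the show callbacks above iterate.
 */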
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_add(&hpriv->debugfs_list, &dev_entry->file_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_del(&hpriv->debugfs_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_add_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_add(&cb->debugfs_list, &dev_entry->cb_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_remove_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_del(&cb->debugfs_list);
	spin_unlock(&dev_entry->cb_spinlock);
}
void hl_debugfs_add_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_add(&cs->debugfs_list, &dev_entry->cs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_remove_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_del(&cs->debugfs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_add(&job->debugfs_list, &dev_entry->cs_job_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_del(&job->debugfs_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}
void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_remove_userptr(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_del(&userptr->debugfs_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
	list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}

void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
	list_del(&ctx->debugfs_list);
	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}
void __init hl_debugfs_init(void)
{
	hl_debug_root = debugfs_create_dir("habanalabs", NULL);
}

void hl_debugfs_fini(void)
{
	debugfs_remove_recursive(hl_debug_root);
}