// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */
8 #include "habanalabs.h"
9 #include "include/hw_ip/mmu/mmu_general.h"
11 #include <linux/pci.h>
12 #include <linux/debugfs.h>
13 #include <linux/uaccess.h>
15 #define MMU_ADDR_BUF_SIZE 40
16 #define MMU_ASID_BUF_SIZE 10
17 #define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
/* Root of the driver's debugfs hierarchy: /sys/kernel/debug/habanalabs */
static struct dentry *hl_debug_root;
21 static int hl_debugfs_i2c_read(struct hl_device
*hdev
, u8 i2c_bus
, u8 i2c_addr
,
24 struct armcp_packet pkt
;
27 if (hl_device_disabled_or_in_reset(hdev
))
30 memset(&pkt
, 0, sizeof(pkt
));
32 pkt
.ctl
= cpu_to_le32(ARMCP_PACKET_I2C_RD
<<
33 ARMCP_PKT_CTL_OPCODE_SHIFT
);
34 pkt
.i2c_bus
= i2c_bus
;
35 pkt
.i2c_addr
= i2c_addr
;
36 pkt
.i2c_reg
= i2c_reg
;
38 rc
= hdev
->asic_funcs
->send_cpu_message(hdev
, (u32
*) &pkt
, sizeof(pkt
),
39 HL_DEVICE_TIMEOUT_USEC
, (long *) val
);
42 dev_err(hdev
->dev
, "Failed to read from I2C, error %d\n", rc
);
47 static int hl_debugfs_i2c_write(struct hl_device
*hdev
, u8 i2c_bus
, u8 i2c_addr
,
50 struct armcp_packet pkt
;
53 if (hl_device_disabled_or_in_reset(hdev
))
56 memset(&pkt
, 0, sizeof(pkt
));
58 pkt
.ctl
= cpu_to_le32(ARMCP_PACKET_I2C_WR
<<
59 ARMCP_PKT_CTL_OPCODE_SHIFT
);
60 pkt
.i2c_bus
= i2c_bus
;
61 pkt
.i2c_addr
= i2c_addr
;
62 pkt
.i2c_reg
= i2c_reg
;
63 pkt
.value
= cpu_to_le64(val
);
65 rc
= hdev
->asic_funcs
->send_cpu_message(hdev
, (u32
*) &pkt
, sizeof(pkt
),
66 HL_DEVICE_TIMEOUT_USEC
, NULL
);
69 dev_err(hdev
->dev
, "Failed to write to I2C, error %d\n", rc
);
74 static void hl_debugfs_led_set(struct hl_device
*hdev
, u8 led
, u8 state
)
76 struct armcp_packet pkt
;
79 if (hl_device_disabled_or_in_reset(hdev
))
82 memset(&pkt
, 0, sizeof(pkt
));
84 pkt
.ctl
= cpu_to_le32(ARMCP_PACKET_LED_SET
<<
85 ARMCP_PKT_CTL_OPCODE_SHIFT
);
86 pkt
.led_index
= cpu_to_le32(led
);
87 pkt
.value
= cpu_to_le64(state
);
89 rc
= hdev
->asic_funcs
->send_cpu_message(hdev
, (u32
*) &pkt
, sizeof(pkt
),
90 HL_DEVICE_TIMEOUT_USEC
, NULL
);
93 dev_err(hdev
->dev
, "Failed to set LED %d, error %d\n", led
, rc
);
96 static int command_buffers_show(struct seq_file
*s
, void *data
)
98 struct hl_debugfs_entry
*entry
= s
->private;
99 struct hl_dbg_device_entry
*dev_entry
= entry
->dev_entry
;
103 spin_lock(&dev_entry
->cb_spinlock
);
105 list_for_each_entry(cb
, &dev_entry
->cb_list
, debugfs_list
) {
109 seq_puts(s
, " CB ID CTX ID CB size CB RefCnt mmap? CS counter\n");
110 seq_puts(s
, "---------------------------------------------------------------\n");
113 " %03d %d 0x%08x %d %d %d\n",
114 cb
->id
, cb
->ctx_id
, cb
->size
,
115 kref_read(&cb
->refcount
),
116 cb
->mmap
, cb
->cs_cnt
);
119 spin_unlock(&dev_entry
->cb_spinlock
);
127 static int command_submission_show(struct seq_file
*s
, void *data
)
129 struct hl_debugfs_entry
*entry
= s
->private;
130 struct hl_dbg_device_entry
*dev_entry
= entry
->dev_entry
;
134 spin_lock(&dev_entry
->cs_spinlock
);
136 list_for_each_entry(cs
, &dev_entry
->cs_list
, debugfs_list
) {
140 seq_puts(s
, " CS ID CTX ASID CS RefCnt Submitted Completed\n");
141 seq_puts(s
, "------------------------------------------------------\n");
144 " %llu %d %d %d %d\n",
145 cs
->sequence
, cs
->ctx
->asid
,
146 kref_read(&cs
->refcount
),
147 cs
->submitted
, cs
->completed
);
150 spin_unlock(&dev_entry
->cs_spinlock
);
158 static int command_submission_jobs_show(struct seq_file
*s
, void *data
)
160 struct hl_debugfs_entry
*entry
= s
->private;
161 struct hl_dbg_device_entry
*dev_entry
= entry
->dev_entry
;
162 struct hl_cs_job
*job
;
165 spin_lock(&dev_entry
->cs_job_spinlock
);
167 list_for_each_entry(job
, &dev_entry
->cs_job_list
, debugfs_list
) {
171 seq_puts(s
, " JOB ID CS ID CTX ASID H/W Queue\n");
172 seq_puts(s
, "---------------------------------------\n");
176 " %02d %llu %d %d\n",
177 job
->id
, job
->cs
->sequence
, job
->cs
->ctx
->asid
,
182 job
->id
, HL_KERNEL_ASID_ID
, job
->hw_queue_id
);
185 spin_unlock(&dev_entry
->cs_job_spinlock
);
193 static int userptr_show(struct seq_file
*s
, void *data
)
195 struct hl_debugfs_entry
*entry
= s
->private;
196 struct hl_dbg_device_entry
*dev_entry
= entry
->dev_entry
;
197 struct hl_userptr
*userptr
;
198 char dma_dir
[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
199 "DMA_FROM_DEVICE", "DMA_NONE"};
202 spin_lock(&dev_entry
->userptr_spinlock
);
204 list_for_each_entry(userptr
, &dev_entry
->userptr_list
, debugfs_list
) {
208 seq_puts(s
, " user virtual address size dma dir\n");
209 seq_puts(s
, "----------------------------------------------------------\n");
212 " 0x%-14llx %-10u %-30s\n",
213 userptr
->addr
, userptr
->size
, dma_dir
[userptr
->dir
]);
216 spin_unlock(&dev_entry
->userptr_spinlock
);
224 static int vm_show(struct seq_file
*s
, void *data
)
226 struct hl_debugfs_entry
*entry
= s
->private;
227 struct hl_dbg_device_entry
*dev_entry
= entry
->dev_entry
;
230 struct hl_vm_hash_node
*hnode
;
231 struct hl_userptr
*userptr
;
232 struct hl_vm_phys_pg_pack
*phys_pg_pack
= NULL
;
233 enum vm_type_t
*vm_type
;
238 if (!dev_entry
->hdev
->mmu_enable
)
241 spin_lock(&dev_entry
->ctx_mem_hash_spinlock
);
243 list_for_each_entry(ctx
, &dev_entry
->ctx_mem_hash_list
, debugfs_list
) {
245 seq_puts(s
, "\n\n----------------------------------------------------");
246 seq_puts(s
, "\n----------------------------------------------------\n\n");
247 seq_printf(s
, "ctx asid: %u\n", ctx
->asid
);
249 seq_puts(s
, "\nmappings:\n\n");
250 seq_puts(s
, " virtual address size handle\n");
251 seq_puts(s
, "----------------------------------------------------\n");
252 mutex_lock(&ctx
->mem_hash_lock
);
253 hash_for_each(ctx
->mem_hash
, i
, hnode
, node
) {
254 vm_type
= hnode
->ptr
;
256 if (*vm_type
== VM_TYPE_USERPTR
) {
257 userptr
= hnode
->ptr
;
259 " 0x%-14llx %-10u\n",
260 hnode
->vaddr
, userptr
->size
);
262 phys_pg_pack
= hnode
->ptr
;
264 " 0x%-14llx %-10llu %-4u\n",
265 hnode
->vaddr
, phys_pg_pack
->total_size
,
266 phys_pg_pack
->handle
);
269 mutex_unlock(&ctx
->mem_hash_lock
);
272 spin_lock(&vm
->idr_lock
);
274 if (!idr_is_empty(&vm
->phys_pg_pack_handles
))
275 seq_puts(s
, "\n\nallocations:\n");
277 idr_for_each_entry(&vm
->phys_pg_pack_handles
, phys_pg_pack
, i
) {
278 if (phys_pg_pack
->asid
!= ctx
->asid
)
281 seq_printf(s
, "\nhandle: %u\n", phys_pg_pack
->handle
);
282 seq_printf(s
, "page size: %u\n\n",
283 phys_pg_pack
->page_size
);
284 seq_puts(s
, " physical address\n");
285 seq_puts(s
, "---------------------\n");
286 for (j
= 0 ; j
< phys_pg_pack
->npages
; j
++) {
287 seq_printf(s
, " 0x%-14llx\n",
288 phys_pg_pack
->pages
[j
]);
291 spin_unlock(&vm
->idr_lock
);
295 spin_unlock(&dev_entry
->ctx_mem_hash_spinlock
);
303 /* these inline functions are copied from mmu.c */
304 static inline u64
get_hop0_addr(struct hl_ctx
*ctx
)
306 return ctx
->hdev
->asic_prop
.mmu_pgt_addr
+
307 (ctx
->asid
* ctx
->hdev
->asic_prop
.mmu_hop_table_size
);
310 static inline u64
get_hopN_pte_addr(struct hl_ctx
*ctx
, u64 hop_addr
,
311 u64 virt_addr
, u64 mask
, u64 shift
)
313 return hop_addr
+ ctx
->hdev
->asic_prop
.mmu_pte_size
*
314 ((virt_addr
& mask
) >> shift
);
317 static inline u64
get_hop0_pte_addr(struct hl_ctx
*ctx
,
318 struct hl_mmu_properties
*mmu_specs
,
319 u64 hop_addr
, u64 vaddr
)
321 return get_hopN_pte_addr(ctx
, hop_addr
, vaddr
, mmu_specs
->hop0_mask
,
322 mmu_specs
->hop0_shift
);
325 static inline u64
get_hop1_pte_addr(struct hl_ctx
*ctx
,
326 struct hl_mmu_properties
*mmu_specs
,
327 u64 hop_addr
, u64 vaddr
)
329 return get_hopN_pte_addr(ctx
, hop_addr
, vaddr
, mmu_specs
->hop1_mask
,
330 mmu_specs
->hop1_shift
);
333 static inline u64
get_hop2_pte_addr(struct hl_ctx
*ctx
,
334 struct hl_mmu_properties
*mmu_specs
,
335 u64 hop_addr
, u64 vaddr
)
337 return get_hopN_pte_addr(ctx
, hop_addr
, vaddr
, mmu_specs
->hop2_mask
,
338 mmu_specs
->hop2_shift
);
341 static inline u64
get_hop3_pte_addr(struct hl_ctx
*ctx
,
342 struct hl_mmu_properties
*mmu_specs
,
343 u64 hop_addr
, u64 vaddr
)
345 return get_hopN_pte_addr(ctx
, hop_addr
, vaddr
, mmu_specs
->hop3_mask
,
346 mmu_specs
->hop3_shift
);
349 static inline u64
get_hop4_pte_addr(struct hl_ctx
*ctx
,
350 struct hl_mmu_properties
*mmu_specs
,
351 u64 hop_addr
, u64 vaddr
)
353 return get_hopN_pte_addr(ctx
, hop_addr
, vaddr
, mmu_specs
->hop4_mask
,
354 mmu_specs
->hop4_shift
);
357 static inline u64
get_next_hop_addr(u64 curr_pte
)
359 if (curr_pte
& PAGE_PRESENT_MASK
)
360 return curr_pte
& HOP_PHYS_ADDR_MASK
;
365 static int mmu_show(struct seq_file
*s
, void *data
)
367 struct hl_debugfs_entry
*entry
= s
->private;
368 struct hl_dbg_device_entry
*dev_entry
= entry
->dev_entry
;
369 struct hl_device
*hdev
= dev_entry
->hdev
;
370 struct asic_fixed_properties
*prop
= &hdev
->asic_prop
;
371 struct hl_mmu_properties
*mmu_prop
;
375 u64 hop0_addr
= 0, hop0_pte_addr
= 0, hop0_pte
= 0,
376 hop1_addr
= 0, hop1_pte_addr
= 0, hop1_pte
= 0,
377 hop2_addr
= 0, hop2_pte_addr
= 0, hop2_pte
= 0,
378 hop3_addr
= 0, hop3_pte_addr
= 0, hop3_pte
= 0,
379 hop4_addr
= 0, hop4_pte_addr
= 0, hop4_pte
= 0,
380 virt_addr
= dev_entry
->mmu_addr
;
382 if (!hdev
->mmu_enable
)
385 if (dev_entry
->mmu_asid
== HL_KERNEL_ASID_ID
)
386 ctx
= hdev
->kernel_ctx
;
388 ctx
= hdev
->compute_ctx
;
391 dev_err(hdev
->dev
, "no ctx available\n");
395 is_dram_addr
= hl_mem_area_inside_range(virt_addr
, prop
->dmmu
.page_size
,
396 prop
->va_space_dram_start_address
,
397 prop
->va_space_dram_end_address
);
399 mmu_prop
= is_dram_addr
? &prop
->dmmu
: &prop
->pmmu
;
401 mutex_lock(&ctx
->mmu_lock
);
403 /* the following lookup is copied from unmap() in mmu.c */
405 hop0_addr
= get_hop0_addr(ctx
);
406 hop0_pte_addr
= get_hop0_pte_addr(ctx
, mmu_prop
, hop0_addr
, virt_addr
);
407 hop0_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop0_pte_addr
);
408 hop1_addr
= get_next_hop_addr(hop0_pte
);
410 if (hop1_addr
== ULLONG_MAX
)
413 hop1_pte_addr
= get_hop1_pte_addr(ctx
, mmu_prop
, hop1_addr
, virt_addr
);
414 hop1_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop1_pte_addr
);
415 hop2_addr
= get_next_hop_addr(hop1_pte
);
417 if (hop2_addr
== ULLONG_MAX
)
420 hop2_pte_addr
= get_hop2_pte_addr(ctx
, mmu_prop
, hop2_addr
, virt_addr
);
421 hop2_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop2_pte_addr
);
422 hop3_addr
= get_next_hop_addr(hop2_pte
);
424 if (hop3_addr
== ULLONG_MAX
)
427 hop3_pte_addr
= get_hop3_pte_addr(ctx
, mmu_prop
, hop3_addr
, virt_addr
);
428 hop3_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop3_pte_addr
);
430 if (!(hop3_pte
& LAST_MASK
)) {
431 hop4_addr
= get_next_hop_addr(hop3_pte
);
433 if (hop4_addr
== ULLONG_MAX
)
436 hop4_pte_addr
= get_hop4_pte_addr(ctx
, mmu_prop
, hop4_addr
,
438 hop4_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop4_pte_addr
);
439 if (!(hop4_pte
& PAGE_PRESENT_MASK
))
442 if (!(hop3_pte
& PAGE_PRESENT_MASK
))
446 seq_printf(s
, "asid: %u, virt_addr: 0x%llx\n",
447 dev_entry
->mmu_asid
, dev_entry
->mmu_addr
);
449 seq_printf(s
, "hop0_addr: 0x%llx\n", hop0_addr
);
450 seq_printf(s
, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr
);
451 seq_printf(s
, "hop0_pte: 0x%llx\n", hop0_pte
);
453 seq_printf(s
, "hop1_addr: 0x%llx\n", hop1_addr
);
454 seq_printf(s
, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr
);
455 seq_printf(s
, "hop1_pte: 0x%llx\n", hop1_pte
);
457 seq_printf(s
, "hop2_addr: 0x%llx\n", hop2_addr
);
458 seq_printf(s
, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr
);
459 seq_printf(s
, "hop2_pte: 0x%llx\n", hop2_pte
);
461 seq_printf(s
, "hop3_addr: 0x%llx\n", hop3_addr
);
462 seq_printf(s
, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr
);
463 seq_printf(s
, "hop3_pte: 0x%llx\n", hop3_pte
);
465 if (!(hop3_pte
& LAST_MASK
)) {
466 seq_printf(s
, "hop4_addr: 0x%llx\n", hop4_addr
);
467 seq_printf(s
, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr
);
468 seq_printf(s
, "hop4_pte: 0x%llx\n", hop4_pte
);
474 dev_err(hdev
->dev
, "virt addr 0x%llx is not mapped to phys addr\n",
477 mutex_unlock(&ctx
->mmu_lock
);
482 static ssize_t
mmu_write(struct file
*file
, const char __user
*buf
,
483 size_t count
, loff_t
*f_pos
)
485 struct seq_file
*s
= file
->private_data
;
486 struct hl_debugfs_entry
*entry
= s
->private;
487 struct hl_dbg_device_entry
*dev_entry
= entry
->dev_entry
;
488 struct hl_device
*hdev
= dev_entry
->hdev
;
489 char kbuf
[MMU_KBUF_SIZE
];
493 if (!hdev
->mmu_enable
)
496 if (count
> sizeof(kbuf
) - 1)
498 if (copy_from_user(kbuf
, buf
, count
))
502 c
= strchr(kbuf
, ' ');
507 rc
= kstrtouint(kbuf
, 10, &dev_entry
->mmu_asid
);
511 if (strncmp(c
+1, "0x", 2))
513 rc
= kstrtoull(c
+3, 16, &dev_entry
->mmu_addr
);
520 dev_err(hdev
->dev
, "usage: echo <asid> <0xaddr> > mmu\n");
525 static int engines_show(struct seq_file
*s
, void *data
)
527 struct hl_debugfs_entry
*entry
= s
->private;
528 struct hl_dbg_device_entry
*dev_entry
= entry
->dev_entry
;
529 struct hl_device
*hdev
= dev_entry
->hdev
;
531 if (atomic_read(&hdev
->in_reset
)) {
532 dev_warn_ratelimited(hdev
->dev
,
533 "Can't check device idle during reset\n");
537 hdev
->asic_funcs
->is_device_idle(hdev
, NULL
, s
);
542 static bool hl_is_device_va(struct hl_device
*hdev
, u64 addr
)
544 struct asic_fixed_properties
*prop
= &hdev
->asic_prop
;
546 if (!hdev
->mmu_enable
)
549 if (hdev
->dram_supports_virtual_memory
&&
550 addr
>= prop
->va_space_dram_start_address
&&
551 addr
< prop
->va_space_dram_end_address
)
554 if (addr
>= prop
->va_space_host_start_address
&&
555 addr
< prop
->va_space_host_end_address
)
561 static int device_va_to_pa(struct hl_device
*hdev
, u64 virt_addr
,
564 struct hl_ctx
*ctx
= hdev
->compute_ctx
;
565 struct asic_fixed_properties
*prop
= &hdev
->asic_prop
;
566 struct hl_mmu_properties
*mmu_prop
;
567 u64 hop_addr
, hop_pte_addr
, hop_pte
;
568 u64 offset_mask
= HOP4_MASK
| FLAGS_MASK
;
573 dev_err(hdev
->dev
, "no ctx available\n");
577 is_dram_addr
= hl_mem_area_inside_range(virt_addr
, prop
->dmmu
.page_size
,
578 prop
->va_space_dram_start_address
,
579 prop
->va_space_dram_end_address
);
581 mmu_prop
= is_dram_addr
? &prop
->dmmu
: &prop
->pmmu
;
583 mutex_lock(&ctx
->mmu_lock
);
586 hop_addr
= get_hop0_addr(ctx
);
587 hop_pte_addr
= get_hop0_pte_addr(ctx
, mmu_prop
, hop_addr
, virt_addr
);
588 hop_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop_pte_addr
);
591 hop_addr
= get_next_hop_addr(hop_pte
);
592 if (hop_addr
== ULLONG_MAX
)
594 hop_pte_addr
= get_hop1_pte_addr(ctx
, mmu_prop
, hop_addr
, virt_addr
);
595 hop_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop_pte_addr
);
598 hop_addr
= get_next_hop_addr(hop_pte
);
599 if (hop_addr
== ULLONG_MAX
)
601 hop_pte_addr
= get_hop2_pte_addr(ctx
, mmu_prop
, hop_addr
, virt_addr
);
602 hop_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop_pte_addr
);
605 hop_addr
= get_next_hop_addr(hop_pte
);
606 if (hop_addr
== ULLONG_MAX
)
608 hop_pte_addr
= get_hop3_pte_addr(ctx
, mmu_prop
, hop_addr
, virt_addr
);
609 hop_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop_pte_addr
);
611 if (!(hop_pte
& LAST_MASK
)) {
613 hop_addr
= get_next_hop_addr(hop_pte
);
614 if (hop_addr
== ULLONG_MAX
)
616 hop_pte_addr
= get_hop4_pte_addr(ctx
, mmu_prop
, hop_addr
,
618 hop_pte
= hdev
->asic_funcs
->read_pte(hdev
, hop_pte_addr
);
620 offset_mask
= FLAGS_MASK
;
623 if (!(hop_pte
& PAGE_PRESENT_MASK
))
626 *phys_addr
= (hop_pte
& ~offset_mask
) | (virt_addr
& offset_mask
);
631 dev_err(hdev
->dev
, "virt addr 0x%llx is not mapped to phys addr\n",
635 mutex_unlock(&ctx
->mmu_lock
);
639 static ssize_t
hl_data_read32(struct file
*f
, char __user
*buf
,
640 size_t count
, loff_t
*ppos
)
642 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
643 struct hl_device
*hdev
= entry
->hdev
;
645 u64 addr
= entry
->addr
;
649 if (atomic_read(&hdev
->in_reset
)) {
650 dev_warn_ratelimited(hdev
->dev
, "Can't read during reset\n");
657 if (hl_is_device_va(hdev
, addr
)) {
658 rc
= device_va_to_pa(hdev
, addr
, &addr
);
663 rc
= hdev
->asic_funcs
->debugfs_read32(hdev
, addr
, &val
);
665 dev_err(hdev
->dev
, "Failed to read from 0x%010llx\n", addr
);
669 sprintf(tmp_buf
, "0x%08x\n", val
);
670 return simple_read_from_buffer(buf
, count
, ppos
, tmp_buf
,
674 static ssize_t
hl_data_write32(struct file
*f
, const char __user
*buf
,
675 size_t count
, loff_t
*ppos
)
677 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
678 struct hl_device
*hdev
= entry
->hdev
;
679 u64 addr
= entry
->addr
;
683 if (atomic_read(&hdev
->in_reset
)) {
684 dev_warn_ratelimited(hdev
->dev
, "Can't write during reset\n");
688 rc
= kstrtouint_from_user(buf
, count
, 16, &value
);
692 if (hl_is_device_va(hdev
, addr
)) {
693 rc
= device_va_to_pa(hdev
, addr
, &addr
);
698 rc
= hdev
->asic_funcs
->debugfs_write32(hdev
, addr
, value
);
700 dev_err(hdev
->dev
, "Failed to write 0x%08x to 0x%010llx\n",
708 static ssize_t
hl_get_power_state(struct file
*f
, char __user
*buf
,
709 size_t count
, loff_t
*ppos
)
711 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
712 struct hl_device
*hdev
= entry
->hdev
;
719 if (hdev
->pdev
->current_state
== PCI_D0
)
721 else if (hdev
->pdev
->current_state
== PCI_D3hot
)
727 "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i
);
728 return simple_read_from_buffer(buf
, count
, ppos
, tmp_buf
,
732 static ssize_t
hl_set_power_state(struct file
*f
, const char __user
*buf
,
733 size_t count
, loff_t
*ppos
)
735 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
736 struct hl_device
*hdev
= entry
->hdev
;
740 rc
= kstrtouint_from_user(buf
, count
, 10, &value
);
745 pci_set_power_state(hdev
->pdev
, PCI_D0
);
746 pci_restore_state(hdev
->pdev
);
747 rc
= pci_enable_device(hdev
->pdev
);
748 } else if (value
== 2) {
749 pci_save_state(hdev
->pdev
);
750 pci_disable_device(hdev
->pdev
);
751 pci_set_power_state(hdev
->pdev
, PCI_D3hot
);
753 dev_dbg(hdev
->dev
, "invalid power state value %u\n", value
);
760 static ssize_t
hl_i2c_data_read(struct file
*f
, char __user
*buf
,
761 size_t count
, loff_t
*ppos
)
763 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
764 struct hl_device
*hdev
= entry
->hdev
;
772 rc
= hl_debugfs_i2c_read(hdev
, entry
->i2c_bus
, entry
->i2c_addr
,
773 entry
->i2c_reg
, &val
);
776 "Failed to read from I2C bus %d, addr %d, reg %d\n",
777 entry
->i2c_bus
, entry
->i2c_addr
, entry
->i2c_reg
);
781 sprintf(tmp_buf
, "0x%02x\n", val
);
782 rc
= simple_read_from_buffer(buf
, count
, ppos
, tmp_buf
,
788 static ssize_t
hl_i2c_data_write(struct file
*f
, const char __user
*buf
,
789 size_t count
, loff_t
*ppos
)
791 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
792 struct hl_device
*hdev
= entry
->hdev
;
796 rc
= kstrtouint_from_user(buf
, count
, 16, &value
);
800 rc
= hl_debugfs_i2c_write(hdev
, entry
->i2c_bus
, entry
->i2c_addr
,
801 entry
->i2c_reg
, value
);
804 "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
805 value
, entry
->i2c_bus
, entry
->i2c_addr
, entry
->i2c_reg
);
812 static ssize_t
hl_led0_write(struct file
*f
, const char __user
*buf
,
813 size_t count
, loff_t
*ppos
)
815 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
816 struct hl_device
*hdev
= entry
->hdev
;
820 rc
= kstrtouint_from_user(buf
, count
, 10, &value
);
824 value
= value
? 1 : 0;
826 hl_debugfs_led_set(hdev
, 0, value
);
831 static ssize_t
hl_led1_write(struct file
*f
, const char __user
*buf
,
832 size_t count
, loff_t
*ppos
)
834 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
835 struct hl_device
*hdev
= entry
->hdev
;
839 rc
= kstrtouint_from_user(buf
, count
, 10, &value
);
843 value
= value
? 1 : 0;
845 hl_debugfs_led_set(hdev
, 1, value
);
850 static ssize_t
hl_led2_write(struct file
*f
, const char __user
*buf
,
851 size_t count
, loff_t
*ppos
)
853 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
854 struct hl_device
*hdev
= entry
->hdev
;
858 rc
= kstrtouint_from_user(buf
, count
, 10, &value
);
862 value
= value
? 1 : 0;
864 hl_debugfs_led_set(hdev
, 2, value
);
869 static ssize_t
hl_device_read(struct file
*f
, char __user
*buf
,
870 size_t count
, loff_t
*ppos
)
872 static const char *help
=
873 "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
874 return simple_read_from_buffer(buf
, count
, ppos
, help
, strlen(help
));
877 static ssize_t
hl_device_write(struct file
*f
, const char __user
*buf
,
878 size_t count
, loff_t
*ppos
)
880 struct hl_dbg_device_entry
*entry
= file_inode(f
)->i_private
;
881 struct hl_device
*hdev
= entry
->hdev
;
884 /* don't allow partial writes */
888 simple_write_to_buffer(data
, 29, ppos
, buf
, count
);
890 if (strncmp("disable", data
, strlen("disable")) == 0) {
891 hdev
->disabled
= true;
892 } else if (strncmp("enable", data
, strlen("enable")) == 0) {
893 hdev
->disabled
= false;
894 } else if (strncmp("suspend", data
, strlen("suspend")) == 0) {
895 hdev
->asic_funcs
->suspend(hdev
);
896 } else if (strncmp("resume", data
, strlen("resume")) == 0) {
897 hdev
->asic_funcs
->resume(hdev
);
898 } else if (strncmp("cpu_timeout", data
, strlen("cpu_timeout")) == 0) {
899 hdev
->device_cpu_disabled
= true;
902 "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
909 static const struct file_operations hl_data32b_fops
= {
910 .owner
= THIS_MODULE
,
911 .read
= hl_data_read32
,
912 .write
= hl_data_write32
915 static const struct file_operations hl_i2c_data_fops
= {
916 .owner
= THIS_MODULE
,
917 .read
= hl_i2c_data_read
,
918 .write
= hl_i2c_data_write
921 static const struct file_operations hl_power_fops
= {
922 .owner
= THIS_MODULE
,
923 .read
= hl_get_power_state
,
924 .write
= hl_set_power_state
927 static const struct file_operations hl_led0_fops
= {
928 .owner
= THIS_MODULE
,
929 .write
= hl_led0_write
932 static const struct file_operations hl_led1_fops
= {
933 .owner
= THIS_MODULE
,
934 .write
= hl_led1_write
937 static const struct file_operations hl_led2_fops
= {
938 .owner
= THIS_MODULE
,
939 .write
= hl_led2_write
942 static const struct file_operations hl_device_fops
= {
943 .owner
= THIS_MODULE
,
944 .read
= hl_device_read
,
945 .write
= hl_device_write
948 static const struct hl_info_list hl_debugfs_list
[] = {
949 {"command_buffers", command_buffers_show
, NULL
},
950 {"command_submission", command_submission_show
, NULL
},
951 {"command_submission_jobs", command_submission_jobs_show
, NULL
},
952 {"userptr", userptr_show
, NULL
},
953 {"vm", vm_show
, NULL
},
954 {"mmu", mmu_show
, mmu_write
},
955 {"engines", engines_show
, NULL
}
958 static int hl_debugfs_open(struct inode
*inode
, struct file
*file
)
960 struct hl_debugfs_entry
*node
= inode
->i_private
;
962 return single_open(file
, node
->info_ent
->show
, node
);
965 static ssize_t
hl_debugfs_write(struct file
*file
, const char __user
*buf
,
966 size_t count
, loff_t
*f_pos
)
968 struct hl_debugfs_entry
*node
= file
->f_inode
->i_private
;
970 if (node
->info_ent
->write
)
971 return node
->info_ent
->write(file
, buf
, count
, f_pos
);
977 static const struct file_operations hl_debugfs_fops
= {
978 .owner
= THIS_MODULE
,
979 .open
= hl_debugfs_open
,
981 .write
= hl_debugfs_write
,
983 .release
= single_release
,
986 void hl_debugfs_add_device(struct hl_device
*hdev
)
988 struct hl_dbg_device_entry
*dev_entry
= &hdev
->hl_debugfs
;
989 int count
= ARRAY_SIZE(hl_debugfs_list
);
990 struct hl_debugfs_entry
*entry
;
994 dev_entry
->hdev
= hdev
;
995 dev_entry
->entry_arr
= kmalloc_array(count
,
996 sizeof(struct hl_debugfs_entry
),
998 if (!dev_entry
->entry_arr
)
1001 INIT_LIST_HEAD(&dev_entry
->file_list
);
1002 INIT_LIST_HEAD(&dev_entry
->cb_list
);
1003 INIT_LIST_HEAD(&dev_entry
->cs_list
);
1004 INIT_LIST_HEAD(&dev_entry
->cs_job_list
);
1005 INIT_LIST_HEAD(&dev_entry
->userptr_list
);
1006 INIT_LIST_HEAD(&dev_entry
->ctx_mem_hash_list
);
1007 mutex_init(&dev_entry
->file_mutex
);
1008 spin_lock_init(&dev_entry
->cb_spinlock
);
1009 spin_lock_init(&dev_entry
->cs_spinlock
);
1010 spin_lock_init(&dev_entry
->cs_job_spinlock
);
1011 spin_lock_init(&dev_entry
->userptr_spinlock
);
1012 spin_lock_init(&dev_entry
->ctx_mem_hash_spinlock
);
1014 dev_entry
->root
= debugfs_create_dir(dev_name(hdev
->dev
),
1017 debugfs_create_x64("addr",
1022 debugfs_create_file("data32",
1028 debugfs_create_file("set_power_state",
1034 debugfs_create_u8("i2c_bus",
1037 &dev_entry
->i2c_bus
);
1039 debugfs_create_u8("i2c_addr",
1042 &dev_entry
->i2c_addr
);
1044 debugfs_create_u8("i2c_reg",
1047 &dev_entry
->i2c_reg
);
1049 debugfs_create_file("i2c_data",
1055 debugfs_create_file("led0",
1061 debugfs_create_file("led1",
1067 debugfs_create_file("led2",
1073 debugfs_create_file("device",
1079 for (i
= 0, entry
= dev_entry
->entry_arr
; i
< count
; i
++, entry
++) {
1081 ent
= debugfs_create_file(hl_debugfs_list
[i
].name
,
1087 entry
->info_ent
= &hl_debugfs_list
[i
];
1088 entry
->dev_entry
= dev_entry
;
1092 void hl_debugfs_remove_device(struct hl_device
*hdev
)
1094 struct hl_dbg_device_entry
*entry
= &hdev
->hl_debugfs
;
1096 debugfs_remove_recursive(entry
->root
);
1098 mutex_destroy(&entry
->file_mutex
);
1099 kfree(entry
->entry_arr
);
1102 void hl_debugfs_add_file(struct hl_fpriv
*hpriv
)
1104 struct hl_dbg_device_entry
*dev_entry
= &hpriv
->hdev
->hl_debugfs
;
1106 mutex_lock(&dev_entry
->file_mutex
);
1107 list_add(&hpriv
->debugfs_list
, &dev_entry
->file_list
);
1108 mutex_unlock(&dev_entry
->file_mutex
);
1111 void hl_debugfs_remove_file(struct hl_fpriv
*hpriv
)
1113 struct hl_dbg_device_entry
*dev_entry
= &hpriv
->hdev
->hl_debugfs
;
1115 mutex_lock(&dev_entry
->file_mutex
);
1116 list_del(&hpriv
->debugfs_list
);
1117 mutex_unlock(&dev_entry
->file_mutex
);
1120 void hl_debugfs_add_cb(struct hl_cb
*cb
)
1122 struct hl_dbg_device_entry
*dev_entry
= &cb
->hdev
->hl_debugfs
;
1124 spin_lock(&dev_entry
->cb_spinlock
);
1125 list_add(&cb
->debugfs_list
, &dev_entry
->cb_list
);
1126 spin_unlock(&dev_entry
->cb_spinlock
);
1129 void hl_debugfs_remove_cb(struct hl_cb
*cb
)
1131 struct hl_dbg_device_entry
*dev_entry
= &cb
->hdev
->hl_debugfs
;
1133 spin_lock(&dev_entry
->cb_spinlock
);
1134 list_del(&cb
->debugfs_list
);
1135 spin_unlock(&dev_entry
->cb_spinlock
);
1138 void hl_debugfs_add_cs(struct hl_cs
*cs
)
1140 struct hl_dbg_device_entry
*dev_entry
= &cs
->ctx
->hdev
->hl_debugfs
;
1142 spin_lock(&dev_entry
->cs_spinlock
);
1143 list_add(&cs
->debugfs_list
, &dev_entry
->cs_list
);
1144 spin_unlock(&dev_entry
->cs_spinlock
);
1147 void hl_debugfs_remove_cs(struct hl_cs
*cs
)
1149 struct hl_dbg_device_entry
*dev_entry
= &cs
->ctx
->hdev
->hl_debugfs
;
1151 spin_lock(&dev_entry
->cs_spinlock
);
1152 list_del(&cs
->debugfs_list
);
1153 spin_unlock(&dev_entry
->cs_spinlock
);
1156 void hl_debugfs_add_job(struct hl_device
*hdev
, struct hl_cs_job
*job
)
1158 struct hl_dbg_device_entry
*dev_entry
= &hdev
->hl_debugfs
;
1160 spin_lock(&dev_entry
->cs_job_spinlock
);
1161 list_add(&job
->debugfs_list
, &dev_entry
->cs_job_list
);
1162 spin_unlock(&dev_entry
->cs_job_spinlock
);
1165 void hl_debugfs_remove_job(struct hl_device
*hdev
, struct hl_cs_job
*job
)
1167 struct hl_dbg_device_entry
*dev_entry
= &hdev
->hl_debugfs
;
1169 spin_lock(&dev_entry
->cs_job_spinlock
);
1170 list_del(&job
->debugfs_list
);
1171 spin_unlock(&dev_entry
->cs_job_spinlock
);
1174 void hl_debugfs_add_userptr(struct hl_device
*hdev
, struct hl_userptr
*userptr
)
1176 struct hl_dbg_device_entry
*dev_entry
= &hdev
->hl_debugfs
;
1178 spin_lock(&dev_entry
->userptr_spinlock
);
1179 list_add(&userptr
->debugfs_list
, &dev_entry
->userptr_list
);
1180 spin_unlock(&dev_entry
->userptr_spinlock
);
1183 void hl_debugfs_remove_userptr(struct hl_device
*hdev
,
1184 struct hl_userptr
*userptr
)
1186 struct hl_dbg_device_entry
*dev_entry
= &hdev
->hl_debugfs
;
1188 spin_lock(&dev_entry
->userptr_spinlock
);
1189 list_del(&userptr
->debugfs_list
);
1190 spin_unlock(&dev_entry
->userptr_spinlock
);
1193 void hl_debugfs_add_ctx_mem_hash(struct hl_device
*hdev
, struct hl_ctx
*ctx
)
1195 struct hl_dbg_device_entry
*dev_entry
= &hdev
->hl_debugfs
;
1197 spin_lock(&dev_entry
->ctx_mem_hash_spinlock
);
1198 list_add(&ctx
->debugfs_list
, &dev_entry
->ctx_mem_hash_list
);
1199 spin_unlock(&dev_entry
->ctx_mem_hash_spinlock
);
1202 void hl_debugfs_remove_ctx_mem_hash(struct hl_device
*hdev
, struct hl_ctx
*ctx
)
1204 struct hl_dbg_device_entry
*dev_entry
= &hdev
->hl_debugfs
;
1206 spin_lock(&dev_entry
->ctx_mem_hash_spinlock
);
1207 list_del(&ctx
->debugfs_list
);
1208 spin_unlock(&dev_entry
->ctx_mem_hash_spinlock
);
1211 void __init
hl_debugfs_init(void)
1213 hl_debug_root
= debugfs_create_dir("habanalabs", NULL
);
1216 void hl_debugfs_fini(void)
1218 debugfs_remove_recursive(hl_debug_root
);