// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
/**
 * struct hl_eqe_work - This structure is used to schedule work of EQ
 *                      entry and cpucp_reset event
 *
 * @eq_work: workqueue object to run when EQ entry is received
 * @hdev: pointer to device structure
 * @eq_entry: copy of the EQ entry
 */
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};
/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}
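/*
 * Worked example (illustrative, not from the original source): for any
 * pointer value p in [0, HL_CQ_LENGTH - 2], hl_cq_inc_ptr(p) returns
 * p + 1, while hl_cq_inc_ptr(HL_CQ_LENGTH - 1) wraps around to 0. The
 * compare-and-reset form avoids a '%' on the hot IRQ path and works for
 * any queue length, not only powers of two.
 */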
/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0
 */
inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}
/*
 * irq_handle_eqe - handle a single EQ entry in workqueue (process)
 * context; the entry was copied out of the ring by the hard-IRQ handler
 * below, so it is safe to free the work item once it has been handled.
 */
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}
/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 *
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;
	bool shadow_index_valid;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;

	if (hdev->disabled) {
		dev_dbg(hdev->dev,
			"Device disabled but received IRQ %d for CQ %d\n",
			irq, cq->hw_queue_id);
		return IRQ_HANDLED;
	}

	cq_base = cq->kernel_address;

	while (1) {
		bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
					CQ_ENTRY_READY_MASK)
						>> CQ_ENTRY_READY_SHIFT);

		if (!entry_ready)
			break;

		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

		shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_SHIFT);

		queue = &hdev->kernel_queues[cq->hw_queue_id];

		if ((shadow_index_valid) && (!hdev->disabled)) {
			job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
			queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
		}

		atomic_inc(&queue->ci);

		/* Clear CQ entry ready bit */
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);

		/* Increment free slots */
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}
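/*
 * Registration sketch (illustrative; this file does not register IRQs
 * itself): the ASIC-specific code is expected to wire the handler above
 * to an MSI/MSI-X vector via request_irq(), passing the hl_cq as the
 * cookie that arrives here as 'arg'. The vector index 'vec' and the
 * "hl-cq" name below are hypothetical:
 *
 *	rc = request_irq(pci_irq_vector(hdev->pdev, vec),
 *			 hl_irq_handler_cq, 0, "hl-cq",
 *			 &hdev->completion_queue[i]);
 *	if (rc)
 *		dev_err(hdev->dev, "Failed to request IRQ for CQ %d\n", i);
 */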
/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;

	eq_base = eq->kernel_address;

	while (1) {
		bool entry_ready =
			((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
				EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);

		if (!entry_ready)
			break;

		eq_entry = &eq_base[eq->ci];

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (hdev->disabled) {
			dev_warn(hdev->dev,
				"Device disabled but received IRQ %d for EQ\n",
				irq);
			goto skip_irq;
		}

		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}

skip_irq:
		/* Clear EQ entry ready bit */
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
							~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}
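/*
 * Design note: hl_irq_handler_eq() runs in hard-IRQ context, so the work
 * item is allocated with GFP_ATOMIC and the EQ entry is copied by value
 * into it. The copy is required because the ready bit is cleared and the
 * CI is advanced before irq_handle_eqe() runs, at which point the
 * firmware may already have reused the ring slot.
 */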
/**
 * hl_cq_init - main initialization function for a cq object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to
 *
 * Allocate dma-able memory for the completion queue and initialize fields
 * Returns 0 on success
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}
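/*
 * Usage sketch (hypothetical caller): a typical device-init path pairs
 * hl_cq_init() with hl_cq_fini(), one completion queue per H/W queue.
 * The loop bound 'cq_cnt' is illustrative:
 *
 *	for (i = 0 ; i < cq_cnt ; i++) {
 *		rc = hl_cq_init(hdev, &hdev->completion_queue[i], i);
 *		if (rc)
 *			goto cq_fini;
 *	}
 *	...
 *	hl_cq_fini(hdev, &hdev->completion_queue[i]);
 */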
/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
						 q->kernel_address,
						 q->bus_address);
}
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}
/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields
 * Returns 0 on success
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_EQ_SIZE_IN_BYTES,
							&q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->ci = 0;

	return 0;
}
/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
	/*
	 * Flush anything that was queued by hl_irq_handler_eq() so no
	 * deferred irq_handle_eqe() work runs after the queue is freed.
	 */
	flush_workqueue(hdev->eq_wq);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_EQ_SIZE_IN_BYTES,
					q->kernel_address);
}
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}