// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/**
 * struct hl_eqe_work - This structure is used to schedule work of EQ
 *                      entry and armcp_reset event
 *
 * @eq_work  - workqueue object to run when EQ entry is received
 * @hdev     - pointer to device structure
 * @eq_entry - copy of the EQ entry
 */
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};

/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}

/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0
 */
inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}

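/*
 * EQ entries are handled in work-queue (process) context rather than
 * directly in the IRQ handler, so the ASIC's handle_eqe callback is
 * free to sleep.
 */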
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}

/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 *
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;
	bool shadow_index_valid;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;

87 "Device disabled but received IRQ %d for CQ %d\n",
88 irq
, cq
->hw_queue_id
);
	cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;

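	/* Drain every entry whose ready bit is set; the loop below exits
	 * when it reaches the first entry still owned by the H/W.
	 */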
	while (1) {
		bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
					CQ_ENTRY_READY_MASK)
						>> CQ_ENTRY_READY_SHIFT);

		if (!entry_ready)
			break;

		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

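		/* The shadow index ties this completion back to the software
		 * job that submitted it, via the queue's shadow_queue array.
		 */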
		shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

		shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_SHIFT);

		queue = &hdev->kernel_queues[cq->hw_queue_id];

		if ((shadow_index_valid) && (!hdev->disabled)) {
			job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
			queue_work(hdev->cq_wq, &job->finish_work);
		}

		/* Update ci of the context's queue. There is no
		 * need to protect it with spinlock because this update is
		 * done only inside IRQ and there is a different IRQ per
		 * queue
		 */
		queue->ci = hl_queue_inc_ptr(queue->ci);

		/* Clear CQ entry ready bit */
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);

		/* Increment free slots */
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;

	eq_base = (struct hl_eq_entry *) (uintptr_t) eq->kernel_address;

	while (1) {
		bool entry_ready =
			((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
				EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);

		if (!entry_ready)
			break;

		eq_entry = &eq_base[eq->ci];

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (hdev->disabled) {
			dev_warn(hdev->dev,
				"Device disabled but received IRQ %d for EQ\n",
					irq);
			goto skip_irq;
		}

		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}

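		/* If the allocation failed the event is dropped; the entry is
		 * still consumed below so the queue keeps making progress.
		 */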
skip_irq:
		/* Clear EQ entry ready bit */
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
							~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}

/**
 * hl_cq_init - main initialization function for a cq object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to
 *
 * Allocate dma-able memory for the completion queue and initialize fields
 * Returns 0 on success
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

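	/* Compile-time check that a completion queue fits in a single page */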
	BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = (u64) (uintptr_t) p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}

/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
			(void *) (uintptr_t) q->kernel_address,
			q->bus_address);
}

void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}

/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields
 * Returns 0 on success
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);

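	/* The EQ is filled by the device CPU, so its memory is taken from
	 * the CPU-accessible DMA pool rather than regular coherent memory.
	 */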
	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_EQ_SIZE_IN_BYTES,
							&q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = (u64) (uintptr_t) p;
	q->ci = 0;

	return 0;
}

/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
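	/* Make sure all pending EQ work items have finished before the
	 * queue memory is returned to the pool.
	 */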
	flush_workqueue(hdev->eq_wq);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_EQ_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address);
}

void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset((void *) (uintptr_t) q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}