// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
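
/*
 * hl_ctx_fini - release all resources held by a context
 *
 * @ctx: pointer to the context structure
 *
 * Called only once the context's reference count reaches zero, i.e. when no
 * command submission of this context is still in flight. Releases the pending
 * fences and, for a user context, the ASID, memory mappings and CB VA pool.
 */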
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	u64 idle_mask = 0;
	int i;

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS, we increment the ref count and for
	 * every CS that was finished we decrement it and we won't arrive
	 * to this function unless the ref count is 0
	 */

	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but the
		 * Coresight might be still working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 * Stop only if this is the compute context, as there can be
		 * only one compute context
		 */
		if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
			hl_device_set_debug_mode(hdev, false);

		hdev->asic_funcs->ctx_fini(ctx);
		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);

		/* Scrub both SRAM and DRAM */
		hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);

		if ((!hdev->pldm) && (hdev->pdev) &&
				(!hdev->asic_funcs->is_device_idle(hdev,
							&idle_mask, NULL)))
			dev_notice(hdev->dev,
				"device not idle after user context is closed (0x%llx)\n",
				idle_mask);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		hl_mmu_ctx_fini(ctx);
	}
}
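
/*
 * hl_ctx_do_release - final release callback of the context refcount
 *
 * @ref: pointer to the refcount embedded in the context structure
 *
 * Tears down the context, drops the reference it holds on the owning process
 * private data (if any) and frees the context structure itself.
 */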
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv)
		hl_hpriv_put(ctx->hpriv);

	kfree(ctx);
}
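
/*
 * hl_ctx_create - create a user context for a process that opened the device
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the private data of the process
 *
 * Allocates and initializes a context, registers it in the context manager of
 * the process and makes it the compute context of the device.
 */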
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&mgr->ctx_lock);
	rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&mgr->ctx_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->compute_ctx = ctx;

	return 0;

remove_from_idr:
	mutex_lock(&mgr->ctx_lock);
	idr_remove(&mgr->ctx_handles, ctx->handle);
	mutex_unlock(&mgr->ctx_lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}
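
/*
 * hl_ctx_free - drop the initial reference of a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 *
 * If command submissions of this context are still executing, the context is
 * kept alive until their fences are released and a warning is printed.
 */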
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
	if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
		return;

	dev_warn(hdev->dev,
		"user process released device but its command submissions are still executing\n");
}
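
/*
 * hl_ctx_init - initialize a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true for the kernel driver context, false for a user context
 *
 * Sets up the pending CS array and, depending on the context type, the MMU
 * mappings (kernel context) or the ASID, memory management and CB VA pool
 * modules (user context).
 */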
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_mmu_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mmu ctx module\n");
			goto err_free_cs_pending;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_free_cs_pending;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	hl_asid_free(hdev, ctx->asid);
err_free_cs_pending:
	kfree(ctx->cs_pending);

	return rc;
}
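
/*
 * hl_ctx_get - increment the context reference count
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 */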
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}
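
/*
 * hl_ctx_put - decrement the context reference count
 *
 * @ctx: pointer to the context structure
 *
 * Releases the context when the last reference is dropped.
 */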
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}
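
/*
 * hl_ctx_get_fence - get the fence of a command submission by sequence number
 *
 * @ctx: pointer to the context structure
 * @seq: sequence number of the command submission
 *
 * Returns a referenced fence, NULL if the command submission has already
 * completed and its fence was recycled, or an error pointer if the sequence
 * number was never submitted on this context.
 */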
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	if (seq >= ctx->cs_sequence) {
		spin_unlock(&ctx->cs_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
		spin_unlock(&ctx->cs_lock);
		return NULL;
	}

	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);

	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
{
	mutex_init(&mgr->ctx_lock);
	idr_init(&mgr->ctx_handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id)
		hl_ctx_free(hdev, ctx);

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->ctx_lock);
}