// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
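/*
 * hl_ctx_fini - release the resources held by a context
 *
 * @ctx: pointer to the context structure
 *
 * Puts the pending CS fences and, for a user context, stops debug mode if it
 * is still enabled, tears down the VM state and frees the ASID. The kernel
 * context only tears down its MMU state.
 */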
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS, we increment the ref count and for
	 * every CS that was finished we decrement it and we won't arrive
	 * to this function unless the ref count is 0
	 */

	for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
		dma_fence_put(ctx->cs_pending[i]);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		/* The engines are stopped as there is no executing CS, but the
		 * Coresight might be still working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 * Stop only if this is the compute context, as there can be
		 * only one compute context
		 */
		if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
			hl_device_set_debug_mode(hdev, false);

		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
	} else {
		hl_mmu_ctx_fini(ctx);
	}
}
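/*
 * hl_ctx_do_release - final kref release callback of a context
 *
 * @ref: pointer to the refcount member of the context
 *
 * Finalizes the context, drops the reference it holds on its owning hpriv
 * (if any) and frees the context object.
 */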
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv)
		hl_hpriv_put(ctx->hpriv);

	kfree(ctx);
}
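/*
 * hl_ctx_create - create a user context for a process
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the private data of the calling process
 *
 * Allocates a context, registers it in the process' context manager IDR and
 * initializes it as a user (non-kernel) context.
 */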
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&mgr->ctx_lock);
	rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&mgr->ctx_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->compute_ctx = ctx;

	return 0;

remove_from_idr:
	mutex_lock(&mgr->ctx_lock);
	idr_remove(&mgr->ctx_handles, ctx->handle);
	mutex_unlock(&mgr->ctx_lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}
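/*
 * hl_ctx_free - drop a reference on a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 *
 * Releases the context if this was the last reference. Otherwise, warns that
 * the context was closed while its command submissions are still executing.
 */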
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
	if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
		return;

	dev_warn(hdev->dev,
		"Context %d closed or terminated but its CS are executing\n",
		ctx->asid);
}
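/*
 * hl_ctx_init - initialize a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true for the kernel (driver) context, false for user contexts
 *
 * The kernel context keeps ASID 0 and initializes only its MMU state. A user
 * context gets a newly allocated ASID and a full VM state.
 */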
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_mmu_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mmu ctx module\n");
			goto mem_ctx_err;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev,
				"No free ASID, failed to create context\n");
			return -ENOMEM;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			goto mem_ctx_err;
		}
	}

	return 0;

mem_ctx_err:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);

	return rc;
}
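/*
 * hl_ctx_get - increment the context reference count
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 */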
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}
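/*
 * hl_ctx_put - decrement the context reference count
 *
 * @ctx: pointer to the context structure
 *
 * Releases the context when the reference count drops to zero.
 */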
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}
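/*
 * hl_ctx_get_fence - get the fence of a previously submitted CS
 *
 * @ctx: pointer to the context structure
 * @seq: sequence number of the command submission
 *
 * Returns a new reference to the fence of the CS, NULL if the CS has already
 * completed and its fence slot was recycled, or an error pointer if @seq was
 * never submitted on this context.
 */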
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_device *hdev = ctx->hdev;
	struct dma_fence *fence;

	spin_lock(&ctx->cs_lock);

	if (seq >= ctx->cs_sequence) {
		dev_notice_ratelimited(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu\n",
			seq, ctx->cs_sequence);
		spin_unlock(&ctx->cs_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);
		spin_unlock(&ctx->cs_lock);
		return NULL;
	}

	fence = dma_fence_get(
			ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
	spin_unlock(&ctx->cs_lock);

	return fence;
}
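/*
 * Illustrative wait flow (not part of this file): a caller that holds a CS
 * sequence number can wait for its completion roughly as follows. The
 * timeout value is arbitrary and only for the sake of the example.
 *
 *	fence = hl_ctx_get_fence(ctx, seq);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	if (fence) {
 *		dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
 *		dma_fence_put(fence);
 *	}
 */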
/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
{
	mutex_init(&mgr->ctx_lock);
	idr_init(&mgr->ctx_handles);
}
/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id)
		hl_ctx_free(hdev, ctx);

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->ctx_lock);
}
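/*
 * Illustrative open/close flow (not part of this file): the fd open path
 * initializes the per-process context manager and creates the compute
 * context, while the fd release path tears everything down. The
 * hl_example_open()/hl_example_release() helpers below are hypothetical and
 * only show the expected call order of the APIs in this file.
 *
 *	static int hl_example_open(struct hl_device *hdev, struct hl_fpriv *hpriv)
 *	{
 *		hl_ctx_mgr_init(&hpriv->ctx_mgr);
 *		return hl_ctx_create(hdev, hpriv);
 *	}
 *
 *	static void hl_example_release(struct hl_device *hdev, struct hl_fpriv *hpriv)
 *	{
 *		hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
 *	}
 */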