// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
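
/*
 * hl_ctx_fini - free the resources held by a context
 *
 * @ctx: pointer to the context structure
 *
 * Puts the pending CS fences and, for a user context, halts Coresight,
 * finalizes the memory (VM) context and frees the ASID. Called only once
 * the context refcount has dropped to zero.
 */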
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS, we increment the ref count and for
	 * every CS that was finished we decrement it and we won't arrive
	 * to this function unless the ref count is 0
	 */

	for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
		dma_fence_put(ctx->cs_pending[i]);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		/*
		 * The engines are stopped as there is no executing CS, but the
		 * Coresight might be still working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 */
		hdev->asic_funcs->halt_coresight(hdev);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
	}
}
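
/*
 * hl_ctx_do_release - final release of a context
 *
 * @ref: pointer to the refcount kref embedded in the context
 *
 * Called by kref_put() when the context refcount reaches zero. Finalizes
 * the context, drops the reference on the owning hpriv (if any) and frees
 * the context structure.
 */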
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv)
		hl_hpriv_put(ctx->hpriv);

	kfree(ctx);
}
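
/*
 * hl_ctx_create - create a user context for an open file descriptor
 *
 * @hdev: pointer to the device structure
 * @hpriv: pointer to the private data of the open FD
 *
 * Allocates and initializes a context, attaches it to the hpriv and
 * registers it in the context manager IDR. Returns 0 on success or a
 * negative error code on failure.
 */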
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto free_ctx;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts */
	hpriv->ctx = ctx;
	hdev->user_ctx = ctx;

	mutex_lock(&mgr->ctx_lock);
	rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&mgr->ctx_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		hl_ctx_free(hdev, ctx);
		goto out_err;
	}

	return 0;

free_ctx:
	kfree(ctx);
out_err:
	return rc;
}
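
/*
 * hl_ctx_free - drop a reference of a context
 *
 * @hdev: pointer to the device structure
 * @ctx: pointer to the context structure
 *
 * If command submissions of this context are still executing, the context
 * stays alive until the last of them completes; only a warning is printed.
 */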
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
	if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
		return;

	dev_warn(hdev->dev,
		"Context %d closed or terminated but its CS are executing\n",
		ctx->asid);
}
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			return -ENOMEM;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto mem_ctx_err;
		}
	}

	return 0;

mem_ctx_err:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);

	return rc;
}
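
/*
 * hl_ctx_get - increment the context reference count
 *
 * @hdev: pointer to the device structure
 * @ctx: pointer to the context structure
 */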
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}
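
/*
 * hl_ctx_put - decrement the context reference count
 *
 * @ctx: pointer to the context structure
 *
 * Releases the context via hl_ctx_do_release() when the count reaches zero.
 */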
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}
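
/*
 * hl_ctx_get_fence - get the fence of a command submission by sequence number
 *
 * @ctx: pointer to the context structure
 * @seq: CS sequence number to look up
 *
 * Returns a referenced dma_fence for a pending CS, NULL if the CS already
 * completed and its fence is gone, or ERR_PTR(-EINVAL) if the sequence
 * number was never submitted.
 */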
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_device *hdev = ctx->hdev;
	struct dma_fence *fence;

	spin_lock(&ctx->cs_lock);

	if (seq >= ctx->cs_sequence) {
		dev_notice(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu\n",
			seq, ctx->cs_sequence);
		spin_unlock(&ctx->cs_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);
		spin_unlock(&ctx->cs_lock);
		return NULL;
	}

	fence = dma_fence_get(
			ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
{
	mutex_init(&mgr->ctx_lock);
	idr_init(&mgr->ctx_handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id)
		hl_ctx_free(hdev, ctx);

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->ctx_lock);
}