// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
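
/*
 * hl_ctx_fini - clean up a context's resources
 *
 * @ctx: pointer to the context structure
 *
 * Drops the fences of all pending command submissions. For a user context,
 * also tears down the ASIC state, the mapped-CB VA pool, the VM state and
 * the ASID, and scrubs device memory; for the kernel context, only the MMU
 * state is destroyed.
 */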
static void hl_ctx_fini(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        u64 idle_mask = 0;
        int i;

        /*
         * If we arrived here, there are no jobs waiting for this context
         * on its queues, so we can safely remove it.
         * This is because for each CS we increment the ref count, and for
         * every CS that finishes we decrement it, so we won't arrive at
         * this function unless the ref count is 0.
         */

        for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
                hl_fence_put(ctx->cs_pending[i]);

        kfree(ctx->cs_pending);

        if (ctx->asid != HL_KERNEL_ASID_ID) {
                dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);

                /* The engines are stopped as there is no executing CS, but
                 * Coresight might still be working by accessing addresses
                 * related to the stopped engines. Hence stop it explicitly.
                 * Stop only if this is the compute context, as there can be
                 * only one compute context.
                 */
                if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
                        hl_device_set_debug_mode(hdev, false);

                hdev->asic_funcs->ctx_fini(ctx);
                hl_cb_va_pool_fini(ctx);
                hl_vm_ctx_fini(ctx);
                hl_asid_free(hdev, ctx->asid);

                /* Scrub both SRAM and DRAM */
                hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);

                if ((!hdev->pldm) && (hdev->pdev) &&
                                (!hdev->asic_funcs->is_device_idle(hdev,
                                                        &idle_mask, NULL)))
                        dev_notice(hdev->dev,
                                "device not idle after user context is closed (0x%llx)\n",
                                idle_mask);
        } else {
                dev_dbg(hdev->dev, "closing kernel context\n");
                hl_mmu_ctx_fini(ctx);
        }
}
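
/*
 * hl_ctx_do_release - final release of a context
 *
 * @ref: pointer to the refcount kref embedded in the context
 *
 * Called by kref_put() when the context's reference count drops to zero.
 * Cleans up the context and drops the reference it holds on the owning
 * process' private data, if any.
 */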
void hl_ctx_do_release(struct kref *ref)
{
        struct hl_ctx *ctx;

        ctx = container_of(ref, struct hl_ctx, refcount);

        hl_ctx_fini(ctx);

        if (ctx->hpriv)
                hl_hpriv_put(ctx->hpriv);

        kfree(ctx);
}
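
/*
 * hl_ctx_create - create a user context for an opening process
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the private data of the process
 *
 * Allocates a context, registers it in the process' context manager IDR
 * and initializes it as a user (non-kernel) context.
 */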
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
        struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
        struct hl_ctx *ctx;
        int rc;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                rc = -ENOMEM;
                goto out_err;
        }

        mutex_lock(&mgr->ctx_lock);
        rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
        mutex_unlock(&mgr->ctx_lock);

        if (rc < 0) {
                dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
                goto free_ctx;
        }

        ctx->handle = rc;

        rc = hl_ctx_init(hdev, ctx, false);
        if (rc)
                goto remove_from_idr;

        hl_hpriv_get(hpriv);
        ctx->hpriv = hpriv;

        /* TODO: remove for multiple contexts per process */
        hpriv->ctx = ctx;

        /* TODO: remove the following line for multiple process support */
        hdev->compute_ctx = ctx;

        return 0;

remove_from_idr:
        mutex_lock(&mgr->ctx_lock);
        idr_remove(&mgr->ctx_handles, ctx->handle);
        mutex_unlock(&mgr->ctx_lock);
free_ctx:
        kfree(ctx);
out_err:
        return rc;
}
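
/*
 * hl_ctx_free - drop the initial reference of a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 *
 * If this was the last reference, the context is released immediately.
 * Otherwise, in-flight command submissions still hold references, so a
 * warning is printed and the final release happens when the last reference
 * is dropped.
 */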
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
        if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
                return;

        dev_warn(hdev->dev,
                "user process released device but its command submissions are still executing\n");
}
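
/*
 * hl_ctx_init - initialize a context structure
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true for the kernel driver's context, false for user ones
 *
 * Allocates the pending-CS fence array. For a user context, also allocates
 * an ASID and initializes the VM state, the mapped-CB VA pool and the
 * ASIC-specific state.
 */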
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
        int rc = 0;

        ctx->hdev = hdev;

        kref_init(&ctx->refcount);

        ctx->cs_sequence = 1;
        spin_lock_init(&ctx->cs_lock);
        atomic_set(&ctx->thread_ctx_switch_token, 1);
        ctx->thread_ctx_switch_wait_token = 0;
        ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
                                sizeof(struct hl_fence *),
                                GFP_KERNEL);
        if (!ctx->cs_pending)
                return -ENOMEM;

        if (is_kernel_ctx) {
                ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
                rc = hl_mmu_ctx_init(ctx);
                if (rc) {
                        dev_err(hdev->dev, "Failed to init mmu ctx module\n");
                        goto err_free_cs_pending;
                }
        } else {
                ctx->asid = hl_asid_alloc(hdev);
                if (!ctx->asid) {
                        dev_err(hdev->dev, "No free ASID, failed to create context\n");
                        rc = -ENOMEM;
                        goto err_free_cs_pending;
                }

                rc = hl_vm_ctx_init(ctx);
                if (rc) {
                        dev_err(hdev->dev, "Failed to init mem ctx module\n");
                        rc = -ENOMEM;
                        goto err_asid_free;
                }

                rc = hl_cb_va_pool_init(ctx);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed to init VA pool for mapped CB\n");
                        goto err_vm_ctx_fini;
                }

                rc = hdev->asic_funcs->ctx_init(ctx);
                if (rc) {
                        dev_err(hdev->dev, "ctx_init failed\n");
                        goto err_cb_va_pool_fini;
                }

                dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
        }

        return 0;

err_cb_va_pool_fini:
        hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
        hl_vm_ctx_fini(ctx);
err_asid_free:
        hl_asid_free(hdev, ctx->asid);
err_free_cs_pending:
        kfree(ctx->cs_pending);

        return rc;
}
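
/*
 * hl_ctx_get - increment the context's reference count
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 */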
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
{
        kref_get(&ctx->refcount);
}
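
/*
 * hl_ctx_put - decrement the context's reference count
 *
 * @ctx: pointer to the context structure
 *
 * Returns 1 if this was the last reference and the context was released,
 * 0 otherwise.
 */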
int hl_ctx_put(struct hl_ctx *ctx)
{
        return kref_put(&ctx->refcount, hl_ctx_do_release);
}
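
/*
 * hl_ctx_get_fence - look up the fence of a CS by its sequence number
 *
 * @ctx: pointer to the context structure
 * @seq: sequence number of the command submission
 *
 * Pending fences live in a ring buffer of max_pending_cs entries, indexed
 * by the CS sequence number; the masking below assumes max_pending_cs is a
 * power of two. Returns ERR_PTR(-EINVAL) if the CS was never submitted,
 * NULL if it is so old that its slot was already reused, or the fence with
 * an extra reference taken otherwise.
 */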
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
        struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
        struct hl_fence *fence;

        spin_lock(&ctx->cs_lock);

        if (seq >= ctx->cs_sequence) {
                spin_unlock(&ctx->cs_lock);
                return ERR_PTR(-EINVAL);
        }

        if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
                spin_unlock(&ctx->cs_lock);
                return NULL;
        }

        fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
        hl_fence_get(fence);

        spin_unlock(&ctx->cs_lock);

        return fence;
}
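
/*
 * Illustrative caller sketch (not part of this file): how a wait path might
 * consume the three possible outcomes of hl_ctx_get_fence(). The surrounding
 * ioctl/wait plumbing is assumed here, not taken from the driver.
 *
 *        fence = hl_ctx_get_fence(ctx, seq);
 *        if (IS_ERR(fence))
 *                return PTR_ERR(fence);  (seq was never submitted)
 *        if (!fence)
 *                return 0;               (CS completed long ago, slot reused)
 *        ... wait for the fence to signal ...
 *        hl_fence_put(fence);            (drop the reference taken above)
 */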

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
{
        mutex_init(&mgr->ctx_lock);
        idr_init(&mgr->ctx_handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
{
        struct hl_ctx *ctx;
        struct idr *idp;
        u32 id;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id)
                hl_ctx_free(hdev, ctx);

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->ctx_lock);
}