// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
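
/*
 * Expected input-payload size for each debug operation. hl_debug_ioctl()
 * uses this table to clamp the user-supplied input_size before the payload
 * is copied into the kernel; FUNNEL and TIMESTAMP take no input.
 */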
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0
};
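
/*
 * The INFO-opcode handlers below share a common contract: reject a zero
 * return_size or return_pointer, fill a zero-initialized kernel structure,
 * and copy back at most min(return_size, sizeof(structure)) bytes.
 */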
static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_status dev_stat = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!size) || (!out))
		return -EINVAL;

	dev_stat.status = hl_device_status(hdev);

	return copy_to_user(out, &dev_stat,
			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}
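
/*
 * The SRAM/DRAM base addresses and sizes reported to userspace start at the
 * user base addresses, i.e. the region reserved for the kernel driver
 * (between the raw base and the user base) is subtracted out.
 */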
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size;

	if ((!size) || (!out))
		return -EINVAL;

	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	hw_ip.dram_base_address = prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
	hw_ip.sram_size = prop->sram_size - sram_kmd_size;
	hw_ip.dram_size = prop->dram_size - dram_kmd_size;
	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	return copy_to_user(out, &hw_ip,
		min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
}
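
/*
 * Unlike the fixed-size handlers, the length of the event-counter array
 * comes from the ASIC-specific get_events_stat() callback; only
 * min(return_size, size) bytes are copied back to userspace.
 */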
static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	u32 size, max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	void *arr;

	if ((!max_size) || (!out))
		return -EINVAL;

	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);

	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_dram_usage dram_usage = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_kmd_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);
	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
					atomic64_read(&hdev->dram_used_mem);
	if (hpriv->ctx)
		dram_usage.ctx_dram_mem =
			atomic64_read(&hpriv->ctx->dram_phys_mem);

	return copy_to_user(out, &dram_usage,
		min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}
static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_idle hw_idle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
					&hw_idle.busy_engines_mask_ext, NULL);

	return copy_to_user(out, &hw_idle,
		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}
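
/*
 * Marshal the user-supplied input/output buffers of a coresight debug
 * request into kernel memory and hand them to the ASIC-specific
 * debug_coresight() callback; any output is copied back on success.
 */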
static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}

out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}
static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_utilization device_util = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if ((args->period_ms < 100) || (args->period_ms > 1000) ||
		(args->period_ms % 100)) {
		dev_err(hdev->dev,
			"period %u must be between 100 - 1000 and must be divisible by 100\n",
			args->period_ms);
		return -EINVAL;
	}

	device_util.utilization = hl_device_utilization(hdev, args->period_ms);

	return copy_to_user(out, &device_util,
		min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}
static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_clk_rate clk_rate = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
					&clk_rate.max_clk_rate_mhz);
	if (rc)
		return rc;

	return copy_to_user(out, &clk_rate,
		min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
}
static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_reset_count reset_count = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
	reset_count.soft_reset_cnt = hdev->soft_reset_cnt;

	return copy_to_user(out, &reset_count,
		min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}
static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_time_sync time_sync = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
	time_sync.host_time = ktime_get_raw_ns();

	return copy_to_user(out, &time_sync,
		min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}
static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_pci_counters pci_counters = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
	if (rc)
		return rc;

	return copy_to_user(out, &pci_counters,
		min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason;

	return copy_to_user(out, &clk_throttle,
		min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}
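
/*
 * Command-submission drop counters are reported twice: the "total_*" fields
 * come from the device-wide aggregated counters, the "ctx_*" fields from the
 * counters of the caller's context, if one is open.
 */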
static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
			atomic64_read(
				&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
			atomic64_read(
				&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
			atomic64_read(
				&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
			atomic64_read(
				&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
			atomic64_read(
				&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
			atomic64_read(
				&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}
static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_info_sync_manager sm_info = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if (args->dcore_id >= HL_MAX_DCORES)
		return -EINVAL;

	sm_info.first_available_sync_object =
			prop->first_available_user_sob[args->dcore_id];
	sm_info.first_available_monitor =
			prop->first_available_user_mon[args->dcore_id];

	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
			sizeof(sm_info))) ? -EFAULT : 0;
}
static int total_energy_consumption_info(struct hl_fpriv *hpriv,
			struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_energy total_energy = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_total_energy_get(hdev,
			&total_energy.total_energy_consumption);
	if (rc)
		return rc;

	return copy_to_user(out, &total_energy,
		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_pll_frequency_info freq_info = { {0} };
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
	if (rc)
		return rc;

	return copy_to_user(out, &freq_info,
		min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}
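
/*
 * Main INFO ioctl dispatcher. HW_IP_INFO, DEVICE_STATUS and RESET_COUNT are
 * served even when the device is disabled or in reset; every other opcode
 * requires an operational device.
 */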
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;

	int rc;

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	default:
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_HW_EVENTS:
		rc = hw_events_info(hdev, false, args);
		break;

	case HL_INFO_DRAM_USAGE:
		rc = dram_usage_info(hpriv, args);
		break;

	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_HW_EVENTS_AGGREGATE:
		rc = hw_events_info(hdev, true, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -ENOTTY;
		break;
	}

	return rc;
}
static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}
static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}
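
/*
 * DEBUG ioctl dispatcher. Coresight configuration requests are rejected
 * unless the device was first put into debug mode via HL_DEBUG_OP_SET_MODE;
 * input_size is clamped to the size expected for the requested operation.
 */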
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;

	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		args->input_size =
			min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, args);
		break;
	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, (bool) args->enable);
		break;
	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -ENOTTY;
		break;
	}

	return rc;
}
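
/* Ioctl table entries are indexed by the ioctl's _IOC_NR() number. */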
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};
static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};
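
/*
 * Common ioctl entry path: copy the argument struct in (using an on-stack
 * buffer when it fits, a heap allocation otherwise), call the handler, and
 * copy the result back to userspace for commands with the OUT direction bit.
 */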
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	if (hdev->hard_reset_pending) {
		dev_crit_ratelimited(dev,
			"Device HARD reset pending! Please close FD\n");
		return -ENODEV;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
		ioctl = &hl_ioctls[nr];
	} else {
		dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
}
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
		ioctl = &hl_ioctls_control[nr];
	} else {
		dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
}
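
/*
 * Example (untested sketch, not part of the driver): how a userspace process
 * might query DRAM usage through HL_IOCTL_INFO. The device node name
 * ("/dev/hl0") and the installed uapi header path are assumptions.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <misc/habanalabs.h>
 *
 *	int main(void)
 *	{
 *		struct hl_info_dram_usage dram_usage;
 *		struct hl_info_args args;
 *		int fd = open("/dev/hl0", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *
 *		memset(&args, 0, sizeof(args));
 *		memset(&dram_usage, 0, sizeof(dram_usage));
 *		args.op = HL_INFO_DRAM_USAGE;
 *		args.return_pointer = (__u64) (uintptr_t) &dram_usage;
 *		args.return_size = sizeof(dram_usage);
 *
 *		if (ioctl(fd, HL_IOCTL_INFO, &args))
 *			return 1;
 *
 *		printf("free DRAM: %llu bytes\n",
 *			(unsigned long long) dram_usage.dram_free_mem);
 *		return 0;
 *	}
 */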