// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#include "fun_queue.h"
#include "fun_dev.h"

#define FUN_ADMIN_CMD_TO_MS 3000

enum {
	AQA_ASQS_SHIFT = 0,
	AQA_ACQS_SHIFT = 16,
	AQA_MIN_QUEUE_SIZE = 2,
	AQA_MAX_QUEUE_SIZE = 4096,
};

/* Context for admin commands. */
struct fun_cmd_ctx {
	fun_admin_callback_t cb;  /* callback to invoke on completion */
	void *cb_data;            /* user data provided to callback */
	int cpu;                  /* CPU where the cmd's tag was allocated */
};

/* Context for synchronous admin commands. */
struct fun_sync_cmd_ctx {
	struct completion compl; /* command completion */
	u8 *rsp_buf;             /* caller provided response buffer */
	unsigned int rsp_len;    /* response buffer size */
	u8 rsp_status;           /* command response status */
};

/* Wait for the CSTS.RDY bit to match @enabled. */
static int fun_wait_ready(struct fun_dev *fdev, bool enabled)
{
	unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg);
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	unsigned long deadline;

	deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms units */

	for (;;) {
		u32 csts = readl(fdev->bar + NVME_REG_CSTS);

		if (csts == ~0) {
			dev_err(fdev->dev, "CSTS register read %#x\n", csts);
			return -EIO;
		}

		if ((csts & NVME_CSTS_RDY) == bit)
			return 0;

		if (time_is_before_jiffies(deadline))
			break;

		msleep(100);
	}

	dev_err(fdev->dev,
		"Timed out waiting for device to indicate RDY %u; aborting %s\n",
		enabled, enabled ? "initialization" : "reset");
	return -ETIMEDOUT;
}

/* Check CSTS and return an error if it is unreadable or has an unexpected
 * RDY value.
 */
static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy)
{
	u32 csts = readl(fdev->bar + NVME_REG_CSTS);
	u32 actual_rdy = csts & NVME_CSTS_RDY;

	if (csts == ~0) {
		dev_err(fdev->dev, "CSTS register read %#x\n", csts);
		return -EIO;
	}
	if (actual_rdy != expected_rdy) {
		dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy);
		return -EINVAL;
	}
	return 0;
}

/* Check that CSTS RDY has the expected value. Then write a new value to the CC
 * register and wait for CSTS RDY to match the new CC ENABLE state.
 */
static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy)
{
	int rc = fun_check_csts_rdy(fdev, initial_rdy);

	if (rc)
		return rc;

	writel(fdev->cc_reg, fdev->bar + NVME_REG_CC);
	return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE));
}

static int fun_disable_ctrl(struct fun_dev *fdev)
{
	fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
	return fun_update_cc_enable(fdev, 1);
}

static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2,
			   u32 admin_sqesz_log2)
{
	fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) |
		       (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) |
		       ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) |
		       NVME_CC_ENABLE;

	return fun_update_cc_enable(fdev, 0);
}

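/* Example CC composition (values are illustrative): with 64B CQEs and SQEs
 * both size-log2 arguments are 6, and on a 4KB-page system PAGE_SHIFT - 12
 * is 0, so the CC value programs IOCQES = 6, IOSQES = 6, MPS = 0 and sets
 * ENABLE.
 */
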
static int fun_map_bars(struct fun_dev *fdev, const char *name)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);
	int err;

	err = pci_request_mem_regions(pdev, name);
	if (err) {
		dev_err(&pdev->dev,
			"Couldn't get PCI memory resources, err %d\n", err);
		return err;
	}

	fdev->bar = pci_ioremap_bar(pdev, 0);
	if (!fdev->bar) {
		dev_err(&pdev->dev, "Couldn't map BAR 0\n");
		pci_release_mem_regions(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void fun_unmap_bars(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);

	if (fdev->bar) {
		iounmap(fdev->bar);
		fdev->bar = NULL;
		pci_release_mem_regions(pdev);
	}
}

static int fun_set_dma_masks(struct device *dev)
{
	int err;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		dev_err(dev, "DMA mask configuration failed, err %d\n", err);
	return err;
}

static irqreturn_t fun_admin_irq(int irq, void *data)
{
	struct fun_queue *funq = data;

	return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE;
}

static void fun_complete_admin_cmd(struct fun_queue *funq, void *data,
				   void *entry, const struct fun_cqe_info *info)
{
	const struct fun_admin_rsp_common *rsp_common = entry;
	struct fun_dev *fdev = funq->fdev;
	struct fun_cmd_ctx *cmd_ctx;
	int cpu;
	u16 cid;

	if (info->sqhd == cpu_to_be16(0xffff)) {
		dev_dbg(fdev->dev, "adminq event");
		if (fdev->adminq_cb)
			fdev->adminq_cb(fdev, entry);
		return;
	}

	cid = be16_to_cpu(rsp_common->cid);
	dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
		rsp_common->op, rsp_common->ret);

	cmd_ctx = &fdev->cmd_ctx[cid];
	if (cmd_ctx->cpu < 0) {
		dev_err(fdev->dev,
			"admin CQE with CID=%u, op=%u does not match a pending command\n",
			cid, rsp_common->op);
		return;
	}

	if (cmd_ctx->cb)
		cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));

	cpu = cmd_ctx->cpu;
	cmd_ctx->cpu = -1;
	sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
}

static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags)
{
	unsigned int i;

	fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL);
	if (!fdev->cmd_ctx)
		return -ENOMEM;

	for (i = 0; i < ntags; i++)
		fdev->cmd_ctx[i].cpu = -1;

	return 0;
}

/* Allocate and enable an admin queue and assign it the first IRQ vector. */
static int fun_enable_admin_queue(struct fun_dev *fdev,
				  const struct fun_dev_params *areq)
{
	struct fun_queue_alloc_req qreq = {
		.cqe_size_log2 = areq->cqe_size_log2,
		.sqe_size_log2 = areq->sqe_size_log2,
		.cq_depth = areq->cq_depth,
		.sq_depth = areq->sq_depth,
		.rq_depth = areq->rq_depth,
	};
	unsigned int ntags = areq->sq_depth - 1;
	struct fun_queue *funq;
	int rc;

	if (fdev->admin_q)
		return -EEXIST;

	if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||
	    areq->sq_depth > AQA_MAX_QUEUE_SIZE ||
	    areq->cq_depth < AQA_MIN_QUEUE_SIZE ||
	    areq->cq_depth > AQA_MAX_QUEUE_SIZE)
		return -EINVAL;

	fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq);
	if (!fdev->admin_q)
		return -ENOMEM;

	rc = fun_init_cmd_ctx(fdev, ntags);
	if (rc)
		goto free_q;

	rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false,
				     GFP_KERNEL, dev_to_node(fdev->dev));
	if (rc)
		goto free_cmd_ctx;

	funq = fdev->admin_q;
	funq->cq_vector = 0;
	rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq);
	if (rc)
		goto free_sbq;

	fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL);
	fdev->adminq_cb = areq->event_cb;

	writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |
	       (funq->cq_depth - 1) << AQA_ACQS_SHIFT,
	       fdev->bar + NVME_REG_AQA);

	writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ);
	writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ);

	rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2);
	if (rc)
		goto free_irq;

	if (areq->rq_depth) {
		rc = fun_create_rq(funq);
		if (rc)
			goto disable_ctrl;

		funq_rq_post(funq);
	}

	return 0;

disable_ctrl:
	fun_disable_ctrl(fdev);
free_irq:
	fun_free_irq(funq);
free_sbq:
	sbitmap_queue_free(&fdev->admin_sbq);
free_cmd_ctx:
	kvfree(fdev->cmd_ctx);
	fdev->cmd_ctx = NULL;
free_q:
	fun_free_queue(fdev->admin_q);
	fdev->admin_q = NULL;
	return rc;
}

static void fun_disable_admin_queue(struct fun_dev *fdev)
{
	struct fun_queue *admq = fdev->admin_q;

	if (!admq)
		return;

	fun_disable_ctrl(fdev);

	fun_free_irq(admq);
	__fun_process_cq(admq, 0);

	sbitmap_queue_free(&fdev->admin_sbq);

	kvfree(fdev->cmd_ctx);
	fdev->cmd_ctx = NULL;

	fun_free_queue(admq);
	fdev->admin_q = NULL;
}

/* Return %true if the admin queue has stopped servicing commands as can be
 * detected through registers. This isn't exhaustive and may provide false
 * negatives.
 */
static bool fun_adminq_stopped(struct fun_dev *fdev)
{
	u32 csts = readl(fdev->bar + NVME_REG_CSTS);

	return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY;
}

static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup)
{
	struct sbitmap_queue *sbq = &fdev->admin_sbq;
	struct sbq_wait_state *ws = &sbq->ws[0];
	DEFINE_SBQ_WAIT(wait);
	int tag;

	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		if (fdev->suppress_cmds) {
			tag = -ESHUTDOWN;
			break;
		}
		tag = sbitmap_queue_get(sbq, cpup);
		if (tag >= 0)
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}

/* Submit an asynchronous admin command. Caller is responsible for implementing
 * any waiting or timeout. Upon command completion the callback @cb is called.
 */
int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
			 fun_admin_callback_t cb, void *cb_data, bool wait_ok)
{
	struct fun_queue *funq = fdev->admin_q;
	unsigned int cmdsize = cmd->len8 * 8;
	struct fun_cmd_ctx *cmd_ctx;
	int tag, cpu, rc = 0;

	if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2)))
		return -EMSGSIZE;

	tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
	if (tag < 0) {
		if (!wait_ok)
			return -EAGAIN;
		tag = fun_wait_for_tag(fdev, &cpu);
		if (tag < 0)
			return tag;
	}

	cmd->cid = cpu_to_be16(tag);

	cmd_ctx = &fdev->cmd_ctx[tag];
	cmd_ctx->cb = cb;
	cmd_ctx->cb_data = cb_data;

	spin_lock(&funq->sq_lock);

	if (unlikely(fdev->suppress_cmds)) {
		rc = -ESHUTDOWN;
		sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
	} else {
		cmd_ctx->cpu = cpu;
		memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize);

		dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail,
			cmd);

		if (++funq->sq_tail == funq->sq_depth)
			funq->sq_tail = 0;
		writel(funq->sq_tail, funq->sq_db);
	}
	spin_unlock(&funq->sq_lock);
	return rc;
}

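/* Usage sketch for the asynchronous API (hypothetical caller, for
 * illustration only):
 *
 *	static void my_cb(struct fun_dev *fdev, void *rsp, void *cb_data)
 *	{
 *		... examine the struct fun_admin_rsp_common at @rsp ...
 *	}
 *
 *	rc = fun_submit_admin_cmd(fdev, &req.common, my_cb, my_ctx, true);
 *
 * The callback is invoked from admin CQ processing in IRQ context, so it
 * must not sleep; @my_ctx is handed back to it as @cb_data.
 */
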
/* Abandon a pending admin command by clearing the issuer's callback data.
 * Failure indicates that the command either has already completed or its
 * completion is racing with this call.
 */
static bool fun_abandon_admin_cmd(struct fun_dev *fd,
				  const struct fun_admin_req_common *cmd,
				  void *cb_data)
{
	u16 cid = be16_to_cpu(cmd->cid);
	struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];

	return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data;
}

/* Stop submission of new admin commands and wake up any processes waiting for
 * tags. Already submitted commands are left to complete or time out.
 */
static void fun_admin_stop(struct fun_dev *fdev)
{
	spin_lock(&fdev->admin_q->sq_lock);
	fdev->suppress_cmds = true;
	spin_unlock(&fdev->admin_q->sq_lock);
	sbitmap_queue_wake_all(&fdev->admin_sbq);
}

/* The callback for synchronous execution of admin commands. It copies the
 * command response to the caller's buffer and signals completion.
 */
static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
{
	const struct fun_admin_rsp_common *rsp_common = rsp;
	struct fun_sync_cmd_ctx *ctx = cb_data;

	if (!ctx)
		return;         /* command issuer timed out and left */

	if (ctx->rsp_buf) {
		unsigned int rsp_len = rsp_common->len8 * 8;

		if (unlikely(rsp_len > ctx->rsp_len)) {
			dev_err(fd->dev,
				"response for op %u is %uB > response buffer %uB\n",
				rsp_common->op, rsp_len, ctx->rsp_len);
			rsp_len = ctx->rsp_len;
		}
		memcpy(ctx->rsp_buf, rsp, rsp_len);
	}
	ctx->rsp_status = rsp_common->ret;
	complete(&ctx->compl);
}

/* Submit a synchronous admin command. */
int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
			      struct fun_admin_req_common *cmd, void *rsp,
			      size_t rspsize, unsigned int timeout)
{
	struct fun_sync_cmd_ctx ctx = {
		.compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl),
		.rsp_buf = rsp,
		.rsp_len = rspsize,
	};
	unsigned int cmdlen = cmd->len8 * 8;
	unsigned long jiffies_left;
	int ret;

	ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx,
				   true);
	if (ret)
		return ret;

	if (!timeout)
		timeout = FUN_ADMIN_CMD_TO_MS;

	jiffies_left = wait_for_completion_timeout(&ctx.compl,
						   msecs_to_jiffies(timeout));
	if (!jiffies_left) {
		/* The command timed out. Attempt to cancel it so we can return.
		 * But if the command is in the process of completing we'll
		 * wait for it.
		 */
		if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
			dev_err(fdev->dev, "admin command timed out: %*ph\n",
				cmdlen, cmd);
			fun_admin_stop(fdev);
			/* see if the timeout was due to a queue failure */
			if (fun_adminq_stopped(fdev))
				dev_err(fdev->dev,
					"device does not accept admin commands\n");

			return -ETIMEDOUT;
		}
		wait_for_completion(&ctx.compl);
	}

	if (ctx.rsp_status) {
		dev_err(fdev->dev, "admin command failed, err %d: %*ph\n",
			ctx.rsp_status, cmdlen, cmd);
	}

	return -ctx.rsp_status;
}
EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd);

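/* Usage note with a minimal sketch (the request and response variables are
 * hypothetical):
 *
 *	rc = fun_submit_admin_sync_cmd(fdev, &req.common, &rsp, sizeof(rsp), 0);
 *
 * A @timeout of 0 selects the default FUN_ADMIN_CMD_TO_MS (3000 ms).  If a
 * response buffer is supplied the command's response is copied into it, and
 * the negated response status is returned, so 0 means success.
 */
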
/* Return the number of device resources of the requested type. */
int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
{
	union {
		struct fun_admin_res_count_req req;
		struct fun_admin_res_count_rsp rsp;
	} cmd;
	int rc;

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req));
	cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT,
						    0, 0);

	rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp,
				       sizeof(cmd), 0);
	return rc ? rc : be32_to_cpu(cmd.rsp.count.data);
}
EXPORT_SYMBOL_GPL(fun_get_res_count);

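/* Example (illustrative): query how many endpoint CQs the device exposes
 * before sizing queue sets, as fun_get_dev_limits() below does.
 *
 *	int ncqs = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
 *
 * A negative return is an error code, otherwise it is the resource count.
 */
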
/* Request that the instance of resource @res with the given id be deleted. */
int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
		    unsigned int flags, u32 id)
{
	struct fun_admin_generic_destroy_req req = {
		.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
		.destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
						       flags, id)
	};

	return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_res_destroy);

/* Bind two entities of the given types and IDs. */
int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
	     unsigned int id0, enum fun_admin_bind_type type1,
	     unsigned int id1)
{
	DEFINE_RAW_FLEX(struct fun_admin_bind_req, cmd, entry, 2);

	cmd->common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
						 __struct_size(cmd));
	cmd->entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0);
	cmd->entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1);

	return fun_submit_admin_sync_cmd(fdev, &cmd->common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_bind);

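/* Illustrative call (the queue IDs are hypothetical): tie an endpoint SQ to
 * the endpoint CQ that should receive its completions.
 *
 *	rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid,
 *		      FUN_ADMIN_BIND_TYPE_EPCQ, cqid);
 */
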
static int fun_get_dev_limits(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);
	unsigned int cq_count, sq_count, num_dbs;
	int rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
	if (rc < 0)
		return rc;
	cq_count = rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);
	if (rc < 0)
		return rc;
	sq_count = rc;

	/* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the
	 * device must provide additional queues.
	 */
	if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
		return -EINVAL;

	/* Calculate the max QID based on SQ/CQ/doorbell counts.
	 * SQ/CQ doorbells alternate.
	 */
	num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >>
		  (2 + NVME_CAP_STRIDE(fdev->cap_reg));
	fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
	fdev->kern_end_qid = fdev->max_qid + 1;
	return 0;
}

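/* Worked example of the doorbell math above (numbers are illustrative): with
 * a 64KB BAR0, doorbells starting at NVME_REG_DBS (0x1000) and CAP.DSTRD = 0
 * there are (0x10000 - 0x1000) >> 2 = 15360 doorbell slots, i.e. 7680 SQ/CQ
 * pairs, and max_qid becomes the smallest of that and the device's CQ/SQ
 * counts, minus one.
 */
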
/* Allocate all MSI-X vectors available on a function and at least @min_vecs. */
static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs)
{
	int vecs, num_msix = pci_msix_vec_count(pdev);

	if (num_msix < 0)
		return num_msix;
	if (min_vecs > num_msix)
		return -ERANGE;

	vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX);
	if (vecs > 0) {
		dev_info(&pdev->dev,
			 "Allocated %d IRQ vectors of %d requested\n",
			 vecs, num_msix);
	} else {
		dev_err(&pdev->dev,
			"Unable to allocate at least %u IRQ vectors\n",
			min_vecs);
	}
	return vecs;
}

/* Allocate and initialize the IRQ manager state. */
static int fun_alloc_irq_mgr(struct fun_dev *fdev)
{
	fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
	if (!fdev->irq_map)
		return -ENOMEM;

	spin_lock_init(&fdev->irqmgr_lock);
	/* mark IRQ 0 allocated, it is used by the admin queue */
	__set_bit(0, fdev->irq_map);
	fdev->irqs_avail = fdev->num_irqs - 1;
	return 0;
}

/* Reserve @nirqs of the currently available IRQs and return their indices. */
int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices)
{
	unsigned int b, n = 0;
	int err = -ENOSPC;

	if (!nirqs)
		return -EINVAL;

	spin_lock(&fdev->irqmgr_lock);
	if (nirqs > fdev->irqs_avail)
		goto unlock;

	for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) {
		__set_bit(b, fdev->irq_map);
		irq_indices[n++] = b;
		if (n >= nirqs)
			break;
	}

	WARN_ON(n < nirqs);
	fdev->irqs_avail -= n;
	err = n;
unlock:
	spin_unlock(&fdev->irqmgr_lock);
	return err;
}
EXPORT_SYMBOL(fun_reserve_irqs);

/* Release @nirqs previously allocated IRQs with the supplied indices. */
void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
		      u16 *irq_indices)
{
	unsigned int i;

	spin_lock(&fdev->irqmgr_lock);
	for (i = 0; i < nirqs; i++)
		__clear_bit(irq_indices[i], fdev->irq_map);
	fdev->irqs_avail += nirqs;
	spin_unlock(&fdev->irqmgr_lock);
}
EXPORT_SYMBOL(fun_release_irqs);

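/* Usage sketch (the queue count is hypothetical): a client reserving one
 * vector per queue and releasing them on teardown.
 *
 *	u16 irqs[MY_NUM_QUEUES];
 *	int got = fun_reserve_irqs(fdev, MY_NUM_QUEUES, irqs);
 *
 *	if (got < 0)
 *		return got;
 *	...
 *	fun_release_irqs(fdev, got, irqs);
 *
 * The returned indices are positions in the MSI-X vector table; index 0 is
 * always held by the admin queue.
 */
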
static void fun_serv_handler(struct work_struct *work)
{
	struct fun_dev *fd = container_of(work, struct fun_dev, service_task);

	if (test_bit(FUN_SERV_DISABLED, &fd->service_flags))
		return;
	if (fd->serv_cb)
		fd->serv_cb(fd);
}

void fun_serv_stop(struct fun_dev *fd)
{
	set_bit(FUN_SERV_DISABLED, &fd->service_flags);
	cancel_work_sync(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_stop);

void fun_serv_restart(struct fun_dev *fd)
{
	clear_bit(FUN_SERV_DISABLED, &fd->service_flags);
	if (fd->service_flags)
		schedule_work(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_restart);

void fun_serv_sched(struct fun_dev *fd)
{
	if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags))
		schedule_work(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_sched);

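/* Illustrative use of the service task (the event bit name is hypothetical):
 * an IRQ-time event handler records work for process context and kicks the
 * service task, whose handler then invokes the client's @serv_cb.
 *
 *	set_bit(MY_SERV_EVENT, &fdev->service_flags);
 *	fun_serv_sched(fdev);
 *
 * fun_serv_stop()/fun_serv_restart() bracket resets so @serv_cb does not run
 * against a half-torn-down device.
 */
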
/* Check and try to get the device into a proper state for initialization,
 * i.e., CSTS.RDY = CC.EN = 0.
 */
static int sanitize_dev(struct fun_dev *fdev)
{
	int rc;

	fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP);
	fdev->cc_reg = readl(fdev->bar + NVME_REG_CC);

	/* First get RDY to agree with the current EN. Give RDY the opportunity
	 * to complete a potential recent EN change.
	 */
	rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
	if (rc)
		return rc;

	/* Next, reset the device if EN is currently 1. */
	if (fdev->cc_reg & NVME_CC_ENABLE)
		rc = fun_disable_ctrl(fdev);

	return rc;
}

/* Undo the device initialization of fun_dev_enable(). */
void fun_dev_disable(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);

	pci_set_drvdata(pdev, NULL);

	if (fdev->fw_handle != FUN_HCI_ID_INVALID) {
		fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0,
				fdev->fw_handle);
		fdev->fw_handle = FUN_HCI_ID_INVALID;
	}

	fun_disable_admin_queue(fdev);

	bitmap_free(fdev->irq_map);
	pci_free_irq_vectors(pdev);

	pci_disable_device(pdev);

	fun_unmap_bars(fdev);
}
EXPORT_SYMBOL(fun_dev_disable);

/* Perform basic initialization of a device, including
 *   - PCI config space setup and BAR0 mapping
 *   - interrupt management initialization
 *   - 1 admin queue setup
 *   - determination of some device limits, such as number of queues.
 */
int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
		   const struct fun_dev_params *areq, const char *name)
{
	int rc;

	fdev->dev = &pdev->dev;
	rc = fun_map_bars(fdev, name);
	if (rc)
		return rc;

	rc = fun_set_dma_masks(fdev->dev);
	if (rc)
		goto unmap;

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc);
		goto unmap;
	}

	rc = sanitize_dev(fdev);
	if (rc)
		goto disable_dev;

	fdev->fw_handle = FUN_HCI_ID_INVALID;
	fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;
	fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg);
	fdev->dbs = fdev->bar + NVME_REG_DBS;

	INIT_WORK(&fdev->service_task, fun_serv_handler);
	fdev->service_flags = FUN_SERV_DISABLED;
	fdev->serv_cb = areq->serv_cb;

	rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */
	if (rc < 0)
		goto disable_dev;
	fdev->num_irqs = rc;

	rc = fun_alloc_irq_mgr(fdev);
	if (rc)
		goto free_irqs;

	pci_set_master(pdev);
	rc = fun_enable_admin_queue(fdev, areq);
	if (rc)
		goto free_irq_mgr;

	rc = fun_get_dev_limits(fdev);
	if (rc < 0)
		goto disable_admin;

	pci_save_state(pdev);
	pci_set_drvdata(pdev, fdev);
	pcie_print_link_status(pdev);
	dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n",
		fdev->q_depth, fdev->db_stride, fdev->max_qid,
		fdev->kern_end_qid);
	return 0;

disable_admin:
	fun_disable_admin_queue(fdev);
free_irq_mgr:
	bitmap_free(fdev->irq_map);
free_irqs:
	pci_free_irq_vectors(pdev);
disable_dev:
	pci_disable_device(pdev);
unmap:
	fun_unmap_bars(fdev);
	return rc;
}
EXPORT_SYMBOL(fun_dev_enable);

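/* Probe/remove sketch (hypothetical client driver, for illustration): fill a
 * struct fun_dev_params and bracket the device's lifetime with
 * fun_dev_enable()/fun_dev_disable().
 *
 *	struct fun_dev_params areq = {
 *		.cqe_size_log2 = ilog2(ADMIN_CQE_SIZE),
 *		.sqe_size_log2 = ilog2(ADMIN_SQE_SIZE),
 *		.cq_depth = ADMIN_CQ_DEPTH,
 *		.sq_depth = ADMIN_SQ_DEPTH,
 *		.min_msix = num_online_cpus(),
 *	};
 *
 *	rc = fun_dev_enable(fdev, pdev, &areq, KBUILD_MODNAME);
 *	...
 *	fun_dev_disable(fdev);
 *
 * The ADMIN_* values are placeholders; real sizes depend on the client.
 */
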
MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
MODULE_DESCRIPTION("Core services driver for Fungible devices");
MODULE_LICENSE("Dual BSD/GPL");