1 // SPDX-License-Identifier: GPL-2.0
3 * Apple ANS NVM Express device driver
4 * Copyright The Asahi Linux Contributors
6 * Based on the pci.c NVM Express device driver
7 * Copyright (c) 2011-2014, Intel Corporation.
8 * and on the rdma.c NVMe over Fabrics RDMA host code.
9 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
12 #include <linux/async.h>
13 #include <linux/blkdev.h>
14 #include <linux/blk-mq.h>
15 #include <linux/device.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/interrupt.h>
19 #include <linux/io-64-nonatomic-lo-hi.h>
21 #include <linux/iopoll.h>
22 #include <linux/jiffies.h>
23 #include <linux/mempool.h>
24 #include <linux/module.h>
26 #include <linux/of_platform.h>
27 #include <linux/once.h>
28 #include <linux/platform_device.h>
29 #include <linux/pm_domain.h>
30 #include <linux/soc/apple/rtkit.h>
31 #include <linux/soc/apple/sart.h>
32 #include <linux/reset.h>
33 #include <linux/time64.h>
37 #define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
38 #define APPLE_ANS_MAX_QUEUE_DEPTH 64
40 #define APPLE_ANS_COPROC_CPU_CONTROL 0x44
41 #define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
43 #define APPLE_ANS_ACQ_DB 0x1004
44 #define APPLE_ANS_IOCQ_DB 0x100c
46 #define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
48 #define APPLE_ANS_BOOT_STATUS 0x1300
49 #define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
51 #define APPLE_ANS_UNKNOWN_CTRL 0x24008
52 #define APPLE_ANS_PRP_NULL_CHECK BIT(11)
54 #define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
55 #define APPLE_ANS_LINEAR_SQ_EN BIT(0)
57 #define APPLE_ANS_LINEAR_ASQ_DB 0x2490c
58 #define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
60 #define APPLE_NVMMU_NUM_TCBS 0x28100
61 #define APPLE_NVMMU_ASQ_TCB_BASE 0x28108
62 #define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
63 #define APPLE_NVMMU_TCB_INVAL 0x28118
64 #define APPLE_NVMMU_TCB_STAT 0x28120
67 * This controller is a bit weird in the way command tags works: Both the
68 * admin and the IO queue share the same tag space. Additionally, tags
69 * cannot be higher than 0x40 which effectively limits the combined
70 * queue depth to 0x40. Instead of wasting half of that on the admin queue
71 * which gets much less traffic we instead reduce its size here.
72 * The controller also doesn't support async event such that no space must
73 * be reserved for NVME_NR_AEN_COMMANDS.
75 #define APPLE_NVME_AQ_DEPTH 2
76 #define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
79 * These can be higher, but we need to ensure that any command doesn't
80 * require an sg allocation that needs more than a page of data.
82 #define NVME_MAX_KB_SZ 4096
83 #define NVME_MAX_SEGS 127
86 * This controller comes with an embedded IOMMU known as NVMMU.
87 * The NVMMU is pointed to an array of TCBs indexed by the command tag.
88 * Each command must be configured inside this structure before it's allowed
89 * to execute, including commands that don't require DMA transfers.
91 * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on the
92 * admin queue): Those commands must still be added to the NVMMU but the DMA
93 * buffers cannot be represented as PRPs and must instead be allowed using SART.
95 * Programming the PRPs to the same values as those in the submission queue
96 * looks rather silly at first. This hardware is however designed for a kernel
97 * that runs the NVMMU code in a higher exception level than the NVMe driver.
98 * In that setting the NVMe driver first programs the submission queue entry
99 * and then executes a hypercall to the code that is allowed to program the
100 * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
101 * verifying that they don't point to kernel text, data, pagetables, or similar
102 * protected areas before programming the TCB to point to this shadow copy.
103 * Since Linux doesn't do any of that we may as well just point both the queue
104 * and the TCB PRP pointer to the same memory.
106 struct apple_nvmmu_tcb
{
109 #define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
110 #define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1)
125 * The Apple NVMe controller only supports a single admin and a single IO queue
126 * which are both limited to 64 entries and share a single interrupt.
128 * The completion queue works as usual. The submission "queue" instead is
129 * an array indexed by the command tag on this hardware. Commands must also be
130 * present in the NVMMU's tcb array. They are triggered by writing their tag to
133 struct apple_nvme_queue
{
134 struct nvme_command
*sqes
;
135 struct nvme_completion
*cqes
;
136 struct apple_nvmmu_tcb
*tcbs
;
138 dma_addr_t sq_dma_addr
;
139 dma_addr_t cq_dma_addr
;
140 dma_addr_t tcb_dma_addr
;
153 * The apple_nvme_iod describes the data in an I/O.
155 * The sg pointer contains the list of PRP chunk allocations in addition
156 * to the actual struct scatterlist.
158 struct apple_nvme_iod
{
159 struct nvme_request req
;
160 struct nvme_command cmd
;
161 struct apple_nvme_queue
*q
;
162 int npages
; /* In the PRP list. 0 means small pool in use */
163 int nents
; /* Used in scatterlist */
164 dma_addr_t first_dma
;
165 unsigned int dma_len
; /* length of single DMA segment mapping */
166 struct scatterlist
*sg
;
172 void __iomem
*mmio_coproc
;
173 void __iomem
*mmio_nvme
;
175 struct device
**pd_dev
;
176 struct device_link
**pd_link
;
179 struct apple_sart
*sart
;
180 struct apple_rtkit
*rtk
;
181 struct reset_control
*reset
;
183 struct dma_pool
*prp_page_pool
;
184 struct dma_pool
*prp_small_pool
;
185 mempool_t
*iod_mempool
;
187 struct nvme_ctrl ctrl
;
188 struct work_struct remove_work
;
190 struct apple_nvme_queue adminq
;
191 struct apple_nvme_queue ioq
;
193 struct blk_mq_tag_set admin_tagset
;
194 struct blk_mq_tag_set tagset
;
200 static_assert(sizeof(struct nvme_command
) == 64);
201 static_assert(sizeof(struct apple_nvmmu_tcb
) == 128);
203 static inline struct apple_nvme
*ctrl_to_apple_nvme(struct nvme_ctrl
*ctrl
)
205 return container_of(ctrl
, struct apple_nvme
, ctrl
);
208 static inline struct apple_nvme
*queue_to_apple_nvme(struct apple_nvme_queue
*q
)
211 return container_of(q
, struct apple_nvme
, adminq
);
213 return container_of(q
, struct apple_nvme
, ioq
);
216 static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue
*q
)
219 return APPLE_NVME_AQ_DEPTH
;
221 return APPLE_ANS_MAX_QUEUE_DEPTH
;
224 static void apple_nvme_rtkit_crashed(void *cookie
)
226 struct apple_nvme
*anv
= cookie
;
228 dev_warn(anv
->dev
, "RTKit crashed; unable to recover without a reboot");
229 nvme_reset_ctrl(&anv
->ctrl
);
232 static int apple_nvme_sart_dma_setup(void *cookie
,
233 struct apple_rtkit_shmem
*bfr
)
235 struct apple_nvme
*anv
= cookie
;
244 dma_alloc_coherent(anv
->dev
, bfr
->size
, &bfr
->iova
, GFP_KERNEL
);
248 ret
= apple_sart_add_allowed_region(anv
->sart
, bfr
->iova
, bfr
->size
);
250 dma_free_coherent(anv
->dev
, bfr
->size
, bfr
->buffer
, bfr
->iova
);
258 static void apple_nvme_sart_dma_destroy(void *cookie
,
259 struct apple_rtkit_shmem
*bfr
)
261 struct apple_nvme
*anv
= cookie
;
263 apple_sart_remove_allowed_region(anv
->sart
, bfr
->iova
, bfr
->size
);
264 dma_free_coherent(anv
->dev
, bfr
->size
, bfr
->buffer
, bfr
->iova
);
267 static const struct apple_rtkit_ops apple_nvme_rtkit_ops
= {
268 .crashed
= apple_nvme_rtkit_crashed
,
269 .shmem_setup
= apple_nvme_sart_dma_setup
,
270 .shmem_destroy
= apple_nvme_sart_dma_destroy
,
273 static void apple_nvmmu_inval(struct apple_nvme_queue
*q
, unsigned int tag
)
275 struct apple_nvme
*anv
= queue_to_apple_nvme(q
);
277 writel(tag
, anv
->mmio_nvme
+ APPLE_NVMMU_TCB_INVAL
);
278 if (readl(anv
->mmio_nvme
+ APPLE_NVMMU_TCB_STAT
))
279 dev_warn_ratelimited(anv
->dev
,
280 "NVMMU TCB invalidation failed\n");
283 static void apple_nvme_submit_cmd(struct apple_nvme_queue
*q
,
284 struct nvme_command
*cmd
)
286 struct apple_nvme
*anv
= queue_to_apple_nvme(q
);
287 u32 tag
= nvme_tag_from_cid(cmd
->common
.command_id
);
288 struct apple_nvmmu_tcb
*tcb
= &q
->tcbs
[tag
];
290 tcb
->opcode
= cmd
->common
.opcode
;
291 tcb
->prp1
= cmd
->common
.dptr
.prp1
;
292 tcb
->prp2
= cmd
->common
.dptr
.prp2
;
293 tcb
->length
= cmd
->rw
.length
;
294 tcb
->command_id
= tag
;
296 if (nvme_is_write(cmd
))
297 tcb
->dma_flags
= APPLE_ANS_TCB_DMA_TO_DEVICE
;
299 tcb
->dma_flags
= APPLE_ANS_TCB_DMA_FROM_DEVICE
;
301 memcpy(&q
->sqes
[tag
], cmd
, sizeof(*cmd
));
304 * This lock here doesn't make much sense at a first glace but
305 * removing it will result in occasional missed completetion
306 * interrupts even though the commands still appear on the CQ.
307 * It's unclear why this happens but our best guess is that
308 * there is a bug in the firmware triggered when a new command
309 * is issued while we're inside the irq handler between the
310 * NVMMU invalidation (and making the tag available again)
311 * and the final CQ update.
313 spin_lock_irq(&anv
->lock
);
314 writel(tag
, q
->sq_db
);
315 spin_unlock_irq(&anv
->lock
);
320 * Will slightly overestimate the number of pages needed. This is OK
321 * as it only leads to a small amount of wasted memory for the lifetime of
324 static inline size_t apple_nvme_iod_alloc_size(void)
326 const unsigned int nprps
= DIV_ROUND_UP(
327 NVME_MAX_KB_SZ
+ NVME_CTRL_PAGE_SIZE
, NVME_CTRL_PAGE_SIZE
);
328 const int npages
= DIV_ROUND_UP(8 * nprps
, PAGE_SIZE
- 8);
329 const size_t alloc_size
= sizeof(__le64
*) * npages
+
330 sizeof(struct scatterlist
) * NVME_MAX_SEGS
;
335 static void **apple_nvme_iod_list(struct request
*req
)
337 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
339 return (void **)(iod
->sg
+ blk_rq_nr_phys_segments(req
));
342 static void apple_nvme_free_prps(struct apple_nvme
*anv
, struct request
*req
)
344 const int last_prp
= NVME_CTRL_PAGE_SIZE
/ sizeof(__le64
) - 1;
345 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
346 dma_addr_t dma_addr
= iod
->first_dma
;
349 for (i
= 0; i
< iod
->npages
; i
++) {
350 __le64
*prp_list
= apple_nvme_iod_list(req
)[i
];
351 dma_addr_t next_dma_addr
= le64_to_cpu(prp_list
[last_prp
]);
353 dma_pool_free(anv
->prp_page_pool
, prp_list
, dma_addr
);
354 dma_addr
= next_dma_addr
;
358 static void apple_nvme_unmap_data(struct apple_nvme
*anv
, struct request
*req
)
360 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
363 dma_unmap_page(anv
->dev
, iod
->first_dma
, iod
->dma_len
,
368 WARN_ON_ONCE(!iod
->nents
);
370 dma_unmap_sg(anv
->dev
, iod
->sg
, iod
->nents
, rq_dma_dir(req
));
371 if (iod
->npages
== 0)
372 dma_pool_free(anv
->prp_small_pool
, apple_nvme_iod_list(req
)[0],
375 apple_nvme_free_prps(anv
, req
);
376 mempool_free(iod
->sg
, anv
->iod_mempool
);
379 static void apple_nvme_print_sgl(struct scatterlist
*sgl
, int nents
)
382 struct scatterlist
*sg
;
384 for_each_sg(sgl
, sg
, nents
, i
) {
385 dma_addr_t phys
= sg_phys(sg
);
387 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
388 i
, &phys
, sg
->offset
, sg
->length
, &sg_dma_address(sg
),
393 static blk_status_t
apple_nvme_setup_prps(struct apple_nvme
*anv
,
395 struct nvme_rw_command
*cmnd
)
397 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
398 struct dma_pool
*pool
;
399 int length
= blk_rq_payload_bytes(req
);
400 struct scatterlist
*sg
= iod
->sg
;
401 int dma_len
= sg_dma_len(sg
);
402 u64 dma_addr
= sg_dma_address(sg
);
403 int offset
= dma_addr
& (NVME_CTRL_PAGE_SIZE
- 1);
405 void **list
= apple_nvme_iod_list(req
);
409 length
-= (NVME_CTRL_PAGE_SIZE
- offset
);
415 dma_len
-= (NVME_CTRL_PAGE_SIZE
- offset
);
417 dma_addr
+= (NVME_CTRL_PAGE_SIZE
- offset
);
420 dma_addr
= sg_dma_address(sg
);
421 dma_len
= sg_dma_len(sg
);
424 if (length
<= NVME_CTRL_PAGE_SIZE
) {
425 iod
->first_dma
= dma_addr
;
429 nprps
= DIV_ROUND_UP(length
, NVME_CTRL_PAGE_SIZE
);
430 if (nprps
<= (256 / 8)) {
431 pool
= anv
->prp_small_pool
;
434 pool
= anv
->prp_page_pool
;
438 prp_list
= dma_pool_alloc(pool
, GFP_ATOMIC
, &prp_dma
);
440 iod
->first_dma
= dma_addr
;
442 return BLK_STS_RESOURCE
;
445 iod
->first_dma
= prp_dma
;
448 if (i
== NVME_CTRL_PAGE_SIZE
>> 3) {
449 __le64
*old_prp_list
= prp_list
;
451 prp_list
= dma_pool_alloc(pool
, GFP_ATOMIC
, &prp_dma
);
454 list
[iod
->npages
++] = prp_list
;
455 prp_list
[0] = old_prp_list
[i
- 1];
456 old_prp_list
[i
- 1] = cpu_to_le64(prp_dma
);
459 prp_list
[i
++] = cpu_to_le64(dma_addr
);
460 dma_len
-= NVME_CTRL_PAGE_SIZE
;
461 dma_addr
+= NVME_CTRL_PAGE_SIZE
;
462 length
-= NVME_CTRL_PAGE_SIZE
;
467 if (unlikely(dma_len
< 0))
470 dma_addr
= sg_dma_address(sg
);
471 dma_len
= sg_dma_len(sg
);
474 cmnd
->dptr
.prp1
= cpu_to_le64(sg_dma_address(iod
->sg
));
475 cmnd
->dptr
.prp2
= cpu_to_le64(iod
->first_dma
);
478 apple_nvme_free_prps(anv
, req
);
479 return BLK_STS_RESOURCE
;
481 WARN(DO_ONCE(apple_nvme_print_sgl
, iod
->sg
, iod
->nents
),
482 "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req
),
484 return BLK_STS_IOERR
;
487 static blk_status_t
apple_nvme_setup_prp_simple(struct apple_nvme
*anv
,
489 struct nvme_rw_command
*cmnd
,
492 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
493 unsigned int offset
= bv
->bv_offset
& (NVME_CTRL_PAGE_SIZE
- 1);
494 unsigned int first_prp_len
= NVME_CTRL_PAGE_SIZE
- offset
;
496 iod
->first_dma
= dma_map_bvec(anv
->dev
, bv
, rq_dma_dir(req
), 0);
497 if (dma_mapping_error(anv
->dev
, iod
->first_dma
))
498 return BLK_STS_RESOURCE
;
499 iod
->dma_len
= bv
->bv_len
;
501 cmnd
->dptr
.prp1
= cpu_to_le64(iod
->first_dma
);
502 if (bv
->bv_len
> first_prp_len
)
503 cmnd
->dptr
.prp2
= cpu_to_le64(iod
->first_dma
+ first_prp_len
);
507 static blk_status_t
apple_nvme_map_data(struct apple_nvme
*anv
,
509 struct nvme_command
*cmnd
)
511 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
512 blk_status_t ret
= BLK_STS_RESOURCE
;
515 if (blk_rq_nr_phys_segments(req
) == 1) {
516 struct bio_vec bv
= req_bvec(req
);
518 if (bv
.bv_offset
+ bv
.bv_len
<= NVME_CTRL_PAGE_SIZE
* 2)
519 return apple_nvme_setup_prp_simple(anv
, req
, &cmnd
->rw
,
524 iod
->sg
= mempool_alloc(anv
->iod_mempool
, GFP_ATOMIC
);
526 return BLK_STS_RESOURCE
;
527 sg_init_table(iod
->sg
, blk_rq_nr_phys_segments(req
));
528 iod
->nents
= blk_rq_map_sg(req
->q
, req
, iod
->sg
);
532 nr_mapped
= dma_map_sg_attrs(anv
->dev
, iod
->sg
, iod
->nents
,
533 rq_dma_dir(req
), DMA_ATTR_NO_WARN
);
537 ret
= apple_nvme_setup_prps(anv
, req
, &cmnd
->rw
);
538 if (ret
!= BLK_STS_OK
)
543 dma_unmap_sg(anv
->dev
, iod
->sg
, iod
->nents
, rq_dma_dir(req
));
545 mempool_free(iod
->sg
, anv
->iod_mempool
);
549 static __always_inline
void apple_nvme_unmap_rq(struct request
*req
)
551 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
552 struct apple_nvme
*anv
= queue_to_apple_nvme(iod
->q
);
554 if (blk_rq_nr_phys_segments(req
))
555 apple_nvme_unmap_data(anv
, req
);
/* blk-mq ->complete: unmap any DMA for the request, then complete it. */
static void apple_nvme_complete_rq(struct request *req)
{
	apple_nvme_unmap_rq(req);
	nvme_complete_rq(req);
}
564 static void apple_nvme_complete_batch(struct io_comp_batch
*iob
)
566 nvme_complete_batch(iob
, apple_nvme_unmap_rq
);
569 static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue
*q
)
571 struct nvme_completion
*hcqe
= &q
->cqes
[q
->cq_head
];
573 return (le16_to_cpu(READ_ONCE(hcqe
->status
)) & 1) == q
->cq_phase
;
576 static inline struct blk_mq_tags
*
577 apple_nvme_queue_tagset(struct apple_nvme
*anv
, struct apple_nvme_queue
*q
)
580 return anv
->admin_tagset
.tags
[0];
582 return anv
->tagset
.tags
[0];
585 static inline void apple_nvme_handle_cqe(struct apple_nvme_queue
*q
,
586 struct io_comp_batch
*iob
, u16 idx
)
588 struct apple_nvme
*anv
= queue_to_apple_nvme(q
);
589 struct nvme_completion
*cqe
= &q
->cqes
[idx
];
590 __u16 command_id
= READ_ONCE(cqe
->command_id
);
593 apple_nvmmu_inval(q
, command_id
);
595 req
= nvme_find_rq(apple_nvme_queue_tagset(anv
, q
), command_id
);
596 if (unlikely(!req
)) {
597 dev_warn(anv
->dev
, "invalid id %d completed", command_id
);
601 if (!nvme_try_complete_req(req
, cqe
->status
, cqe
->result
) &&
602 !blk_mq_add_to_batch(req
, iob
, nvme_req(req
)->status
,
603 apple_nvme_complete_batch
))
604 apple_nvme_complete_rq(req
);
607 static inline void apple_nvme_update_cq_head(struct apple_nvme_queue
*q
)
609 u32 tmp
= q
->cq_head
+ 1;
611 if (tmp
== apple_nvme_queue_depth(q
)) {
619 static bool apple_nvme_poll_cq(struct apple_nvme_queue
*q
,
620 struct io_comp_batch
*iob
)
624 while (apple_nvme_cqe_pending(q
)) {
628 * load-load control dependency between phase and the rest of
629 * the cqe requires a full read memory barrier
632 apple_nvme_handle_cqe(q
, iob
, q
->cq_head
);
633 apple_nvme_update_cq_head(q
);
637 writel(q
->cq_head
, q
->cq_db
);
642 static bool apple_nvme_handle_cq(struct apple_nvme_queue
*q
, bool force
)
645 DEFINE_IO_COMP_BATCH(iob
);
647 if (!READ_ONCE(q
->enabled
) && !force
)
650 found
= apple_nvme_poll_cq(q
, &iob
);
652 if (!rq_list_empty(&iob
.req_list
))
653 apple_nvme_complete_batch(&iob
);
658 static irqreturn_t
apple_nvme_irq(int irq
, void *data
)
660 struct apple_nvme
*anv
= data
;
661 bool handled
= false;
664 spin_lock_irqsave(&anv
->lock
, flags
);
665 if (apple_nvme_handle_cq(&anv
->ioq
, false))
667 if (apple_nvme_handle_cq(&anv
->adminq
, false))
669 spin_unlock_irqrestore(&anv
->lock
, flags
);
676 static int apple_nvme_create_cq(struct apple_nvme
*anv
)
678 struct nvme_command c
= {};
681 * Note: we (ab)use the fact that the prp fields survive if no data
682 * is attached to the request.
684 c
.create_cq
.opcode
= nvme_admin_create_cq
;
685 c
.create_cq
.prp1
= cpu_to_le64(anv
->ioq
.cq_dma_addr
);
686 c
.create_cq
.cqid
= cpu_to_le16(1);
687 c
.create_cq
.qsize
= cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH
- 1);
688 c
.create_cq
.cq_flags
= cpu_to_le16(NVME_QUEUE_PHYS_CONTIG
| NVME_CQ_IRQ_ENABLED
);
689 c
.create_cq
.irq_vector
= cpu_to_le16(0);
691 return nvme_submit_sync_cmd(anv
->ctrl
.admin_q
, &c
, NULL
, 0);
694 static int apple_nvme_remove_cq(struct apple_nvme
*anv
)
696 struct nvme_command c
= {};
698 c
.delete_queue
.opcode
= nvme_admin_delete_cq
;
699 c
.delete_queue
.qid
= cpu_to_le16(1);
701 return nvme_submit_sync_cmd(anv
->ctrl
.admin_q
, &c
, NULL
, 0);
704 static int apple_nvme_create_sq(struct apple_nvme
*anv
)
706 struct nvme_command c
= {};
709 * Note: we (ab)use the fact that the prp fields survive if no data
710 * is attached to the request.
712 c
.create_sq
.opcode
= nvme_admin_create_sq
;
713 c
.create_sq
.prp1
= cpu_to_le64(anv
->ioq
.sq_dma_addr
);
714 c
.create_sq
.sqid
= cpu_to_le16(1);
715 c
.create_sq
.qsize
= cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH
- 1);
716 c
.create_sq
.sq_flags
= cpu_to_le16(NVME_QUEUE_PHYS_CONTIG
);
717 c
.create_sq
.cqid
= cpu_to_le16(1);
719 return nvme_submit_sync_cmd(anv
->ctrl
.admin_q
, &c
, NULL
, 0);
722 static int apple_nvme_remove_sq(struct apple_nvme
*anv
)
724 struct nvme_command c
= {};
726 c
.delete_queue
.opcode
= nvme_admin_delete_sq
;
727 c
.delete_queue
.qid
= cpu_to_le16(1);
729 return nvme_submit_sync_cmd(anv
->ctrl
.admin_q
, &c
, NULL
, 0);
732 static blk_status_t
apple_nvme_queue_rq(struct blk_mq_hw_ctx
*hctx
,
733 const struct blk_mq_queue_data
*bd
)
735 struct nvme_ns
*ns
= hctx
->queue
->queuedata
;
736 struct apple_nvme_queue
*q
= hctx
->driver_data
;
737 struct apple_nvme
*anv
= queue_to_apple_nvme(q
);
738 struct request
*req
= bd
->rq
;
739 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
740 struct nvme_command
*cmnd
= &iod
->cmd
;
747 * We should not need to do this, but we're still using this to
748 * ensure we can drain requests on a dying queue.
750 if (unlikely(!READ_ONCE(q
->enabled
)))
751 return BLK_STS_IOERR
;
753 if (!nvme_check_ready(&anv
->ctrl
, req
, true))
754 return nvme_fail_nonready_command(&anv
->ctrl
, req
);
756 ret
= nvme_setup_cmd(ns
, req
);
760 if (blk_rq_nr_phys_segments(req
)) {
761 ret
= apple_nvme_map_data(anv
, req
, cmnd
);
766 nvme_start_request(req
);
767 apple_nvme_submit_cmd(q
, cmnd
);
771 nvme_cleanup_cmd(req
);
775 static int apple_nvme_init_hctx(struct blk_mq_hw_ctx
*hctx
, void *data
,
776 unsigned int hctx_idx
)
778 hctx
->driver_data
= data
;
782 static int apple_nvme_init_request(struct blk_mq_tag_set
*set
,
783 struct request
*req
, unsigned int hctx_idx
,
784 unsigned int numa_node
)
786 struct apple_nvme_queue
*q
= set
->driver_data
;
787 struct apple_nvme
*anv
= queue_to_apple_nvme(q
);
788 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
789 struct nvme_request
*nreq
= nvme_req(req
);
792 nreq
->ctrl
= &anv
->ctrl
;
793 nreq
->cmd
= &iod
->cmd
;
798 static void apple_nvme_disable(struct apple_nvme
*anv
, bool shutdown
)
800 enum nvme_ctrl_state state
= nvme_ctrl_state(&anv
->ctrl
);
801 u32 csts
= readl(anv
->mmio_nvme
+ NVME_REG_CSTS
);
802 bool dead
= false, freeze
= false;
805 if (apple_rtkit_is_crashed(anv
->rtk
))
807 if (!(csts
& NVME_CSTS_RDY
))
809 if (csts
& NVME_CSTS_CFS
)
812 if (state
== NVME_CTRL_LIVE
||
813 state
== NVME_CTRL_RESETTING
) {
815 nvme_start_freeze(&anv
->ctrl
);
819 * Give the controller a chance to complete all entered requests if
820 * doing a safe shutdown.
822 if (!dead
&& shutdown
&& freeze
)
823 nvme_wait_freeze_timeout(&anv
->ctrl
, NVME_IO_TIMEOUT
);
825 nvme_quiesce_io_queues(&anv
->ctrl
);
828 if (READ_ONCE(anv
->ioq
.enabled
)) {
829 apple_nvme_remove_sq(anv
);
830 apple_nvme_remove_cq(anv
);
834 * Always disable the NVMe controller after shutdown.
835 * We need to do this to bring it back up later anyway, and we
836 * can't do it while the firmware is not running (e.g. in the
837 * resume reset path before RTKit is initialized), so for Apple
838 * controllers it makes sense to unconditionally do it here.
839 * Additionally, this sequence of events is reliable, while
840 * others (like disabling after bringing back the firmware on
841 * resume) seem to run into trouble under some circumstances.
843 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
844 * NVMe controller is handed off with firmware shut down, in an
845 * NVMe disabled state, after a clean shutdown).
848 nvme_disable_ctrl(&anv
->ctrl
, shutdown
);
849 nvme_disable_ctrl(&anv
->ctrl
, false);
852 WRITE_ONCE(anv
->ioq
.enabled
, false);
853 WRITE_ONCE(anv
->adminq
.enabled
, false);
854 mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
855 nvme_quiesce_admin_queue(&anv
->ctrl
);
857 /* last chance to complete any requests before nvme_cancel_request */
858 spin_lock_irqsave(&anv
->lock
, flags
);
859 apple_nvme_handle_cq(&anv
->ioq
, true);
860 apple_nvme_handle_cq(&anv
->adminq
, true);
861 spin_unlock_irqrestore(&anv
->lock
, flags
);
863 nvme_cancel_tagset(&anv
->ctrl
);
864 nvme_cancel_admin_tagset(&anv
->ctrl
);
867 * The driver will not be starting up queues again if shutting down so
868 * must flush all entered requests to their failed completion to avoid
869 * deadlocking blk-mq hot-cpu notifier.
872 nvme_unquiesce_io_queues(&anv
->ctrl
);
873 nvme_unquiesce_admin_queue(&anv
->ctrl
);
877 static enum blk_eh_timer_return
apple_nvme_timeout(struct request
*req
)
879 struct apple_nvme_iod
*iod
= blk_mq_rq_to_pdu(req
);
880 struct apple_nvme_queue
*q
= iod
->q
;
881 struct apple_nvme
*anv
= queue_to_apple_nvme(q
);
883 u32 csts
= readl(anv
->mmio_nvme
+ NVME_REG_CSTS
);
885 if (nvme_ctrl_state(&anv
->ctrl
) != NVME_CTRL_LIVE
) {
888 * If we are resetting, connecting or deleting we should
889 * complete immediately because we may block controller
890 * teardown or setup sequence
891 * - ctrl disable/shutdown fabrics requests
893 * - initialization admin requests
894 * - I/O requests that entered after unquiescing and
895 * the controller stopped responding
897 * All other requests should be cancelled by the error
898 * recovery work, so it's fine that we fail it here.
901 "I/O %d(aq:%d) timeout while not in live state\n",
902 req
->tag
, q
->is_adminq
);
903 if (blk_mq_request_started(req
) &&
904 !blk_mq_request_completed(req
)) {
905 nvme_req(req
)->status
= NVME_SC_HOST_ABORTED_CMD
;
906 nvme_req(req
)->flags
|= NVME_REQ_CANCELLED
;
907 blk_mq_complete_request(req
);
912 /* check if we just missed an interrupt if we're still alive */
913 if (!apple_rtkit_is_crashed(anv
->rtk
) && !(csts
& NVME_CSTS_CFS
)) {
914 spin_lock_irqsave(&anv
->lock
, flags
);
915 apple_nvme_handle_cq(q
, false);
916 spin_unlock_irqrestore(&anv
->lock
, flags
);
917 if (blk_mq_request_completed(req
)) {
919 "I/O %d(aq:%d) timeout: completion polled\n",
920 req
->tag
, q
->is_adminq
);
926 * aborting commands isn't supported which leaves a full reset as our
929 dev_warn(anv
->dev
, "I/O %d(aq:%d) timeout: resetting controller\n",
930 req
->tag
, q
->is_adminq
);
931 nvme_req(req
)->flags
|= NVME_REQ_CANCELLED
;
932 apple_nvme_disable(anv
, false);
933 nvme_reset_ctrl(&anv
->ctrl
);
937 static int apple_nvme_poll(struct blk_mq_hw_ctx
*hctx
,
938 struct io_comp_batch
*iob
)
940 struct apple_nvme_queue
*q
= hctx
->driver_data
;
941 struct apple_nvme
*anv
= queue_to_apple_nvme(q
);
945 spin_lock_irqsave(&anv
->lock
, flags
);
946 found
= apple_nvme_poll_cq(q
, iob
);
947 spin_unlock_irqrestore(&anv
->lock
, flags
);
952 static const struct blk_mq_ops apple_nvme_mq_admin_ops
= {
953 .queue_rq
= apple_nvme_queue_rq
,
954 .complete
= apple_nvme_complete_rq
,
955 .init_hctx
= apple_nvme_init_hctx
,
956 .init_request
= apple_nvme_init_request
,
957 .timeout
= apple_nvme_timeout
,
960 static const struct blk_mq_ops apple_nvme_mq_ops
= {
961 .queue_rq
= apple_nvme_queue_rq
,
962 .complete
= apple_nvme_complete_rq
,
963 .init_hctx
= apple_nvme_init_hctx
,
964 .init_request
= apple_nvme_init_request
,
965 .timeout
= apple_nvme_timeout
,
966 .poll
= apple_nvme_poll
,
969 static void apple_nvme_init_queue(struct apple_nvme_queue
*q
)
971 unsigned int depth
= apple_nvme_queue_depth(q
);
976 APPLE_ANS_MAX_QUEUE_DEPTH
* sizeof(struct apple_nvmmu_tcb
));
977 memset(q
->cqes
, 0, depth
* sizeof(struct nvme_completion
));
978 WRITE_ONCE(q
->enabled
, true);
979 wmb(); /* ensure the first interrupt sees the initialization */
982 static void apple_nvme_reset_work(struct work_struct
*work
)
984 unsigned int nr_io_queues
= 1;
986 u32 boot_status
, aqa
;
987 struct apple_nvme
*anv
=
988 container_of(work
, struct apple_nvme
, ctrl
.reset_work
);
989 enum nvme_ctrl_state state
= nvme_ctrl_state(&anv
->ctrl
);
991 if (state
!= NVME_CTRL_RESETTING
) {
992 dev_warn(anv
->dev
, "ctrl state %d is not RESETTING\n", state
);
997 /* there's unfortunately no known way to recover if RTKit crashed :( */
998 if (apple_rtkit_is_crashed(anv
->rtk
)) {
1000 "RTKit has crashed without any way to recover.");
1005 /* RTKit must be shut down cleanly for the (soft)-reset to work */
1006 if (apple_rtkit_is_running(anv
->rtk
)) {
1007 /* reset the controller if it is enabled */
1008 if (anv
->ctrl
.ctrl_config
& NVME_CC_ENABLE
)
1009 apple_nvme_disable(anv
, false);
1010 dev_dbg(anv
->dev
, "Trying to shut down RTKit before reset.");
1011 ret
= apple_rtkit_shutdown(anv
->rtk
);
1016 writel(0, anv
->mmio_coproc
+ APPLE_ANS_COPROC_CPU_CONTROL
);
1018 ret
= reset_control_assert(anv
->reset
);
1022 ret
= apple_rtkit_reinit(anv
->rtk
);
1026 ret
= reset_control_deassert(anv
->reset
);
1030 writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN
,
1031 anv
->mmio_coproc
+ APPLE_ANS_COPROC_CPU_CONTROL
);
1032 ret
= apple_rtkit_boot(anv
->rtk
);
1034 dev_err(anv
->dev
, "ANS did not boot");
1038 ret
= readl_poll_timeout(anv
->mmio_nvme
+ APPLE_ANS_BOOT_STATUS
,
1040 boot_status
== APPLE_ANS_BOOT_STATUS_OK
,
1041 USEC_PER_MSEC
, APPLE_ANS_BOOT_TIMEOUT
);
1043 dev_err(anv
->dev
, "ANS did not initialize");
1047 dev_dbg(anv
->dev
, "ANS booted successfully.");
1050 * Limit the max command size to prevent iod->sg allocations going
1051 * over a single page.
1053 anv
->ctrl
.max_hw_sectors
= min_t(u32
, NVME_MAX_KB_SZ
<< 1,
1054 dma_max_mapping_size(anv
->dev
) >> 9);
1055 anv
->ctrl
.max_segments
= NVME_MAX_SEGS
;
1057 dma_set_max_seg_size(anv
->dev
, 0xffffffff);
1060 * Enable NVMMU and linear submission queues.
1061 * While we could keep those disabled and pretend this is slightly
1062 * more common NVMe controller we'd still need some quirks (e.g.
1063 * sq entries will be 128 bytes) and Apple might drop support for
1064 * that mode in the future.
1066 writel(APPLE_ANS_LINEAR_SQ_EN
,
1067 anv
->mmio_nvme
+ APPLE_ANS_LINEAR_SQ_CTRL
);
1069 /* Allow as many pending command as possible for both queues */
1070 writel(APPLE_ANS_MAX_QUEUE_DEPTH
| (APPLE_ANS_MAX_QUEUE_DEPTH
<< 16),
1071 anv
->mmio_nvme
+ APPLE_ANS_MAX_PEND_CMDS_CTRL
);
1073 /* Setup the NVMMU for the maximum admin and IO queue depth */
1074 writel(APPLE_ANS_MAX_QUEUE_DEPTH
- 1,
1075 anv
->mmio_nvme
+ APPLE_NVMMU_NUM_TCBS
);
1078 * This is probably a chicken bit: without it all commands where any PRP
1079 * is set to zero (including those that don't use that field) fail and
1080 * the co-processor complains about "completed with err BAD_CMD-" or
1081 * a "NULL_PRP_PTR_ERR" in the syslog
1083 writel(readl(anv
->mmio_nvme
+ APPLE_ANS_UNKNOWN_CTRL
) &
1084 ~APPLE_ANS_PRP_NULL_CHECK
,
1085 anv
->mmio_nvme
+ APPLE_ANS_UNKNOWN_CTRL
);
1087 /* Setup the admin queue */
1088 aqa
= APPLE_NVME_AQ_DEPTH
- 1;
1090 writel(aqa
, anv
->mmio_nvme
+ NVME_REG_AQA
);
1091 writeq(anv
->adminq
.sq_dma_addr
, anv
->mmio_nvme
+ NVME_REG_ASQ
);
1092 writeq(anv
->adminq
.cq_dma_addr
, anv
->mmio_nvme
+ NVME_REG_ACQ
);
1094 /* Setup NVMMU for both queues */
1095 writeq(anv
->adminq
.tcb_dma_addr
,
1096 anv
->mmio_nvme
+ APPLE_NVMMU_ASQ_TCB_BASE
);
1097 writeq(anv
->ioq
.tcb_dma_addr
,
1098 anv
->mmio_nvme
+ APPLE_NVMMU_IOSQ_TCB_BASE
);
1101 APPLE_ANS_MAX_QUEUE_DEPTH
- 1; /* 0's based queue depth */
1102 anv
->ctrl
.cap
= readq(anv
->mmio_nvme
+ NVME_REG_CAP
);
1104 dev_dbg(anv
->dev
, "Enabling controller now");
1105 ret
= nvme_enable_ctrl(&anv
->ctrl
);
1109 dev_dbg(anv
->dev
, "Starting admin queue");
1110 apple_nvme_init_queue(&anv
->adminq
);
1111 nvme_unquiesce_admin_queue(&anv
->ctrl
);
1113 if (!nvme_change_ctrl_state(&anv
->ctrl
, NVME_CTRL_CONNECTING
)) {
1114 dev_warn(anv
->ctrl
.device
,
1115 "failed to mark controller CONNECTING\n");
1120 ret
= nvme_init_ctrl_finish(&anv
->ctrl
, false);
1124 dev_dbg(anv
->dev
, "Creating IOCQ");
1125 ret
= apple_nvme_create_cq(anv
);
1128 dev_dbg(anv
->dev
, "Creating IOSQ");
1129 ret
= apple_nvme_create_sq(anv
);
1133 apple_nvme_init_queue(&anv
->ioq
);
1135 ret
= nvme_set_queue_count(&anv
->ctrl
, &nr_io_queues
);
1138 if (nr_io_queues
!= 1) {
1143 anv
->ctrl
.queue_count
= nr_io_queues
+ 1;
1145 nvme_unquiesce_io_queues(&anv
->ctrl
);
1146 nvme_wait_freeze(&anv
->ctrl
);
1147 blk_mq_update_nr_hw_queues(&anv
->tagset
, 1);
1148 nvme_unfreeze(&anv
->ctrl
);
1150 if (!nvme_change_ctrl_state(&anv
->ctrl
, NVME_CTRL_LIVE
)) {
1151 dev_warn(anv
->ctrl
.device
,
1152 "failed to mark controller live state\n");
1157 nvme_start_ctrl(&anv
->ctrl
);
1159 dev_dbg(anv
->dev
, "ANS boot and NVMe init completed.");
1163 apple_nvme_remove_sq(anv
);
1165 apple_nvme_remove_cq(anv
);
1167 dev_warn(anv
->ctrl
.device
, "Reset failure status: %d\n", ret
);
1168 nvme_change_ctrl_state(&anv
->ctrl
, NVME_CTRL_DELETING
);
1169 nvme_get_ctrl(&anv
->ctrl
);
1170 apple_nvme_disable(anv
, false);
1171 nvme_mark_namespaces_dead(&anv
->ctrl
);
1172 if (!queue_work(nvme_wq
, &anv
->remove_work
))
1173 nvme_put_ctrl(&anv
->ctrl
);
1176 static void apple_nvme_remove_dead_ctrl_work(struct work_struct
*work
)
1178 struct apple_nvme
*anv
=
1179 container_of(work
, struct apple_nvme
, remove_work
);
1181 nvme_put_ctrl(&anv
->ctrl
);
1182 device_release_driver(anv
->dev
);
1185 static int apple_nvme_reg_read32(struct nvme_ctrl
*ctrl
, u32 off
, u32
*val
)
1187 *val
= readl(ctrl_to_apple_nvme(ctrl
)->mmio_nvme
+ off
);
1191 static int apple_nvme_reg_write32(struct nvme_ctrl
*ctrl
, u32 off
, u32 val
)
1193 writel(val
, ctrl_to_apple_nvme(ctrl
)->mmio_nvme
+ off
);
1197 static int apple_nvme_reg_read64(struct nvme_ctrl
*ctrl
, u32 off
, u64
*val
)
1199 *val
= readq(ctrl_to_apple_nvme(ctrl
)->mmio_nvme
+ off
);
1203 static int apple_nvme_get_address(struct nvme_ctrl
*ctrl
, char *buf
, int size
)
1205 struct device
*dev
= ctrl_to_apple_nvme(ctrl
)->dev
;
1207 return snprintf(buf
, size
, "%s\n", dev_name(dev
));
1210 static void apple_nvme_free_ctrl(struct nvme_ctrl
*ctrl
)
1212 struct apple_nvme
*anv
= ctrl_to_apple_nvme(ctrl
);
1214 if (anv
->ctrl
.admin_q
)
1215 blk_put_queue(anv
->ctrl
.admin_q
);
1216 put_device(anv
->dev
);
/* Controller operations wired into the NVMe core for this transport. */
static const struct nvme_ctrl_ops nvme_ctrl_ops = {
	.name = "apple-nvme",
	.module = THIS_MODULE,
	.reg_read32 = apple_nvme_reg_read32,
	.reg_write32 = apple_nvme_reg_write32,
	.reg_read64 = apple_nvme_reg_read64,
	.free_ctrl = apple_nvme_free_ctrl,
	.get_address = apple_nvme_get_address,
};
1230 static void apple_nvme_async_probe(void *data
, async_cookie_t cookie
)
1232 struct apple_nvme
*anv
= data
;
1234 flush_work(&anv
->ctrl
.reset_work
);
1235 flush_work(&anv
->ctrl
.scan_work
);
1236 nvme_put_ctrl(&anv
->ctrl
);
/* devm action callback: free a blk-mq tag set allocated during probe. */
static void devm_apple_nvme_put_tag_set(void *data)
{
	struct blk_mq_tag_set *set = data;

	blk_mq_free_tag_set(set);
}
/*
 * Set up the admin and IO blk-mq tag sets and register devm cleanup
 * actions for both.  Returns 0 on success or a negative errno.
 */
static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
	int ret;

	/* Admin queue: single hw queue, shallow depth, admin command ops. */
	anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
	anv->admin_tagset.nr_hw_queues = 1;
	anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
	anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
	anv->admin_tagset.numa_node = NUMA_NO_NODE;
	anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
	anv->admin_tagset.driver_data = &anv->adminq;

	ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->admin_tagset);
	if (ret)
		return ret;

	/* IO queue: also a single hw queue on this controller. */
	anv->tagset.ops = &apple_nvme_mq_ops;
	anv->tagset.nr_hw_queues = 1;
	anv->tagset.nr_maps = 1;

	/*
	 * Tags are used as an index to the NVMMU and must be unique across
	 * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
	 * must be marked as reserved in the IO queue.
	 */
	anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
	anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
	anv->tagset.timeout = NVME_IO_TIMEOUT;
	anv->tagset.numa_node = NUMA_NO_NODE;
	anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
	anv->tagset.driver_data = &anv->ioq;

	ret = blk_mq_alloc_tag_set(&anv->tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->tagset);
	if (ret)
		return ret;

	/* Hand both tag sets to the NVMe core. */
	anv->ctrl.admin_tagset = &anv->admin_tagset;
	anv->ctrl.tagset = &anv->tagset;

	return 0;
}
/*
 * Allocate the DMA-coherent memory backing a queue: completion entries,
 * submission entries and the NVMMU TCB array.  All allocations are
 * devm-managed, so no explicit unwind is needed on partial failure.
 */
static int apple_nvme_queue_alloc(struct apple_nvme *anv,
				  struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);

	q->cqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_completion),
				      &q->cq_dma_addr, GFP_KERNEL);
	if (!q->cqes)
		return -ENOMEM;

	q->sqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_command),
				      &q->sq_dma_addr, GFP_KERNEL);
	if (!q->sqes)
		return -ENOMEM;

	/*
	 * We need the maximum queue depth here because the NVMMU only has a
	 * single depth configuration shared between both queues.
	 */
	q->tcbs = dmam_alloc_coherent(anv->dev,
				      APPLE_ANS_MAX_QUEUE_DEPTH *
					      sizeof(struct apple_nvmmu_tcb),
				      &q->tcb_dma_addr, GFP_KERNEL);
	if (!q->tcbs)
		return -ENOMEM;

	/*
	 * initialize phase to make sure the allocated and empty memory
	 * doesn't look like a full cq already.
	 */
	q->cq_phase = 1;
	return 0;
}
1331 static void apple_nvme_detach_genpd(struct apple_nvme
*anv
)
1335 if (anv
->pd_count
<= 1)
1338 for (i
= anv
->pd_count
- 1; i
>= 0; i
--) {
1339 if (anv
->pd_link
[i
])
1340 device_link_del(anv
->pd_link
[i
]);
1341 if (!IS_ERR_OR_NULL(anv
->pd_dev
[i
]))
1342 dev_pm_domain_detach(anv
->pd_dev
[i
], true);
/*
 * Attach all power domains listed in the device tree and link them to the
 * device so runtime PM keeps them powered.  With zero or one domain the
 * driver core handles attachment automatically and we do nothing.
 * On any failure the already-attached domains are detached again.
 */
static int apple_nvme_attach_genpd(struct apple_nvme *anv)
{
	struct device *dev = anv->dev;
	int i;

	anv->pd_count = of_count_phandle_with_args(
		dev->of_node, "power-domains", "#power-domain-cells");
	if (anv->pd_count <= 1)
		return 0;

	anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
				   GFP_KERNEL);
	if (!anv->pd_dev)
		return -ENOMEM;

	anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
				    GFP_KERNEL);
	if (!anv->pd_link)
		return -ENOMEM;

	for (i = 0; i < anv->pd_count; i++) {
		anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(anv->pd_dev[i])) {
			apple_nvme_detach_genpd(anv);
			return PTR_ERR(anv->pd_dev[i]);
		}

		/* Keep the domain active as long as this device is bound. */
		anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
						  DL_FLAG_STATELESS |
						  DL_FLAG_PM_RUNTIME |
						  DL_FLAG_RPM_ACTIVE);
		if (!anv->pd_link[i]) {
			apple_nvme_detach_genpd(anv);
			return -EINVAL;
		}
	}

	return 0;
}
1386 static void devm_apple_nvme_mempool_destroy(void *data
)
1388 mempool_destroy(data
);
1391 static struct apple_nvme
*apple_nvme_alloc(struct platform_device
*pdev
)
1393 struct device
*dev
= &pdev
->dev
;
1394 struct apple_nvme
*anv
;
1397 anv
= devm_kzalloc(dev
, sizeof(*anv
), GFP_KERNEL
);
1399 return ERR_PTR(-ENOMEM
);
1401 anv
->dev
= get_device(dev
);
1402 anv
->adminq
.is_adminq
= true;
1403 platform_set_drvdata(pdev
, anv
);
1405 ret
= apple_nvme_attach_genpd(anv
);
1407 dev_err_probe(dev
, ret
, "Failed to attach power domains");
1410 if (dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(64))) {
1415 anv
->irq
= platform_get_irq(pdev
, 0);
1425 anv
->mmio_coproc
= devm_platform_ioremap_resource_byname(pdev
, "ans");
1426 if (IS_ERR(anv
->mmio_coproc
)) {
1427 ret
= PTR_ERR(anv
->mmio_coproc
);
1430 anv
->mmio_nvme
= devm_platform_ioremap_resource_byname(pdev
, "nvme");
1431 if (IS_ERR(anv
->mmio_nvme
)) {
1432 ret
= PTR_ERR(anv
->mmio_nvme
);
1436 anv
->adminq
.sq_db
= anv
->mmio_nvme
+ APPLE_ANS_LINEAR_ASQ_DB
;
1437 anv
->adminq
.cq_db
= anv
->mmio_nvme
+ APPLE_ANS_ACQ_DB
;
1438 anv
->ioq
.sq_db
= anv
->mmio_nvme
+ APPLE_ANS_LINEAR_IOSQ_DB
;
1439 anv
->ioq
.cq_db
= anv
->mmio_nvme
+ APPLE_ANS_IOCQ_DB
;
1441 anv
->sart
= devm_apple_sart_get(dev
);
1442 if (IS_ERR(anv
->sart
)) {
1443 ret
= dev_err_probe(dev
, PTR_ERR(anv
->sart
),
1444 "Failed to initialize SART");
1448 anv
->reset
= devm_reset_control_array_get_exclusive(anv
->dev
);
1449 if (IS_ERR(anv
->reset
)) {
1450 ret
= dev_err_probe(dev
, PTR_ERR(anv
->reset
),
1451 "Failed to get reset control");
1455 INIT_WORK(&anv
->ctrl
.reset_work
, apple_nvme_reset_work
);
1456 INIT_WORK(&anv
->remove_work
, apple_nvme_remove_dead_ctrl_work
);
1457 spin_lock_init(&anv
->lock
);
1459 ret
= apple_nvme_queue_alloc(anv
, &anv
->adminq
);
1462 ret
= apple_nvme_queue_alloc(anv
, &anv
->ioq
);
1466 anv
->prp_page_pool
= dmam_pool_create("prp list page", anv
->dev
,
1467 NVME_CTRL_PAGE_SIZE
,
1468 NVME_CTRL_PAGE_SIZE
, 0);
1469 if (!anv
->prp_page_pool
) {
1474 anv
->prp_small_pool
=
1475 dmam_pool_create("prp list 256", anv
->dev
, 256, 256, 0);
1476 if (!anv
->prp_small_pool
) {
1481 WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE
);
1483 mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
1484 if (!anv
->iod_mempool
) {
1488 ret
= devm_add_action_or_reset(anv
->dev
,
1489 devm_apple_nvme_mempool_destroy
, anv
->iod_mempool
);
1493 ret
= apple_nvme_alloc_tagsets(anv
);
1497 ret
= devm_request_irq(anv
->dev
, anv
->irq
, apple_nvme_irq
, 0,
1500 dev_err_probe(dev
, ret
, "Failed to request IRQ");
1505 devm_apple_rtkit_init(dev
, anv
, NULL
, 0, &apple_nvme_rtkit_ops
);
1506 if (IS_ERR(anv
->rtk
)) {
1507 ret
= dev_err_probe(dev
, PTR_ERR(anv
->rtk
),
1508 "Failed to initialize RTKit");
1512 ret
= nvme_init_ctrl(&anv
->ctrl
, anv
->dev
, &nvme_ctrl_ops
,
1513 NVME_QUIRK_SKIP_CID_GEN
| NVME_QUIRK_IDENTIFY_CNS
);
1515 dev_err_probe(dev
, ret
, "Failed to initialize nvme_ctrl");
1521 put_device(anv
->dev
);
1522 return ERR_PTR(ret
);
1525 static int apple_nvme_probe(struct platform_device
*pdev
)
1527 struct apple_nvme
*anv
;
1530 anv
= apple_nvme_alloc(pdev
);
1532 return PTR_ERR(anv
);
1534 ret
= nvme_add_ctrl(&anv
->ctrl
);
1538 anv
->ctrl
.admin_q
= blk_mq_alloc_queue(&anv
->admin_tagset
, NULL
, NULL
);
1539 if (IS_ERR(anv
->ctrl
.admin_q
)) {
1541 anv
->ctrl
.admin_q
= NULL
;
1542 goto out_uninit_ctrl
;
1545 nvme_reset_ctrl(&anv
->ctrl
);
1546 async_schedule(apple_nvme_async_probe
, anv
);
1551 nvme_uninit_ctrl(&anv
->ctrl
);
1553 nvme_put_ctrl(&anv
->ctrl
);
1557 static void apple_nvme_remove(struct platform_device
*pdev
)
1559 struct apple_nvme
*anv
= platform_get_drvdata(pdev
);
1561 nvme_change_ctrl_state(&anv
->ctrl
, NVME_CTRL_DELETING
);
1562 flush_work(&anv
->ctrl
.reset_work
);
1563 nvme_stop_ctrl(&anv
->ctrl
);
1564 nvme_remove_namespaces(&anv
->ctrl
);
1565 apple_nvme_disable(anv
, true);
1566 nvme_uninit_ctrl(&anv
->ctrl
);
1568 if (apple_rtkit_is_running(anv
->rtk
))
1569 apple_rtkit_shutdown(anv
->rtk
);
1571 apple_nvme_detach_genpd(anv
);
1574 static void apple_nvme_shutdown(struct platform_device
*pdev
)
1576 struct apple_nvme
*anv
= platform_get_drvdata(pdev
);
1578 apple_nvme_disable(anv
, true);
1579 if (apple_rtkit_is_running(anv
->rtk
))
1580 apple_rtkit_shutdown(anv
->rtk
);
1583 static int apple_nvme_resume(struct device
*dev
)
1585 struct apple_nvme
*anv
= dev_get_drvdata(dev
);
1587 return nvme_reset_ctrl(&anv
->ctrl
);
1590 static int apple_nvme_suspend(struct device
*dev
)
1592 struct apple_nvme
*anv
= dev_get_drvdata(dev
);
1595 apple_nvme_disable(anv
, true);
1597 if (apple_rtkit_is_running(anv
->rtk
))
1598 ret
= apple_rtkit_shutdown(anv
->rtk
);
1600 writel(0, anv
->mmio_coproc
+ APPLE_ANS_COPROC_CPU_CONTROL
);
/* Sleep-only PM ops; no runtime PM callbacks are provided. */
static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
				apple_nvme_resume);
/* Device-tree match table: second-generation ANS NVMe controllers. */
static const struct of_device_id apple_nvme_of_match[] = {
	{ .compatible = "apple,nvme-ans2" },
	{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
/* Platform driver glue tying probe/remove/shutdown and PM ops together. */
static struct platform_driver apple_nvme_driver = {
	.driver = {
		.name = "nvme-apple",
		.of_match_table = apple_nvme_of_match,
		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
	},
	.probe = apple_nvme_probe,
	.remove = apple_nvme_remove,
	.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);
1626 MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
1627 MODULE_DESCRIPTION("Apple ANS NVM Express device driver");
1628 MODULE_LICENSE("GPL");