/*
 * QEMU Universal Flash Storage (UFS) Controller
 *
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
 *
 * Written by Jeuk Kim <jeuk20.kim@samsung.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Reference Specs: https://www.jedec.org/, 4.0
 *
 * Usage
 * -----
 * -drive file=<file>,if=none,id=<drive_id>
 * -device ufs,serial=<serial>,id=<bus_name>, \
 *         nutrs=<N[optional]>,nutmrs=<N[optional]>
 * -device ufs-lu,drive=<drive_id>,bus=<bus_name>
 */
24 #include "qemu/osdep.h"
25 #include "qapi/error.h"
26 #include "migration/vmstate.h"
27 #include "scsi/constants.h"
31 /* The QEMU-UFS device follows spec version 4.0 */
32 #define UFS_SPEC_VER 0x0400
33 #define UFS_MAX_NUTRS 32
34 #define UFS_MAX_NUTMRS 8
35 #define UFS_MCQ_QCFGPTR 2
37 static void ufs_exec_req(UfsRequest
*req
);
38 static void ufs_clear_req(UfsRequest
*req
);
40 static inline uint64_t ufs_mcq_reg_addr(UfsHc
*u
, int qid
)
42 /* Submission Queue MCQ Registers offset (400h) */
43 return (UFS_MCQ_QCFGPTR
* 0x200) + qid
* 0x40;
46 static inline uint64_t ufs_mcq_op_reg_addr(UfsHc
*u
, int qid
)
48 /* MCQ Operation & Runtime Registers offset (1000h) */
49 return UFS_MCQ_OPR_START
+ qid
* 48;
52 static inline uint64_t ufs_reg_size(UfsHc
*u
)
54 /* Total UFS HCI Register size in bytes */
55 return ufs_mcq_op_reg_addr(u
, 0) + sizeof(u
->mcq_op_reg
);
58 static inline bool ufs_is_mcq_reg(UfsHc
*u
, uint64_t addr
, unsigned size
)
60 uint64_t mcq_reg_addr
;
66 mcq_reg_addr
= ufs_mcq_reg_addr(u
, 0);
67 return (addr
>= mcq_reg_addr
&&
68 addr
+ size
<= mcq_reg_addr
+ sizeof(u
->mcq_reg
));
71 static inline bool ufs_is_mcq_op_reg(UfsHc
*u
, uint64_t addr
, unsigned size
)
73 uint64_t mcq_op_reg_addr
;
79 mcq_op_reg_addr
= ufs_mcq_op_reg_addr(u
, 0);
80 return (addr
>= mcq_op_reg_addr
&&
81 addr
+ size
<= mcq_op_reg_addr
+ sizeof(u
->mcq_op_reg
));
84 static MemTxResult
ufs_addr_read(UfsHc
*u
, hwaddr addr
, void *buf
, int size
)
86 hwaddr hi
= addr
+ size
- 1;
89 return MEMTX_DECODE_ERROR
;
92 if (!FIELD_EX32(u
->reg
.cap
, CAP
, 64AS
) && (hi
>> 32)) {
93 return MEMTX_DECODE_ERROR
;
96 return pci_dma_read(PCI_DEVICE(u
), addr
, buf
, size
);
99 static MemTxResult
ufs_addr_write(UfsHc
*u
, hwaddr addr
, const void *buf
,
102 hwaddr hi
= addr
+ size
- 1;
104 return MEMTX_DECODE_ERROR
;
107 if (!FIELD_EX32(u
->reg
.cap
, CAP
, 64AS
) && (hi
>> 32)) {
108 return MEMTX_DECODE_ERROR
;
111 return pci_dma_write(PCI_DEVICE(u
), addr
, buf
, size
);
114 static inline hwaddr
ufs_get_utrd_addr(UfsHc
*u
, uint32_t slot
)
116 hwaddr utrl_base_addr
= (((hwaddr
)u
->reg
.utrlbau
) << 32) + u
->reg
.utrlba
;
117 hwaddr utrd_addr
= utrl_base_addr
+ slot
* sizeof(UtpTransferReqDesc
);
122 static inline hwaddr
ufs_get_req_upiu_base_addr(const UtpTransferReqDesc
*utrd
)
124 uint32_t cmd_desc_base_addr_lo
=
125 le32_to_cpu(utrd
->command_desc_base_addr_lo
);
126 uint32_t cmd_desc_base_addr_hi
=
127 le32_to_cpu(utrd
->command_desc_base_addr_hi
);
129 return (((hwaddr
)cmd_desc_base_addr_hi
) << 32) + cmd_desc_base_addr_lo
;
132 static inline hwaddr
ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc
*utrd
)
134 hwaddr req_upiu_base_addr
= ufs_get_req_upiu_base_addr(utrd
);
135 uint32_t rsp_upiu_byte_off
=
136 le16_to_cpu(utrd
->response_upiu_offset
) * sizeof(uint32_t);
137 return req_upiu_base_addr
+ rsp_upiu_byte_off
;
140 static MemTxResult
ufs_dma_read_utrd(UfsRequest
*req
)
143 hwaddr utrd_addr
= ufs_get_utrd_addr(u
, req
->slot
);
146 ret
= ufs_addr_read(u
, utrd_addr
, &req
->utrd
, sizeof(req
->utrd
));
148 trace_ufs_err_dma_read_utrd(req
->slot
, utrd_addr
);
153 static MemTxResult
ufs_dma_read_req_upiu(UfsRequest
*req
)
156 hwaddr req_upiu_base_addr
= ufs_get_req_upiu_base_addr(&req
->utrd
);
157 UtpUpiuReq
*req_upiu
= &req
->req_upiu
;
159 uint16_t data_segment_length
;
163 * To know the size of the req_upiu, we need to read the
164 * data_segment_length in the header first.
166 ret
= ufs_addr_read(u
, req_upiu_base_addr
, &req_upiu
->header
,
167 sizeof(UtpUpiuHeader
));
169 trace_ufs_err_dma_read_req_upiu(req
->slot
, req_upiu_base_addr
);
172 data_segment_length
= be16_to_cpu(req_upiu
->header
.data_segment_length
);
174 copy_size
= sizeof(UtpUpiuHeader
) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE
+
177 if (copy_size
> sizeof(req
->req_upiu
)) {
178 copy_size
= sizeof(req
->req_upiu
);
181 ret
= ufs_addr_read(u
, req_upiu_base_addr
, &req
->req_upiu
, copy_size
);
183 trace_ufs_err_dma_read_req_upiu(req
->slot
, req_upiu_base_addr
);
188 static MemTxResult
ufs_dma_read_prdt(UfsRequest
*req
)
191 uint16_t prdt_len
= le16_to_cpu(req
->utrd
.prd_table_length
);
192 uint16_t prdt_byte_off
=
193 le16_to_cpu(req
->utrd
.prd_table_offset
) * sizeof(uint32_t);
194 uint32_t prdt_size
= prdt_len
* sizeof(UfshcdSgEntry
);
195 g_autofree UfshcdSgEntry
*prd_entries
= NULL
;
196 hwaddr req_upiu_base_addr
, prdt_base_addr
;
201 if (prdt_size
== 0) {
204 prd_entries
= g_new(UfshcdSgEntry
, prdt_size
);
206 req_upiu_base_addr
= ufs_get_req_upiu_base_addr(&req
->utrd
);
207 prdt_base_addr
= req_upiu_base_addr
+ prdt_byte_off
;
209 err
= ufs_addr_read(u
, prdt_base_addr
, prd_entries
, prdt_size
);
211 trace_ufs_err_dma_read_prdt(req
->slot
, prdt_base_addr
);
215 req
->sg
= g_malloc0(sizeof(QEMUSGList
));
216 pci_dma_sglist_init(req
->sg
, PCI_DEVICE(u
), prdt_len
);
219 for (uint16_t i
= 0; i
< prdt_len
; ++i
) {
220 hwaddr data_dma_addr
= le64_to_cpu(prd_entries
[i
].addr
);
221 uint32_t data_byte_count
= le32_to_cpu(prd_entries
[i
].size
) + 1;
222 qemu_sglist_add(req
->sg
, data_dma_addr
, data_byte_count
);
223 req
->data_len
+= data_byte_count
;
228 static MemTxResult
ufs_dma_read_upiu(UfsRequest
*req
)
233 * In case of MCQ, UTRD has already been read from a SQ, so skip it.
235 if (!ufs_mcq_req(req
)) {
236 ret
= ufs_dma_read_utrd(req
);
242 ret
= ufs_dma_read_req_upiu(req
);
247 ret
= ufs_dma_read_prdt(req
);
255 static MemTxResult
ufs_dma_write_utrd(UfsRequest
*req
)
258 hwaddr utrd_addr
= ufs_get_utrd_addr(u
, req
->slot
);
261 ret
= ufs_addr_write(u
, utrd_addr
, &req
->utrd
, sizeof(req
->utrd
));
263 trace_ufs_err_dma_write_utrd(req
->slot
, utrd_addr
);
268 static MemTxResult
ufs_dma_write_rsp_upiu(UfsRequest
*req
)
271 hwaddr rsp_upiu_base_addr
= ufs_get_rsp_upiu_base_addr(&req
->utrd
);
272 uint32_t rsp_upiu_byte_len
=
273 le16_to_cpu(req
->utrd
.response_upiu_length
) * sizeof(uint32_t);
274 uint16_t data_segment_length
=
275 be16_to_cpu(req
->rsp_upiu
.header
.data_segment_length
);
276 uint32_t copy_size
= sizeof(UtpUpiuHeader
) +
277 UFS_TRANSACTION_SPECIFIC_FIELD_SIZE
+
281 if (copy_size
> rsp_upiu_byte_len
) {
282 copy_size
= rsp_upiu_byte_len
;
285 if (copy_size
> sizeof(req
->rsp_upiu
)) {
286 copy_size
= sizeof(req
->rsp_upiu
);
289 ret
= ufs_addr_write(u
, rsp_upiu_base_addr
, &req
->rsp_upiu
, copy_size
);
291 trace_ufs_err_dma_write_rsp_upiu(req
->slot
, rsp_upiu_base_addr
);
296 static MemTxResult
ufs_dma_write_upiu(UfsRequest
*req
)
300 ret
= ufs_dma_write_rsp_upiu(req
);
305 return ufs_dma_write_utrd(req
);
308 static void ufs_irq_check(UfsHc
*u
)
310 PCIDevice
*pci
= PCI_DEVICE(u
);
312 if ((u
->reg
.is
& UFS_INTR_MASK
) & u
->reg
.ie
) {
313 trace_ufs_irq_raise();
316 trace_ufs_irq_lower();
317 pci_irq_deassert(pci
);
321 static void ufs_process_db(UfsHc
*u
, uint32_t val
)
323 DECLARE_BITMAP(doorbell
, UFS_MAX_NUTRS
);
325 uint32_t nutrs
= u
->params
.nutrs
;
328 val
&= ~u
->reg
.utrldbr
;
334 slot
= find_first_bit(doorbell
, nutrs
);
336 while (slot
< nutrs
) {
337 req
= &u
->req_list
[slot
];
338 if (req
->state
== UFS_REQUEST_ERROR
) {
339 trace_ufs_err_utrl_slot_error(req
->slot
);
343 if (req
->state
!= UFS_REQUEST_IDLE
) {
344 trace_ufs_err_utrl_slot_busy(req
->slot
);
348 trace_ufs_process_db(slot
);
349 req
->state
= UFS_REQUEST_READY
;
350 slot
= find_next_bit(doorbell
, nutrs
, slot
+ 1);
353 qemu_bh_schedule(u
->doorbell_bh
);
356 static void ufs_process_uiccmd(UfsHc
*u
, uint32_t val
)
358 trace_ufs_process_uiccmd(val
, u
->reg
.ucmdarg1
, u
->reg
.ucmdarg2
,
361 * Only the essential uic commands for running drivers on Linux and Windows
365 case UFS_UIC_CMD_DME_LINK_STARTUP
:
366 u
->reg
.hcs
= FIELD_DP32(u
->reg
.hcs
, HCS
, DP
, 1);
367 u
->reg
.hcs
= FIELD_DP32(u
->reg
.hcs
, HCS
, UTRLRDY
, 1);
368 u
->reg
.hcs
= FIELD_DP32(u
->reg
.hcs
, HCS
, UTMRLRDY
, 1);
369 u
->reg
.ucmdarg2
= UFS_UIC_CMD_RESULT_SUCCESS
;
371 /* TODO: Revisit it when Power Management is implemented */
372 case UFS_UIC_CMD_DME_HIBER_ENTER
:
373 u
->reg
.is
= FIELD_DP32(u
->reg
.is
, IS
, UHES
, 1);
374 u
->reg
.hcs
= FIELD_DP32(u
->reg
.hcs
, HCS
, UPMCRS
, UFS_PWR_LOCAL
);
375 u
->reg
.ucmdarg2
= UFS_UIC_CMD_RESULT_SUCCESS
;
377 case UFS_UIC_CMD_DME_HIBER_EXIT
:
378 u
->reg
.is
= FIELD_DP32(u
->reg
.is
, IS
, UHXS
, 1);
379 u
->reg
.hcs
= FIELD_DP32(u
->reg
.hcs
, HCS
, UPMCRS
, UFS_PWR_LOCAL
);
380 u
->reg
.ucmdarg2
= UFS_UIC_CMD_RESULT_SUCCESS
;
383 u
->reg
.ucmdarg2
= UFS_UIC_CMD_RESULT_FAILURE
;
386 u
->reg
.is
= FIELD_DP32(u
->reg
.is
, IS
, UCCS
, 1);
391 static void ufs_mcq_init_req(UfsHc
*u
, UfsRequest
*req
, UfsSq
*sq
)
393 memset(req
, 0, sizeof(*req
));
396 req
->state
= UFS_REQUEST_IDLE
;
397 req
->slot
= UFS_INVALID_SLOT
;
401 static void ufs_mcq_process_sq(void *opaque
)
408 uint16_t head
= ufs_mcq_sq_head(u
, sq
->sqid
);
411 while (!(ufs_mcq_sq_empty(u
, sq
->sqid
) || QTAILQ_EMPTY(&sq
->req_list
))) {
412 addr
= sq
->addr
+ head
;
413 err
= ufs_addr_read(sq
->u
, addr
, (void *)&sqe
, sizeof(sqe
));
415 trace_ufs_err_dma_read_sq(sq
->sqid
, addr
);
419 head
= (head
+ sizeof(sqe
)) % (sq
->size
* sizeof(sqe
));
420 ufs_mcq_update_sq_head(u
, sq
->sqid
, head
);
422 req
= QTAILQ_FIRST(&sq
->req_list
);
423 QTAILQ_REMOVE(&sq
->req_list
, req
, entry
);
425 ufs_mcq_init_req(sq
->u
, req
, sq
);
426 memcpy(&req
->utrd
, &sqe
, sizeof(req
->utrd
));
428 req
->state
= UFS_REQUEST_RUNNING
;
433 static void ufs_mcq_process_cq(void *opaque
)
437 UfsRequest
*req
, *next
;
439 uint32_t tail
= ufs_mcq_cq_tail(u
, cq
->cqid
);
441 QTAILQ_FOREACH_SAFE(req
, &cq
->req_list
, entry
, next
)
443 ufs_dma_write_rsp_upiu(req
);
446 ((uint64_t)req
->utrd
.command_desc_base_addr_hi
<< 32ULL) |
447 req
->utrd
.command_desc_base_addr_lo
;
448 req
->cqe
.utp_addr
|= req
->sq
->sqid
;
449 req
->cqe
.resp_len
= req
->utrd
.response_upiu_length
;
450 req
->cqe
.resp_off
= req
->utrd
.response_upiu_offset
;
451 req
->cqe
.prdt_len
= req
->utrd
.prd_table_length
;
452 req
->cqe
.prdt_off
= req
->utrd
.prd_table_offset
;
453 req
->cqe
.status
= req
->utrd
.header
.dword_2
& 0xf;
456 ret
= ufs_addr_write(u
, cq
->addr
+ tail
, &req
->cqe
, sizeof(req
->cqe
));
458 trace_ufs_err_dma_write_cq(cq
->cqid
, cq
->addr
+ tail
);
460 QTAILQ_REMOVE(&cq
->req_list
, req
, entry
);
462 tail
= (tail
+ sizeof(req
->cqe
)) % (cq
->size
* sizeof(req
->cqe
));
463 ufs_mcq_update_cq_tail(u
, cq
->cqid
, tail
);
466 QTAILQ_INSERT_TAIL(&req
->sq
->req_list
, req
, entry
);
469 if (!ufs_mcq_cq_empty(u
, cq
->cqid
)) {
470 u
->mcq_op_reg
[cq
->cqid
].cq_int
.is
=
471 FIELD_DP32(u
->mcq_op_reg
[cq
->cqid
].cq_int
.is
, CQIS
, TEPS
, 1);
473 u
->reg
.is
= FIELD_DP32(u
->reg
.is
, IS
, CQES
, 1);
478 static bool ufs_mcq_create_sq(UfsHc
*u
, uint8_t qid
, uint32_t attr
)
480 UfsMcqReg
*reg
= &u
->mcq_reg
[qid
];
482 uint8_t cqid
= FIELD_EX32(attr
, SQATTR
, CQID
);
484 if (qid
>= u
->params
.mcq_maxq
) {
485 trace_ufs_err_mcq_create_sq_invalid_sqid(qid
);
490 trace_ufs_err_mcq_create_sq_already_exists(qid
);
495 trace_ufs_err_mcq_create_sq_invalid_cqid(qid
);
499 sq
= g_malloc0(sizeof(*sq
));
502 sq
->cq
= u
->cq
[cqid
];
503 sq
->addr
= ((uint64_t)reg
->squba
<< 32) | reg
->sqlba
;
504 sq
->size
= ((FIELD_EX32(attr
, SQATTR
, SIZE
) + 1) << 2) / sizeof(UfsSqEntry
);
506 sq
->bh
= qemu_bh_new_guarded(ufs_mcq_process_sq
, sq
,
507 &DEVICE(u
)->mem_reentrancy_guard
);
508 sq
->req
= g_new0(UfsRequest
, sq
->size
);
509 QTAILQ_INIT(&sq
->req_list
);
510 for (int i
= 0; i
< sq
->size
; i
++) {
511 ufs_mcq_init_req(u
, &sq
->req
[i
], sq
);
512 QTAILQ_INSERT_TAIL(&sq
->req_list
, &sq
->req
[i
], entry
);
517 trace_ufs_mcq_create_sq(sq
->sqid
, sq
->cq
->cqid
, sq
->addr
, sq
->size
);
521 static bool ufs_mcq_delete_sq(UfsHc
*u
, uint8_t qid
)
525 if (qid
>= u
->params
.mcq_maxq
) {
526 trace_ufs_err_mcq_delete_sq_invalid_sqid(qid
);
531 trace_ufs_err_mcq_delete_sq_not_exists(qid
);
537 qemu_bh_delete(sq
->bh
);
544 static bool ufs_mcq_create_cq(UfsHc
*u
, uint8_t qid
, uint32_t attr
)
546 UfsMcqReg
*reg
= &u
->mcq_reg
[qid
];
549 if (qid
>= u
->params
.mcq_maxq
) {
550 trace_ufs_err_mcq_create_cq_invalid_cqid(qid
);
555 trace_ufs_err_mcq_create_cq_already_exists(qid
);
559 cq
= g_malloc0(sizeof(*cq
));
562 cq
->addr
= ((uint64_t)reg
->cquba
<< 32) | reg
->cqlba
;
563 cq
->size
= ((FIELD_EX32(attr
, CQATTR
, SIZE
) + 1) << 2) / sizeof(UfsCqEntry
);
565 cq
->bh
= qemu_bh_new_guarded(ufs_mcq_process_cq
, cq
,
566 &DEVICE(u
)->mem_reentrancy_guard
);
567 QTAILQ_INIT(&cq
->req_list
);
571 trace_ufs_mcq_create_cq(cq
->cqid
, cq
->addr
, cq
->size
);
575 static bool ufs_mcq_delete_cq(UfsHc
*u
, uint8_t qid
)
579 if (qid
>= u
->params
.mcq_maxq
) {
580 trace_ufs_err_mcq_delete_cq_invalid_cqid(qid
);
585 trace_ufs_err_mcq_delete_cq_not_exists(qid
);
589 for (int i
= 0; i
< ARRAY_SIZE(u
->sq
); i
++) {
590 if (u
->sq
[i
] && u
->sq
[i
]->cq
->cqid
== qid
) {
591 trace_ufs_err_mcq_delete_cq_sq_not_deleted(i
, qid
);
598 qemu_bh_delete(cq
->bh
);
604 static void ufs_write_reg(UfsHc
*u
, hwaddr offset
, uint32_t data
, unsigned size
)
616 if (!FIELD_EX32(u
->reg
.hce
, HCE
, HCE
) && FIELD_EX32(data
, HCE
, HCE
)) {
617 u
->reg
.hcs
= FIELD_DP32(u
->reg
.hcs
, HCS
, UCRDY
, 1);
618 u
->reg
.hce
= FIELD_DP32(u
->reg
.hce
, HCE
, HCE
, 1);
619 } else if (FIELD_EX32(u
->reg
.hce
, HCE
, HCE
) &&
620 !FIELD_EX32(data
, HCE
, HCE
)) {
622 u
->reg
.hce
= FIELD_DP32(u
->reg
.hce
, HCE
, HCE
, 0);
626 u
->reg
.utrlba
= data
& R_UTRLBA_UTRLBA_MASK
;
629 u
->reg
.utrlbau
= data
;
632 ufs_process_db(u
, data
);
633 u
->reg
.utrldbr
|= data
;
636 u
->reg
.utrlrsr
= data
;
639 u
->reg
.utrlcnr
&= ~data
;
642 u
->reg
.utmrlba
= data
& R_UTMRLBA_UTMRLBA_MASK
;
645 u
->reg
.utmrlbau
= data
;
648 ufs_process_uiccmd(u
, data
);
651 u
->reg
.ucmdarg1
= data
;
654 u
->reg
.ucmdarg2
= data
;
657 u
->reg
.ucmdarg3
= data
;
660 u
->reg
.config
= data
;
663 u
->reg
.mcqconfig
= data
;
669 trace_ufs_err_unsupport_register_offset(offset
);
672 trace_ufs_err_invalid_register_offset(offset
);
677 static void ufs_write_mcq_reg(UfsHc
*u
, hwaddr offset
, uint32_t data
,
680 int qid
= offset
/ sizeof(UfsMcqReg
);
681 UfsMcqReg
*reg
= &u
->mcq_reg
[qid
];
683 switch (offset
% sizeof(UfsMcqReg
)) {
685 if (!FIELD_EX32(reg
->sqattr
, SQATTR
, SQEN
) &&
686 FIELD_EX32(data
, SQATTR
, SQEN
)) {
687 if (!ufs_mcq_create_sq(u
, qid
, data
)) {
690 } else if (FIELD_EX32(reg
->sqattr
, SQATTR
, SQEN
) &&
691 !FIELD_EX32(data
, SQATTR
, SQEN
)) {
692 if (!ufs_mcq_delete_sq(u
, qid
)) {
708 if (!FIELD_EX32(reg
->cqattr
, CQATTR
, CQEN
) &&
709 FIELD_EX32(data
, CQATTR
, CQEN
)) {
710 if (!ufs_mcq_create_cq(u
, qid
, data
)) {
713 } else if (FIELD_EX32(reg
->cqattr
, CQATTR
, CQEN
) &&
714 !FIELD_EX32(data
, CQATTR
, CQEN
)) {
715 if (!ufs_mcq_delete_cq(u
, qid
)) {
734 trace_ufs_err_unsupport_register_offset(offset
);
737 trace_ufs_err_invalid_register_offset(offset
);
742 static void ufs_mcq_process_db(UfsHc
*u
, uint8_t qid
, uint32_t db
)
746 if (qid
>= u
->params
.mcq_maxq
) {
747 trace_ufs_err_mcq_db_wr_invalid_sqid(qid
);
752 if (sq
->size
* sizeof(UfsSqEntry
) <= db
) {
753 trace_ufs_err_mcq_db_wr_invalid_db(qid
, db
);
757 ufs_mcq_update_sq_tail(u
, sq
->sqid
, db
);
758 qemu_bh_schedule(sq
->bh
);
761 static void ufs_write_mcq_op_reg(UfsHc
*u
, hwaddr offset
, uint32_t data
,
764 int qid
= offset
/ sizeof(UfsMcqOpReg
);
765 UfsMcqOpReg
*opr
= &u
->mcq_op_reg
[qid
];
767 switch (offset
% sizeof(UfsMcqOpReg
)) {
768 case offsetof(UfsMcqOpReg
, sq
.tp
):
769 if (opr
->sq
.tp
!= data
) {
770 ufs_mcq_process_db(u
, qid
, data
);
774 case offsetof(UfsMcqOpReg
, cq
.hp
):
776 ufs_mcq_update_cq_head(u
, qid
, data
);
778 case offsetof(UfsMcqOpReg
, cq_int
.is
):
779 opr
->cq_int
.is
&= ~data
;
782 trace_ufs_err_invalid_register_offset(offset
);
787 static uint64_t ufs_mmio_read(void *opaque
, hwaddr addr
, unsigned size
)
789 UfsHc
*u
= (UfsHc
*)opaque
;
794 if (addr
+ size
<= sizeof(u
->reg
)) {
796 ptr
= (uint32_t *)&u
->reg
;
797 } else if (ufs_is_mcq_reg(u
, addr
, size
)) {
798 offset
= addr
- ufs_mcq_reg_addr(u
, 0);
799 ptr
= (uint32_t *)&u
->mcq_reg
;
800 } else if (ufs_is_mcq_op_reg(u
, addr
, size
)) {
801 offset
= addr
- ufs_mcq_op_reg_addr(u
, 0);
802 ptr
= (uint32_t *)&u
->mcq_op_reg
;
804 trace_ufs_err_invalid_register_offset(addr
);
808 value
= ptr
[offset
>> 2];
809 trace_ufs_mmio_read(addr
, value
, size
);
813 static void ufs_mmio_write(void *opaque
, hwaddr addr
, uint64_t data
,
816 UfsHc
*u
= (UfsHc
*)opaque
;
818 trace_ufs_mmio_write(addr
, data
, size
);
820 if (addr
+ size
<= sizeof(u
->reg
)) {
821 ufs_write_reg(u
, addr
, data
, size
);
822 } else if (ufs_is_mcq_reg(u
, addr
, size
)) {
823 ufs_write_mcq_reg(u
, addr
- ufs_mcq_reg_addr(u
, 0), data
, size
);
824 } else if (ufs_is_mcq_op_reg(u
, addr
, size
)) {
825 ufs_write_mcq_op_reg(u
, addr
- ufs_mcq_op_reg_addr(u
, 0), data
, size
);
827 trace_ufs_err_invalid_register_offset(addr
);
831 static const MemoryRegionOps ufs_mmio_ops
= {
832 .read
= ufs_mmio_read
,
833 .write
= ufs_mmio_write
,
834 .endianness
= DEVICE_LITTLE_ENDIAN
,
836 .min_access_size
= 4,
837 .max_access_size
= 4,
842 void ufs_build_upiu_header(UfsRequest
*req
, uint8_t trans_type
, uint8_t flags
,
843 uint8_t response
, uint8_t scsi_status
,
844 uint16_t data_segment_length
)
846 memcpy(&req
->rsp_upiu
.header
, &req
->req_upiu
.header
, sizeof(UtpUpiuHeader
));
847 req
->rsp_upiu
.header
.trans_type
= trans_type
;
848 req
->rsp_upiu
.header
.flags
= flags
;
849 req
->rsp_upiu
.header
.response
= response
;
850 req
->rsp_upiu
.header
.scsi_status
= scsi_status
;
851 req
->rsp_upiu
.header
.data_segment_length
= cpu_to_be16(data_segment_length
);
854 void ufs_build_query_response(UfsRequest
*req
)
856 req
->rsp_upiu
.qr
.opcode
= req
->req_upiu
.qr
.opcode
;
857 req
->rsp_upiu
.qr
.idn
= req
->req_upiu
.qr
.idn
;
858 req
->rsp_upiu
.qr
.index
= req
->req_upiu
.qr
.index
;
859 req
->rsp_upiu
.qr
.selector
= req
->req_upiu
.qr
.selector
;
862 static UfsReqResult
ufs_exec_scsi_cmd(UfsRequest
*req
)
865 uint8_t lun
= req
->req_upiu
.header
.lun
;
869 trace_ufs_exec_scsi_cmd(req
->slot
, lun
, req
->req_upiu
.sc
.cdb
[0]);
871 if (!is_wlun(lun
) && (lun
>= UFS_MAX_LUS
|| u
->lus
[lun
] == NULL
)) {
872 trace_ufs_err_scsi_cmd_invalid_lun(lun
);
873 return UFS_REQUEST_FAIL
;
877 case UFS_UPIU_REPORT_LUNS_WLUN
:
880 case UFS_UPIU_UFS_DEVICE_WLUN
:
883 case UFS_UPIU_BOOT_WLUN
:
886 case UFS_UPIU_RPMB_WLUN
:
893 return lu
->scsi_op(lu
, req
);
896 static UfsReqResult
ufs_exec_nop_cmd(UfsRequest
*req
)
898 trace_ufs_exec_nop_cmd(req
->slot
);
899 ufs_build_upiu_header(req
, UFS_UPIU_TRANSACTION_NOP_IN
, 0, 0, 0, 0);
900 return UFS_REQUEST_SUCCESS
;
904 * This defines the permission of flags based on their IDN. There are some
905 * things that are declared read-only, which is inconsistent with the ufs spec,
906 * because we want to return an error for features that are not yet supported.
908 static const int flag_permission
[UFS_QUERY_FLAG_IDN_COUNT
] = {
909 [UFS_QUERY_FLAG_IDN_FDEVICEINIT
] = UFS_QUERY_FLAG_READ
| UFS_QUERY_FLAG_SET
,
910 /* Write protection is not supported */
911 [UFS_QUERY_FLAG_IDN_PERMANENT_WPE
] = UFS_QUERY_FLAG_READ
,
912 [UFS_QUERY_FLAG_IDN_PWR_ON_WPE
] = UFS_QUERY_FLAG_READ
,
913 [UFS_QUERY_FLAG_IDN_BKOPS_EN
] = UFS_QUERY_FLAG_READ
| UFS_QUERY_FLAG_SET
|
914 UFS_QUERY_FLAG_CLEAR
|
915 UFS_QUERY_FLAG_TOGGLE
,
916 [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE
] =
917 UFS_QUERY_FLAG_READ
| UFS_QUERY_FLAG_SET
| UFS_QUERY_FLAG_CLEAR
|
918 UFS_QUERY_FLAG_TOGGLE
,
919 /* Purge Operation is not supported */
920 [UFS_QUERY_FLAG_IDN_PURGE_ENABLE
] = UFS_QUERY_FLAG_NONE
,
921 /* Refresh Operation is not supported */
922 [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE
] = UFS_QUERY_FLAG_NONE
,
923 /* Physical Resource Removal is not supported */
924 [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL
] = UFS_QUERY_FLAG_READ
,
925 [UFS_QUERY_FLAG_IDN_BUSY_RTC
] = UFS_QUERY_FLAG_READ
,
926 [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE
] = UFS_QUERY_FLAG_READ
,
927 /* Write Booster is not supported */
928 [UFS_QUERY_FLAG_IDN_WB_EN
] = UFS_QUERY_FLAG_READ
,
929 [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN
] = UFS_QUERY_FLAG_READ
,
930 [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8
] = UFS_QUERY_FLAG_READ
,
933 static inline QueryRespCode
ufs_flag_check_idn_valid(uint8_t idn
, int op
)
935 if (idn
>= UFS_QUERY_FLAG_IDN_COUNT
) {
936 return UFS_QUERY_RESULT_INVALID_IDN
;
939 if (!(flag_permission
[idn
] & op
)) {
940 if (op
== UFS_QUERY_FLAG_READ
) {
941 trace_ufs_err_query_flag_not_readable(idn
);
942 return UFS_QUERY_RESULT_NOT_READABLE
;
944 trace_ufs_err_query_flag_not_writable(idn
);
945 return UFS_QUERY_RESULT_NOT_WRITEABLE
;
948 return UFS_QUERY_RESULT_SUCCESS
;
951 static const int attr_permission
[UFS_QUERY_ATTR_IDN_COUNT
] = {
952 /* booting is not supported */
953 [UFS_QUERY_ATTR_IDN_BOOT_LU_EN
] = UFS_QUERY_ATTR_READ
,
954 [UFS_QUERY_ATTR_IDN_POWER_MODE
] = UFS_QUERY_ATTR_READ
,
955 [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL
] =
956 UFS_QUERY_ATTR_READ
| UFS_QUERY_ATTR_WRITE
,
957 [UFS_QUERY_ATTR_IDN_OOO_DATA_EN
] = UFS_QUERY_ATTR_READ
,
958 [UFS_QUERY_ATTR_IDN_BKOPS_STATUS
] = UFS_QUERY_ATTR_READ
,
959 [UFS_QUERY_ATTR_IDN_PURGE_STATUS
] = UFS_QUERY_ATTR_READ
,
960 [UFS_QUERY_ATTR_IDN_MAX_DATA_IN
] =
961 UFS_QUERY_ATTR_READ
| UFS_QUERY_ATTR_WRITE
,
962 [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT
] =
963 UFS_QUERY_ATTR_READ
| UFS_QUERY_ATTR_WRITE
,
964 [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED
] = UFS_QUERY_ATTR_READ
,
965 [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ
] =
966 UFS_QUERY_ATTR_READ
| UFS_QUERY_ATTR_WRITE
,
967 [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK
] = UFS_QUERY_ATTR_READ
,
968 [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT
] =
969 UFS_QUERY_ATTR_READ
| UFS_QUERY_ATTR_WRITE
,
970 [UFS_QUERY_ATTR_IDN_EE_CONTROL
] =
971 UFS_QUERY_ATTR_READ
| UFS_QUERY_ATTR_WRITE
,
972 [UFS_QUERY_ATTR_IDN_EE_STATUS
] = UFS_QUERY_ATTR_READ
,
973 [UFS_QUERY_ATTR_IDN_SECONDS_PASSED
] = UFS_QUERY_ATTR_WRITE
,
974 [UFS_QUERY_ATTR_IDN_CNTX_CONF
] = UFS_QUERY_ATTR_READ
,
975 [UFS_QUERY_ATTR_IDN_FFU_STATUS
] = UFS_QUERY_ATTR_READ
,
976 [UFS_QUERY_ATTR_IDN_PSA_STATE
] = UFS_QUERY_ATTR_READ
| UFS_QUERY_ATTR_WRITE
,
977 [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE
] =
978 UFS_QUERY_ATTR_READ
| UFS_QUERY_ATTR_WRITE
,
979 [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME
] = UFS_QUERY_ATTR_READ
,
980 [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP
] = UFS_QUERY_ATTR_READ
,
981 [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND
] = UFS_QUERY_ATTR_READ
,
982 [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND
] = UFS_QUERY_ATTR_READ
,
983 [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS
] = UFS_QUERY_ATTR_READ
,
984 [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS
] = UFS_QUERY_ATTR_READ
,
985 [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE
] = UFS_QUERY_ATTR_READ
,
986 [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST
] = UFS_QUERY_ATTR_READ
,
987 [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE
] = UFS_QUERY_ATTR_READ
,
988 /* refresh operation is not supported */
989 [UFS_QUERY_ATTR_IDN_REFRESH_STATUS
] = UFS_QUERY_ATTR_READ
,
990 [UFS_QUERY_ATTR_IDN_REFRESH_FREQ
] = UFS_QUERY_ATTR_READ
,
991 [UFS_QUERY_ATTR_IDN_REFRESH_UNIT
] = UFS_QUERY_ATTR_READ
,
994 static inline QueryRespCode
ufs_attr_check_idn_valid(uint8_t idn
, int op
)
996 if (idn
>= UFS_QUERY_ATTR_IDN_COUNT
) {
997 return UFS_QUERY_RESULT_INVALID_IDN
;
1000 if (!(attr_permission
[idn
] & op
)) {
1001 if (op
== UFS_QUERY_ATTR_READ
) {
1002 trace_ufs_err_query_attr_not_readable(idn
);
1003 return UFS_QUERY_RESULT_NOT_READABLE
;
1005 trace_ufs_err_query_attr_not_writable(idn
);
1006 return UFS_QUERY_RESULT_NOT_WRITEABLE
;
1009 return UFS_QUERY_RESULT_SUCCESS
;
1012 static QueryRespCode
ufs_exec_query_flag(UfsRequest
*req
, int op
)
1015 uint8_t idn
= req
->req_upiu
.qr
.idn
;
1019 ret
= ufs_flag_check_idn_valid(idn
, op
);
1024 if (idn
== UFS_QUERY_FLAG_IDN_FDEVICEINIT
) {
1026 } else if (op
== UFS_QUERY_FLAG_READ
) {
1027 value
= *(((uint8_t *)&u
->flags
) + idn
);
1028 } else if (op
== UFS_QUERY_FLAG_SET
) {
1030 } else if (op
== UFS_QUERY_FLAG_CLEAR
) {
1032 } else if (op
== UFS_QUERY_FLAG_TOGGLE
) {
1033 value
= *(((uint8_t *)&u
->flags
) + idn
);
1036 trace_ufs_err_query_invalid_opcode(op
);
1037 return UFS_QUERY_RESULT_INVALID_OPCODE
;
1040 *(((uint8_t *)&u
->flags
) + idn
) = value
;
1041 req
->rsp_upiu
.qr
.value
= cpu_to_be32(value
);
1042 return UFS_QUERY_RESULT_SUCCESS
;
1045 static uint32_t ufs_read_attr_value(UfsHc
*u
, uint8_t idn
)
1048 case UFS_QUERY_ATTR_IDN_BOOT_LU_EN
:
1049 return u
->attributes
.boot_lun_en
;
1050 case UFS_QUERY_ATTR_IDN_POWER_MODE
:
1051 return u
->attributes
.current_power_mode
;
1052 case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL
:
1053 return u
->attributes
.active_icc_level
;
1054 case UFS_QUERY_ATTR_IDN_OOO_DATA_EN
:
1055 return u
->attributes
.out_of_order_data_en
;
1056 case UFS_QUERY_ATTR_IDN_BKOPS_STATUS
:
1057 return u
->attributes
.background_op_status
;
1058 case UFS_QUERY_ATTR_IDN_PURGE_STATUS
:
1059 return u
->attributes
.purge_status
;
1060 case UFS_QUERY_ATTR_IDN_MAX_DATA_IN
:
1061 return u
->attributes
.max_data_in_size
;
1062 case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT
:
1063 return u
->attributes
.max_data_out_size
;
1064 case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED
:
1065 return be32_to_cpu(u
->attributes
.dyn_cap_needed
);
1066 case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ
:
1067 return u
->attributes
.ref_clk_freq
;
1068 case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK
:
1069 return u
->attributes
.config_descr_lock
;
1070 case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT
:
1071 return u
->attributes
.max_num_of_rtt
;
1072 case UFS_QUERY_ATTR_IDN_EE_CONTROL
:
1073 return be16_to_cpu(u
->attributes
.exception_event_control
);
1074 case UFS_QUERY_ATTR_IDN_EE_STATUS
:
1075 return be16_to_cpu(u
->attributes
.exception_event_status
);
1076 case UFS_QUERY_ATTR_IDN_SECONDS_PASSED
:
1077 return be32_to_cpu(u
->attributes
.seconds_passed
);
1078 case UFS_QUERY_ATTR_IDN_CNTX_CONF
:
1079 return be16_to_cpu(u
->attributes
.context_conf
);
1080 case UFS_QUERY_ATTR_IDN_FFU_STATUS
:
1081 return u
->attributes
.device_ffu_status
;
1082 case UFS_QUERY_ATTR_IDN_PSA_STATE
:
1083 return be32_to_cpu(u
->attributes
.psa_state
);
1084 case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE
:
1085 return be32_to_cpu(u
->attributes
.psa_data_size
);
1086 case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME
:
1087 return u
->attributes
.ref_clk_gating_wait_time
;
1088 case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP
:
1089 return u
->attributes
.device_case_rough_temperaure
;
1090 case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND
:
1091 return u
->attributes
.device_too_high_temp_boundary
;
1092 case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND
:
1093 return u
->attributes
.device_too_low_temp_boundary
;
1094 case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS
:
1095 return u
->attributes
.throttling_status
;
1096 case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS
:
1097 return u
->attributes
.wb_buffer_flush_status
;
1098 case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE
:
1099 return u
->attributes
.available_wb_buffer_size
;
1100 case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST
:
1101 return u
->attributes
.wb_buffer_life_time_est
;
1102 case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE
:
1103 return be32_to_cpu(u
->attributes
.current_wb_buffer_size
);
1104 case UFS_QUERY_ATTR_IDN_REFRESH_STATUS
:
1105 return u
->attributes
.refresh_status
;
1106 case UFS_QUERY_ATTR_IDN_REFRESH_FREQ
:
1107 return u
->attributes
.refresh_freq
;
1108 case UFS_QUERY_ATTR_IDN_REFRESH_UNIT
:
1109 return u
->attributes
.refresh_unit
;
1114 static QueryRespCode
ufs_write_attr_value(UfsHc
*u
, uint8_t idn
, uint32_t value
)
1117 case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL
:
1118 if (value
> UFS_QUERY_ATTR_ACTIVE_ICC_MAXVALUE
) {
1119 return UFS_QUERY_RESULT_INVALID_VALUE
;
1121 u
->attributes
.active_icc_level
= value
;
1123 case UFS_QUERY_ATTR_IDN_MAX_DATA_IN
:
1124 u
->attributes
.max_data_in_size
= value
;
1126 case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT
:
1127 u
->attributes
.max_data_out_size
= value
;
1129 case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ
:
1130 u
->attributes
.ref_clk_freq
= value
;
1132 case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT
:
1133 u
->attributes
.max_num_of_rtt
= value
;
1135 case UFS_QUERY_ATTR_IDN_EE_CONTROL
:
1136 u
->attributes
.exception_event_control
= cpu_to_be16(value
);
1138 case UFS_QUERY_ATTR_IDN_SECONDS_PASSED
:
1139 u
->attributes
.seconds_passed
= cpu_to_be32(value
);
1141 case UFS_QUERY_ATTR_IDN_PSA_STATE
:
1142 u
->attributes
.psa_state
= value
;
1144 case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE
:
1145 u
->attributes
.psa_data_size
= cpu_to_be32(value
);
1148 return UFS_QUERY_RESULT_SUCCESS
;
1151 static QueryRespCode
ufs_exec_query_attr(UfsRequest
*req
, int op
)
1154 uint8_t idn
= req
->req_upiu
.qr
.idn
;
1158 ret
= ufs_attr_check_idn_valid(idn
, op
);
1163 if (op
== UFS_QUERY_ATTR_READ
) {
1164 value
= ufs_read_attr_value(u
, idn
);
1165 ret
= UFS_QUERY_RESULT_SUCCESS
;
1167 value
= req
->req_upiu
.qr
.value
;
1168 ret
= ufs_write_attr_value(u
, idn
, value
);
1170 req
->rsp_upiu
.qr
.value
= cpu_to_be32(value
);
1174 static const RpmbUnitDescriptor rpmb_unit_desc
= {
1175 .length
= sizeof(RpmbUnitDescriptor
),
1176 .descriptor_idn
= 2,
1177 .unit_index
= UFS_UPIU_RPMB_WLUN
,
1181 static QueryRespCode
ufs_read_unit_desc(UfsRequest
*req
)
1184 uint8_t lun
= req
->req_upiu
.qr
.index
;
1186 if (lun
!= UFS_UPIU_RPMB_WLUN
&&
1187 (lun
>= UFS_MAX_LUS
|| u
->lus
[lun
] == NULL
)) {
1188 trace_ufs_err_query_invalid_index(req
->req_upiu
.qr
.opcode
, lun
);
1189 return UFS_QUERY_RESULT_INVALID_INDEX
;
1192 if (lun
== UFS_UPIU_RPMB_WLUN
) {
1193 memcpy(&req
->rsp_upiu
.qr
.data
, &rpmb_unit_desc
, rpmb_unit_desc
.length
);
1195 memcpy(&req
->rsp_upiu
.qr
.data
, &u
->lus
[lun
]->unit_desc
,
1196 sizeof(u
->lus
[lun
]->unit_desc
));
1199 return UFS_QUERY_RESULT_SUCCESS
;
1202 static inline StringDescriptor
manufacturer_str_desc(void)
1204 StringDescriptor desc
= {
1206 .descriptor_idn
= UFS_QUERY_DESC_IDN_STRING
,
1208 desc
.UC
[0] = cpu_to_be16('R');
1209 desc
.UC
[1] = cpu_to_be16('E');
1210 desc
.UC
[2] = cpu_to_be16('D');
1211 desc
.UC
[3] = cpu_to_be16('H');
1212 desc
.UC
[4] = cpu_to_be16('A');
1213 desc
.UC
[5] = cpu_to_be16('T');
1217 static inline StringDescriptor
product_name_str_desc(void)
1219 StringDescriptor desc
= {
1221 .descriptor_idn
= UFS_QUERY_DESC_IDN_STRING
,
1223 desc
.UC
[0] = cpu_to_be16('Q');
1224 desc
.UC
[1] = cpu_to_be16('E');
1225 desc
.UC
[2] = cpu_to_be16('M');
1226 desc
.UC
[3] = cpu_to_be16('U');
1227 desc
.UC
[4] = cpu_to_be16(' ');
1228 desc
.UC
[5] = cpu_to_be16('U');
1229 desc
.UC
[6] = cpu_to_be16('F');
1230 desc
.UC
[7] = cpu_to_be16('S');
1234 static inline StringDescriptor
product_rev_level_str_desc(void)
1236 StringDescriptor desc
= {
1238 .descriptor_idn
= UFS_QUERY_DESC_IDN_STRING
,
1240 desc
.UC
[0] = cpu_to_be16('0');
1241 desc
.UC
[1] = cpu_to_be16('0');
1242 desc
.UC
[2] = cpu_to_be16('0');
1243 desc
.UC
[3] = cpu_to_be16('1');
1247 static const StringDescriptor null_str_desc
= {
1249 .descriptor_idn
= UFS_QUERY_DESC_IDN_STRING
,
/*
 * Handle a READ DESCRIPTOR query for a string descriptor.
 *
 * The request's "index" field selects which string is returned; it is
 * matched against the string indices advertised in the device descriptor
 * (manufacturer_name, product_name, serial_number, oem_id,
 * product_revision_level). The selected descriptor is copied into the
 * response UPIU data area, truncated to the descriptor's own length.
 *
 * Returns UFS_QUERY_RESULT_SUCCESS, or UFS_QUERY_RESULT_INVALID_INDEX for
 * an index that matches none of the known strings.
 */
static QueryRespCode ufs_read_string_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    uint8_t index = req->req_upiu.qr.index;
    StringDescriptor desc;

    if (index == u->device_desc.manufacturer_name) {
        desc = manufacturer_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else if (index == u->device_desc.product_name) {
        desc = product_name_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else if (index == u->device_desc.serial_number) {
        /* Serial number is not modeled; return the empty descriptor. */
        memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
    } else if (index == u->device_desc.oem_id) {
        /* OEM id is not modeled; return the empty descriptor. */
        memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length);
    } else if (index == u->device_desc.product_revision_level) {
        desc = product_rev_level_str_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, desc.length);
    } else {
        trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index);
        return UFS_QUERY_RESULT_INVALID_INDEX;
    }
    return UFS_QUERY_RESULT_SUCCESS;
}
/*
 * Build the interconnect descriptor: advertises UniPro version 1.8 (0x180)
 * and M-PHY version 4.1 (0x410), both stored big-endian as required by the
 * UFS descriptor format.
 */
static inline InterconnectDescriptor interconnect_desc(void)
{
    InterconnectDescriptor desc = {
        .length = sizeof(InterconnectDescriptor),
        .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT,
    };
    desc.bcd_unipro_version = cpu_to_be16(0x180);
    desc.bcd_mphy_version = cpu_to_be16(0x410);
    return desc;
}
/*
 * Dispatch a READ DESCRIPTOR query by descriptor IDN.
 *
 * A non-zero selector is rejected up front. Device, geometry and
 * interconnect descriptors are copied whole into the response data area;
 * unit and string descriptors are delegated to their helpers; power and
 * health descriptors are not modeled, so an all-zero descriptor carrying
 * only the correct length/IDN header bytes is synthesized.
 *
 * Finally the requested length is clamped to the descriptor's own length
 * (data[0]) and echoed back, big-endian, in the response length field.
 * Note the clamp runs even on the invalid-IDN path, where data[] is
 * whatever the response buffer already held.
 */
static QueryRespCode ufs_read_desc(UfsRequest *req)
{
    UfsHc *u = req->hc;
    QueryRespCode status;
    uint8_t idn = req->req_upiu.qr.idn;
    uint8_t selector = req->req_upiu.qr.selector;
    uint16_t length = be16_to_cpu(req->req_upiu.qr.length);
    InterconnectDescriptor desc;
    if (selector != 0) {
        return UFS_QUERY_RESULT_INVALID_SELECTOR;
    }
    switch (idn) {
    case UFS_QUERY_DESC_IDN_DEVICE:
        memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_UNIT:
        status = ufs_read_unit_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_GEOMETRY:
        memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc,
               sizeof(u->geometry_desc));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_INTERCONNECT: {
        desc = interconnect_desc();
        memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor));
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    }
    case UFS_QUERY_DESC_IDN_STRING:
        status = ufs_read_string_desc(req);
        break;
    case UFS_QUERY_DESC_IDN_POWER:
        /* mocking of power descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    case UFS_QUERY_DESC_IDN_HEALTH:
        /* mocking of health descriptor is not supported */
        memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor));
        req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor);
        req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH;
        status = UFS_QUERY_RESULT_SUCCESS;
        break;
    default:
        trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn);
        status = UFS_QUERY_RESULT_INVALID_IDN;
    }

    /* Never return more bytes than the descriptor actually contains. */
    if (length > req->rsp_upiu.qr.data[0]) {
        length = req->rsp_upiu.qr.data[0];
    }
    req->rsp_upiu.qr.length = cpu_to_be16(length);

    return status;
}
1350 static QueryRespCode
ufs_exec_query_read(UfsRequest
*req
)
1352 QueryRespCode status
;
1353 switch (req
->req_upiu
.qr
.opcode
) {
1354 case UFS_UPIU_QUERY_OPCODE_NOP
:
1355 status
= UFS_QUERY_RESULT_SUCCESS
;
1357 case UFS_UPIU_QUERY_OPCODE_READ_DESC
:
1358 status
= ufs_read_desc(req
);
1360 case UFS_UPIU_QUERY_OPCODE_READ_ATTR
:
1361 status
= ufs_exec_query_attr(req
, UFS_QUERY_ATTR_READ
);
1363 case UFS_UPIU_QUERY_OPCODE_READ_FLAG
:
1364 status
= ufs_exec_query_flag(req
, UFS_QUERY_FLAG_READ
);
1367 trace_ufs_err_query_invalid_opcode(req
->req_upiu
.qr
.opcode
);
1368 status
= UFS_QUERY_RESULT_INVALID_OPCODE
;
1375 static QueryRespCode
ufs_exec_query_write(UfsRequest
*req
)
1377 QueryRespCode status
;
1378 switch (req
->req_upiu
.qr
.opcode
) {
1379 case UFS_UPIU_QUERY_OPCODE_NOP
:
1380 status
= UFS_QUERY_RESULT_SUCCESS
;
1382 case UFS_UPIU_QUERY_OPCODE_WRITE_DESC
:
1383 /* write descriptor is not supported */
1384 status
= UFS_QUERY_RESULT_NOT_WRITEABLE
;
1386 case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR
:
1387 status
= ufs_exec_query_attr(req
, UFS_QUERY_ATTR_WRITE
);
1389 case UFS_UPIU_QUERY_OPCODE_SET_FLAG
:
1390 status
= ufs_exec_query_flag(req
, UFS_QUERY_FLAG_SET
);
1392 case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG
:
1393 status
= ufs_exec_query_flag(req
, UFS_QUERY_FLAG_CLEAR
);
1395 case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG
:
1396 status
= ufs_exec_query_flag(req
, UFS_QUERY_FLAG_TOGGLE
);
1399 trace_ufs_err_query_invalid_opcode(req
->req_upiu
.qr
.opcode
);
1400 status
= UFS_QUERY_RESULT_INVALID_OPCODE
;
/*
 * Execute a QUERY REQUEST UPIU and build the QUERY RESPONSE UPIU.
 *
 * The query function byte selects the read or write dispatcher; any other
 * function is answered with UFS_QUERY_RESULT_GENERAL_FAILURE. A response
 * header and query-response payload are built unconditionally — even on
 * failure — so the guest always receives a well-formed response. The query
 * status is folded into the coarse request result returned to the caller.
 */
static UfsReqResult ufs_exec_query_cmd(UfsRequest *req)
{
    uint8_t query_func = req->req_upiu.header.query_func;
    uint16_t data_segment_length;
    QueryRespCode status;

    trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode);
    if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) {
        status = ufs_exec_query_read(req);
    } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) {
        status = ufs_exec_query_write(req);
    } else {
        status = UFS_QUERY_RESULT_GENERAL_FAILURE;
    }

    /* The handlers above already stored the response length (big-endian). */
    data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length);
    ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0,
                          data_segment_length);
    ufs_build_query_response(req);

    if (status != UFS_QUERY_RESULT_SUCCESS) {
        return UFS_REQUEST_FAIL;
    }
    return UFS_REQUEST_SUCCESS;
}
/*
 * Fetch a request's UPIU from guest memory and execute it according to its
 * transaction type (NOP OUT, SCSI command, or query request). Unknown
 * transaction codes are traced and failed.
 *
 * If the DMA read of the UPIU itself fails, the request is abandoned
 * without completion.
 */
static void ufs_exec_req(UfsRequest *req)
{
    UfsReqResult req_result;

    if (ufs_dma_read_upiu(req)) {
        return;
    }

    switch (req->req_upiu.header.trans_type) {
    case UFS_UPIU_TRANSACTION_NOP_OUT:
        req_result = ufs_exec_nop_cmd(req);
        break;
    case UFS_UPIU_TRANSACTION_COMMAND:
        req_result = ufs_exec_scsi_cmd(req);
        break;
    case UFS_UPIU_TRANSACTION_QUERY_REQ:
        req_result = ufs_exec_query_cmd(req);
        break;
    default:
        trace_ufs_err_invalid_trans_code(req->slot,
                                         req->req_upiu.header.trans_type);
        req_result = UFS_REQUEST_FAIL;
    }

    /*
     * The ufs_complete_req for scsi commands is handled by the
     * ufs_scsi_command_complete() callback function. Therefore, to avoid
     * duplicate processing, ufs_complete_req() is not called for scsi commands.
     */
    if (req_result != UFS_REQUEST_NO_COMPLETE) {
        ufs_complete_req(req, req_result);
    }
}
/*
 * Doorbell bottom-half handler: scan every transfer-request slot and
 * execute each request that has been marked READY, moving it to RUNNING
 * first so it cannot be picked up twice.
 */
static void ufs_process_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_READY) {
            continue;
        }
        trace_ufs_process_req(slot);
        req->state = UFS_REQUEST_RUNNING;

        ufs_exec_req(req);
    }
}
/*
 * Record a request's overall command status (OCS) in its UTRD and hand it
 * to the appropriate completion path.
 *
 * Success maps to UFS_OCS_SUCCESS; any failure is collapsed into
 * UFS_OCS_INVALID_CMD_TABLE_ATTR. MCQ requests are queued onto their
 * completion queue and that queue's bottom half is scheduled; legacy
 * (doorbell) requests are left in COMPLETE state for ufs_sendback_req(),
 * driven by the controller-wide completion bottom half.
 *
 * The caller must only complete a request that is currently RUNNING.
 */
void ufs_complete_req(UfsRequest *req, UfsReqResult req_result)
{
    UfsHc *u = req->hc;

    assert(req->state == UFS_REQUEST_RUNNING);

    if (req_result == UFS_REQUEST_SUCCESS) {
        req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS);
    } else {
        req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR);
    }

    req->state = UFS_REQUEST_COMPLETE;

    if (ufs_mcq_req(req)) {
        trace_ufs_mcq_complete_req(req->sq->sqid);
        QTAILQ_INSERT_TAIL(&req->sq->cq->req_list, req, entry);
        qemu_bh_schedule(req->sq->cq->bh);
    } else {
        trace_ufs_complete_req(req->slot);
        qemu_bh_schedule(u->complete_bh);
    }
}
/*
 * Release a request's per-command resources and wipe its descriptors so
 * the slot can be reused: destroy and free the scatter/gather list (if
 * any) and zero the UTRD and both UPIU buffers.
 */
static void ufs_clear_req(UfsRequest *req)
{
    if (req->sg != NULL) {
        qemu_sglist_destroy(req->sg);
        g_free(req->sg);
        req->sg = NULL;
        req->data_len = 0;
    }

    memset(&req->utrd, 0, sizeof(req->utrd));
    memset(&req->req_upiu, 0, sizeof(req->req_upiu));
    memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu));
}
/*
 * Completion bottom-half for the legacy (doorbell) path: for every slot in
 * COMPLETE state, write the response UPIU back to guest memory, raise the
 * UTRCS interrupt status bit when warranted, clear the doorbell bit, set
 * the completion-notification bit, and recycle the slot to IDLE.
 *
 * A failed DMA write-back parks the slot in ERROR state instead of
 * recycling it.
 */
static void ufs_sendback_req(void *opaque)
{
    UfsHc *u = opaque;
    UfsRequest *req;
    int slot;

    for (slot = 0; slot < u->params.nutrs; slot++) {
        req = &u->req_list[slot];

        if (req->state != UFS_REQUEST_COMPLETE) {
            continue;
        }

        if (ufs_dma_write_upiu(req)) {
            req->state = UFS_REQUEST_ERROR;
            continue;
        }

        /*
         * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet
         * supported
         */
        /* Interrupt on failure, or when the descriptor asked for one. */
        if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS ||
            le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) {
            u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1);
        }

        u->reg.utrldbr &= ~(1 << slot);
        u->reg.utrlcnr |= (1 << slot);

        trace_ufs_sendback_req(req->slot);

        ufs_clear_req(req);
        req->state = UFS_REQUEST_IDLE;
    }

    ufs_irq_check(u);
}
/*
 * Validate user-supplied device properties at realize time.
 *
 * Rejects nutrs > UFS_MAX_NUTRS, nutmrs > UFS_MAX_NUTMRS, and
 * mcq-maxq >= UFS_MAX_MCQ_QNUM, setting *errp and returning false on the
 * first violation; returns true when all constraints hold.
 */
static bool ufs_check_constraints(UfsHc *u, Error **errp)
{
    if (u->params.nutrs > UFS_MAX_NUTRS) {
        error_setg(errp, "nutrs must be less than or equal to %d",
                   UFS_MAX_NUTRS);
        return false;
    }

    if (u->params.nutmrs > UFS_MAX_NUTMRS) {
        error_setg(errp, "nutmrs must be less than or equal to %d",
                   UFS_MAX_NUTMRS);
        return false;
    }

    if (u->params.mcq_maxq >= UFS_MAX_MCQ_QNUM) {
        error_setg(errp, "mcq-maxq must be less than %d", UFS_MAX_MCQ_QNUM);
        return false;
    }

    return true;
}
/*
 * Set up the PCI face of the controller: legacy interrupt pin A,
 * programming interface byte 0x1, an MMIO region of u->reg_size bytes
 * registered as 32-bit memory BAR 0, and an allocated IRQ.
 */
static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x1);

    memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs",
                          u->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem);
    u->irq = pci_allocate_irq(pci_dev);
}
/*
 * Allocate and initialize the controller's runtime state: one UfsRequest
 * per transfer-request slot (back-pointer, slot number, no SG list, IDLE),
 * the doorbell and completion bottom halves (guarded against MMIO
 * re-entrancy), and — when MCQ is enabled — zeroed SQ/CQ pointer arrays.
 */
static void ufs_init_state(UfsHc *u)
{
    u->req_list = g_new0(UfsRequest, u->params.nutrs);

    for (int i = 0; i < u->params.nutrs; i++) {
        u->req_list[i].hc = u;
        u->req_list[i].slot = i;
        u->req_list[i].sg = NULL;
        u->req_list[i].state = UFS_REQUEST_IDLE;
    }

    u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);
    u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u,
                                         &DEVICE(u)->mem_reentrancy_guard);

    if (u->params.mcq) {
        memset(u->sq, 0, sizeof(u->sq));
        memset(u->cq, 0, sizeof(u->cq));
    }
}
/*
 * Initialize the emulated host controller's register file and the mock
 * device-level descriptors/attributes/flags.
 *
 * - Sizes the MMIO window (rounded up to a power of two) and zeroes all
 *   register banks.
 * - Builds CAP from the configured queue depths (NUTRS/NUTMRS are encoded
 *   as value-1 per UFSHCI), advertising 64-bit addressing, legacy single
 *   doorbell support (LSDBS) and MCQ support when enabled.
 * - When MCQ is on, programs MCQCONFIG/MCQCAP and lays out each queue's
 *   SQ/SQ-int/CQ/CQ-int register sub-blocks back-to-back starting at that
 *   queue's operation-register base address.
 * - Populates the device descriptor (string indices 0-4 match the
 *   *_str_desc() helpers; queue_depth mirrors nutrs), the geometry
 *   descriptor, the attributes, and the flags.
 */
static void ufs_init_hc(UfsHc *u)
{
    uint32_t cap = 0;
    uint32_t mcqconfig = 0;
    uint32_t mcqcap = 0;

    u->reg_size = pow2ceil(ufs_reg_size(u));

    memset(&u->reg, 0, sizeof(u->reg));
    memset(&u->mcq_reg, 0, sizeof(u->mcq_reg));
    memset(&u->mcq_op_reg, 0, sizeof(u->mcq_op_reg));
    cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1));
    cap = FIELD_DP32(cap, CAP, RTT, 2);
    cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1));
    cap = FIELD_DP32(cap, CAP, AUTOH8, 0);
    cap = FIELD_DP32(cap, CAP, 64AS, 1);
    cap = FIELD_DP32(cap, CAP, OODDS, 0);
    cap = FIELD_DP32(cap, CAP, UICDMETMS, 0);
    cap = FIELD_DP32(cap, CAP, CS, 0);
    cap = FIELD_DP32(cap, CAP, LSDBS, 1);
    cap = FIELD_DP32(cap, CAP, MCQS, u->params.mcq);
    u->reg.cap = cap;

    if (u->params.mcq) {
        mcqconfig = FIELD_DP32(mcqconfig, MCQCONFIG, MAC, 0x1f);
        u->reg.mcqconfig = mcqconfig;

        mcqcap = FIELD_DP32(mcqcap, MCQCAP, MAXQ, u->params.mcq_maxq - 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, RRP, 1);
        mcqcap = FIELD_DP32(mcqcap, MCQCAP, QCFGPTR, UFS_MCQ_QCFGPTR);
        u->reg.mcqcap = mcqcap;

        for (int i = 0; i < ARRAY_SIZE(u->mcq_reg); i++) {
            /* Pack SQ, SQ-int, CQ, CQ-int offsets contiguously per queue. */
            uint64_t addr = ufs_mcq_op_reg_addr(u, i);
            u->mcq_reg[i].sqdao = addr;
            u->mcq_reg[i].sqisao = addr + sizeof(UfsMcqSqReg);
            addr += sizeof(UfsMcqSqReg);
            u->mcq_reg[i].cqdao = addr + sizeof(UfsMcqSqIntReg);
            addr += sizeof(UfsMcqSqIntReg);
            u->mcq_reg[i].cqisao = addr + sizeof(UfsMcqCqReg);
        }
    }
    u->reg.ver = UFS_SPEC_VER;

    memset(&u->device_desc, 0, sizeof(DeviceDescriptor));
    u->device_desc.length = sizeof(DeviceDescriptor);
    u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE;
    u->device_desc.device_sub_class = 0x01;
    u->device_desc.number_lu = 0x00;
    u->device_desc.number_wlu = 0x04;
    /* TODO: Revisit it when Power Management is implemented */
    u->device_desc.init_power_mode = 0x01; /* Active Mode */
    u->device_desc.high_priority_lun = 0x7F; /* Same Priority */
    u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER);
    /* String descriptor indices served by ufs_read_string_desc(). */
    u->device_desc.manufacturer_name = 0x00;
    u->device_desc.product_name = 0x01;
    u->device_desc.serial_number = 0x02;
    u->device_desc.oem_id = 0x03;
    u->device_desc.ud_0_base_offset = 0x16;
    u->device_desc.ud_config_p_length = 0x1A;
    u->device_desc.device_rtt_cap = 0x02;
    u->device_desc.queue_depth = u->params.nutrs;
    u->device_desc.product_revision_level = 0x04;

    memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor));
    u->geometry_desc.length = sizeof(GeometryDescriptor);
    u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
    u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
    u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
    u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
    u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
    u->geometry_desc.max_in_buffer_size = 0x8;
    u->geometry_desc.max_out_buffer_size = 0x8;
    u->geometry_desc.rpmb_read_write_size = 0x40;
    u->geometry_desc.data_ordering =
        0x0; /* out-of-order data transfer is not supported */
    u->geometry_desc.max_context_id_number = 0x5;
    u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001);

    memset(&u->attributes, 0, sizeof(u->attributes));
    u->attributes.max_data_in_size = 0x08;
    u->attributes.max_data_out_size = 0x08;
    u->attributes.ref_clk_freq = 0x01; /* 26 MHz */
    /* configure descriptor is not supported */
    u->attributes.config_descr_lock = 0x01;
    u->attributes.max_num_of_rtt = 0x02;

    memset(&u->flags, 0, sizeof(u->flags));
    u->flags.permanently_disable_fw_update = 1;
}
/*
 * PCIDeviceClass.realize: validate the property constraints, create the
 * UFS bus (named after the device's qdev id), initialize runtime state,
 * registers/descriptors and the PCI interface, then bring up the four
 * well-known logical units (REPORT LUNS, DEVICE, BOOT, RPMB).
 */
static void ufs_realize(PCIDevice *pci_dev, Error **errp)
{
    UfsHc *u = UFS(pci_dev);

    if (!ufs_check_constraints(u, errp)) {
        return;
    }

    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
              u->parent_obj.qdev.id);

    ufs_init_state(u);
    ufs_init_hc(u);
    ufs_init_pci(u, pci_dev);

    ufs_init_wlu(&u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN);
    ufs_init_wlu(&u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN);
    ufs_init_wlu(&u->boot_wlu, UFS_UPIU_BOOT_WLUN);
    ufs_init_wlu(&u->rpmb_wlu, UFS_UPIU_RPMB_WLUN);
}
/*
 * PCIDeviceClass.exit: tear down in reverse of realize — delete both
 * bottom halves, release each request's resources and free the request
 * array, then delete any MCQ submission/completion queues that were
 * created at runtime.
 */
static void ufs_exit(PCIDevice *pci_dev)
{
    UfsHc *u = UFS(pci_dev);

    qemu_bh_delete(u->doorbell_bh);
    qemu_bh_delete(u->complete_bh);

    for (int i = 0; i < u->params.nutrs; i++) {
        ufs_clear_req(&u->req_list[i]);
    }
    g_free(u->req_list);

    for (int i = 0; i < ARRAY_SIZE(u->sq); i++) {
        if (u->sq[i]) {
            ufs_mcq_delete_sq(u, i);
        }
    }
    for (int i = 0; i < ARRAY_SIZE(u->cq); i++) {
        if (u->cq[i]) {
            ufs_mcq_delete_cq(u, i);
        }
    }
}
/*
 * qdev properties: serial string, queue depths (nutrs default 32,
 * nutmrs default 8), and the MCQ enable/max-queue knobs (mcq off,
 * mcq-maxq 2 by default).
 */
static Property ufs_props[] = {
    DEFINE_PROP_STRING("serial", UfsHc, params.serial),
    DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32),
    DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8),
    DEFINE_PROP_BOOL("mcq", UfsHc, params.mcq, false),
    DEFINE_PROP_UINT8("mcq-maxq", UfsHc, params.mcq_maxq, 2),
    DEFINE_PROP_END_OF_LIST(),
};

/*
 * Migration is not supported for this device.
 * NOTE(review): the initializer fields were lost in this copy of the file;
 * ".name"/".unmigratable = 1" reconstructed — confirm against upstream.
 */
static const VMStateDescription ufs_vmstate = {
    .name = "ufs",
    .unmigratable = 1,
};
/*
 * QOM class initializer for the "ufs" PCI device: wires realize/exit,
 * sets the Red Hat vendor/device IDs and the UFS storage class code, and
 * registers the device category, description, properties and vmstate.
 */
static void ufs_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = ufs_realize;
    pc->exit = ufs_exit;
    pc->vendor_id = PCI_VENDOR_ID_REDHAT;
    pc->device_id = PCI_DEVICE_ID_REDHAT_UFS;
    pc->class_id = PCI_CLASS_STORAGE_UFS;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Universal Flash Storage";
    device_class_set_props(dc, ufs_props);
    dc->vmsd = &ufs_vmstate;
}
/*
 * BusClass.check_address hook: only devices of type TYPE_UFS_LU may be
 * plugged into a ufs-bus; anything else is rejected with an error.
 */
static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
                                  Error **errp)
{
    if (strcmp(object_get_typename(OBJECT(qdev)), TYPE_UFS_LU) != 0) {
        error_setg(errp, "%s cannot be connected to ufs-bus",
                   object_get_typename(OBJECT(qdev)));
        return false;
    }

    return true;
}
1798 static char *ufs_bus_get_dev_path(DeviceState
*dev
)
1800 BusState
*bus
= qdev_get_parent_bus(dev
);
1802 return qdev_get_dev_path(bus
->parent
);
/*
 * QOM class initializer for the UFS bus: installs the device-path and
 * hot-plug address-check hooks.
 */
static void ufs_bus_class_init(ObjectClass *class, void *data)
{
    BusClass *bc = BUS_CLASS(class);
    bc->get_dev_path = ufs_bus_get_dev_path;
    bc->check_address = ufs_bus_check_address;
}
/*
 * QOM type descriptions and registration for the UFS controller (a PCIe
 * device) and its bus type.
 * NOTE(review): the ".name" line of ufs_info and ".parent" of ufs_bus_info
 * were lost in this copy of the file; reconstructed as TYPE_UFS/TYPE_BUS —
 * confirm against upstream.
 */
static const TypeInfo ufs_info = {
    .name = TYPE_UFS,
    .parent = TYPE_PCI_DEVICE,
    .class_init = ufs_class_init,
    .instance_size = sizeof(UfsHc),
    .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
};

static const TypeInfo ufs_bus_info = {
    .name = TYPE_UFS_BUS,
    .parent = TYPE_BUS,
    .class_init = ufs_bus_class_init,
    .class_size = sizeof(UfsBusClass),
    .instance_size = sizeof(UfsBus),
};

static void ufs_register_types(void)
{
    type_register_static(&ufs_info);
    type_register_static(&ufs_bus_info);
}

type_init(ufs_register_types)