// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Marvell. */

#include <linux/iopoll.h>

#include "octep_vdpa.h"

enum octep_mbox_ids {
	OCTEP_MBOX_MSG_SET_VQ_STATE = 1,
	OCTEP_MBOX_MSG_GET_VQ_STATE,
};

#define OCTEP_HW_TIMEOUT       10000000

#define MBOX_OFFSET            64
#define MBOX_RSP_MASK          0x00000001
#define MBOX_RC_MASK           0x0000FFFE

#define MBOX_RSP_TO_ERR(val)   (-(((val) & MBOX_RC_MASK) >> 2))
#define MBOX_AVAIL(val)        (((val) & MBOX_RSP_MASK))
#define MBOX_RSP(val)          ((val) & (MBOX_RC_MASK | MBOX_RSP_MASK))

#define DEV_RST_ACK_BIT        7
#define FEATURE_SEL_ACK_BIT    15
#define QUEUE_SEL_ACK_BIT      15

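/*
 * Layout of the firmware mailbox located at MBOX_OFFSET within the device
 * config region (see octep_get_mbox()): a request/response header, a status
 * word and a data payload area.
 */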
struct octep_mbox_hdr {
	u16 id;
	u16 sig;
};

#define MBOX_REQ_SIG (0xdead)
#define MBOX_RSP_SIG (0xbeef)

struct octep_mbox_sts {
	u16 rsp:1;	/* matches MBOX_RSP_MASK */
	u16 rc:15;	/* matches MBOX_RC_MASK */
	u16 rsvd;
};

struct octep_mbox {
	struct octep_mbox_hdr hdr;
	struct octep_mbox_sts sts;
	u64 rsvd;
	u32 data[32]; /* payload words; the array size here is an assumption, word 0 carries the queue id */
};

static inline struct octep_mbox __iomem *octep_get_mbox(struct octep_hw *oct_hw)
{
	return (struct octep_mbox __iomem *)(oct_hw->dev_cfg + MBOX_OFFSET);
}

static inline int octep_wait_for_mbox_avail(struct octep_mbox __iomem *mbox)
{
	u32 val;

	return readx_poll_timeout(ioread32, &mbox->sts, val, MBOX_AVAIL(val), 10,
				  OCTEP_HW_TIMEOUT);
}

static inline int octep_wait_for_mbox_rsp(struct octep_mbox __iomem *mbox)
{
	u32 val;

	return readx_poll_timeout(ioread32, &mbox->sts, val, MBOX_RSP(val), 10,
				  OCTEP_HW_TIMEOUT);
}

static inline void octep_write_hdr(struct octep_mbox __iomem *mbox, u16 id, u16 sig)
{
	iowrite16(id, &mbox->hdr.id);
	iowrite16(sig, &mbox->hdr.sig);
}

static inline u32 octep_read_sig(struct octep_mbox __iomem *mbox)
{
	return ioread16(&mbox->hdr.sig);
}

static inline void octep_write_sts(struct octep_mbox __iomem *mbox, u32 sts)
{
	iowrite32(sts, &mbox->sts);
}

static inline u32 octep_read_sts(struct octep_mbox __iomem *mbox)
{
	return ioread32(&mbox->sts);
}

static inline u32 octep_read32_word(struct octep_mbox __iomem *mbox, u16 word_idx)
{
	return ioread32(&mbox->data[word_idx]);
}

static inline void octep_write32_word(struct octep_mbox __iomem *mbox, u16 word_idx, u32 word)
{
	iowrite32(word, &mbox->data[word_idx]);
}

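/*
 * Send one mailbox request and wait for the firmware's response.
 * Word 0 of the data area carries the queue id; for writes the buffer is
 * copied into the remaining words before the request header is written,
 * and for reads it is filled from them after a successful response.
 */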
static int octep_process_mbox(struct octep_hw *oct_hw, u16 id, u16 qid, void *buffer,
			      u32 buf_size, bool write)
{
	struct octep_mbox __iomem *mbox = octep_get_mbox(oct_hw);
	struct pci_dev *pdev = oct_hw->pdev;
	u32 *p = (u32 *)buffer;
	u16 data_wds;
	int ret, i;
	u32 val;

	if (!IS_ALIGNED(buf_size, 4))
		return -EINVAL;

	/* Make sure mbox space is available */
	ret = octep_wait_for_mbox_avail(mbox);
	if (ret) {
		dev_warn(&pdev->dev, "Timeout waiting for previous mbox data to be consumed\n");
		return ret;
	}
	data_wds = buf_size / 4;

	if (write) {
		for (i = 1; i <= data_wds; i++) {
			octep_write32_word(mbox, i, *p);
			p++;
		}
	}
	octep_write32_word(mbox, 0, (u32)qid);
	octep_write_sts(mbox, 0);

	octep_write_hdr(mbox, id, MBOX_REQ_SIG);

	ret = octep_wait_for_mbox_rsp(mbox);
	if (ret) {
		dev_warn(&pdev->dev, "Timeout waiting for mbox : %d response\n", id);
		return ret;
	}

	val = octep_read_sig(mbox);
	if ((val & 0xFFFF) != MBOX_RSP_SIG) {
		dev_warn(&pdev->dev, "Invalid Signature from mbox : %d response\n", id);
		return -EINVAL;
	}

	val = octep_read_sts(mbox);
	if (val & MBOX_RC_MASK) {
		ret = MBOX_RSP_TO_ERR(val);
		dev_warn(&pdev->dev, "Error while processing mbox : %d, err %d\n", id, ret);
		return ret;
	}

	if (!write)
		for (i = 1; i <= data_wds; i++)
			*p++ = octep_read32_word(mbox, i);

	return 0;
}

static void octep_mbox_init(struct octep_mbox __iomem *mbox)
{
	iowrite32(1, &mbox->sts);
}

int octep_verify_features(u64 features)
{
	/* Minimum features to expect */
	if (!(features & BIT_ULL(VIRTIO_F_VERSION_1)))
		return -EOPNOTSUPP;

	if (!(features & BIT_ULL(VIRTIO_F_NOTIFICATION_DATA)))
		return -EOPNOTSUPP;

	if (!(features & BIT_ULL(VIRTIO_F_RING_PACKED)))
		return -EOPNOTSUPP;

	return 0;
}

u8 octep_hw_get_status(struct octep_hw *oct_hw)
{
	return ioread8(&oct_hw->common_cfg->device_status);
}

void octep_hw_set_status(struct octep_hw *oct_hw, u8 status)
{
	iowrite8(status, &oct_hw->common_cfg->device_status);
}

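/*
 * Reset the device by writing status 0 with the reset-acknowledge bit set,
 * then poll until the firmware reports a zero device status.
 */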
void octep_hw_reset(struct octep_hw *oct_hw)
{
	u8 val;

	octep_hw_set_status(oct_hw, 0 | BIT(DEV_RST_ACK_BIT));
	if (readx_poll_timeout(ioread8, &oct_hw->common_cfg->device_status, val, !val, 10,
			       OCTEP_HW_TIMEOUT))
		dev_warn(&oct_hw->pdev->dev, "Octeon device reset timeout\n");
}

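/*
 * The select registers are emulated by the device firmware. The driver sets
 * the ACK bit along with the value to be written and then polls until the
 * firmware echoes the value back with the ACK bit cleared.
 */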
static int feature_sel_write_with_timeout(struct octep_hw *oct_hw, u32 select, void __iomem *addr)
{
	u32 val;

	iowrite32(select | BIT(FEATURE_SEL_ACK_BIT), addr);

	if (readx_poll_timeout(ioread32, addr, val, val == select, 10, OCTEP_HW_TIMEOUT)) {
		dev_warn(&oct_hw->pdev->dev, "Feature select%d write timeout\n", select);
		return -1;
	}
	return 0;
}

u64 octep_hw_get_dev_features(struct octep_hw *oct_hw)
{
	u32 features_lo, features_hi;

	if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->device_feature_select))
		return 0;

	features_lo = ioread32(&oct_hw->common_cfg->device_feature);

	if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->device_feature_select))
		return 0;

	features_hi = ioread32(&oct_hw->common_cfg->device_feature);

	return ((u64)features_hi << 32) | features_lo;
}

u64 octep_hw_get_drv_features(struct octep_hw *oct_hw)
{
	u32 features_lo, features_hi;

	if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->guest_feature_select))
		return 0;

	features_lo = ioread32(&oct_hw->common_cfg->guest_feature);

	if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->guest_feature_select))
		return 0;

	features_hi = ioread32(&oct_hw->common_cfg->guest_feature);

	return ((u64)features_hi << 32) | features_lo;
}

void octep_hw_set_drv_features(struct octep_hw *oct_hw, u64 features)
{
	if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->guest_feature_select))
		return;

	iowrite32(features & (BIT_ULL(32) - 1), &oct_hw->common_cfg->guest_feature);

	if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->guest_feature_select))
		return;

	iowrite32(features >> 32, &oct_hw->common_cfg->guest_feature);
}

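/* Select a queue and wait for the firmware to acknowledge the selection. */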
void octep_write_queue_select(struct octep_hw *oct_hw, u16 queue_id)
{
	u16 val;

	iowrite16(queue_id | BIT(QUEUE_SEL_ACK_BIT), &oct_hw->common_cfg->queue_select);

	if (readx_poll_timeout(ioread16, &oct_hw->common_cfg->queue_select, val, val == queue_id,
			       10, OCTEP_HW_TIMEOUT))
		dev_warn(&oct_hw->pdev->dev, "Queue select write timeout\n");
}

void octep_notify_queue(struct octep_hw *oct_hw, u16 qid)
{
	iowrite16(qid, oct_hw->vqs[qid].notify_addr);
}

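/*
 * Read the device config area byte by byte, retrying the whole read if the
 * config generation counter changes while the read is in progress.
 */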
void octep_read_dev_config(struct octep_hw *oct_hw, u64 offset, void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	if (WARN_ON(offset + length > oct_hw->config_size))
		return;

	do {
		old_gen = ioread8(&oct_hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = ioread8(oct_hw->dev_cfg + offset + i);

		new_gen = ioread8(&oct_hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

int octep_set_vq_address(struct octep_hw *oct_hw, u16 qid, u64 desc_area, u64 driver_area,
			 u64 device_area)
{
	struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

	octep_write_queue_select(oct_hw, qid);
	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);

	return 0;
}

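/*
 * The virtqueue state is not exposed through the common config space; it is
 * exchanged with the device firmware over the mailbox.
 */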
int octep_get_vq_state(struct octep_hw *oct_hw, u16 qid, struct vdpa_vq_state *state)
{
	return octep_process_mbox(oct_hw, OCTEP_MBOX_MSG_GET_VQ_STATE, qid, state,
				  sizeof(*state), 0);
}

int octep_set_vq_state(struct octep_hw *oct_hw, u16 qid, const struct vdpa_vq_state *state)
{
	struct vdpa_vq_state q_state;

	memcpy(&q_state, state, sizeof(struct vdpa_vq_state));
	return octep_process_mbox(oct_hw, OCTEP_MBOX_MSG_SET_VQ_STATE, qid, &q_state,
				  sizeof(*state), 1);
}

void octep_set_vq_num(struct octep_hw *oct_hw, u16 qid, u32 num)
{
	struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

	octep_write_queue_select(oct_hw, qid);
	iowrite16(num, &cfg->queue_size);
}

void octep_set_vq_ready(struct octep_hw *oct_hw, u16 qid, bool ready)
{
	struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

	octep_write_queue_select(oct_hw, qid);
	iowrite16(ready, &cfg->queue_enable);
}

bool octep_get_vq_ready(struct octep_hw *oct_hw, u16 qid)
{
	struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

	octep_write_queue_select(oct_hw, qid);
	return ioread16(&cfg->queue_enable);
}

u16 octep_get_vq_size(struct octep_hw *oct_hw)
{
	octep_write_queue_select(oct_hw, 0);
	return ioread16(&oct_hw->common_cfg->queue_size);
}

static u32 octep_get_config_size(struct octep_hw *oct_hw)
{
	return sizeof(struct virtio_net_config);
}

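/*
 * Translate a virtio PCI vendor capability into a mapped address, rejecting
 * capabilities that point outside the expected BAR or past its length.
 */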
static void __iomem *octep_get_cap_addr(struct octep_hw *oct_hw, struct virtio_pci_cap *cap)
{
	struct device *dev = &oct_hw->pdev->dev;
	u32 length = le32_to_cpu(cap->length);
	u32 offset = le32_to_cpu(cap->offset);
	u8 bar = cap->bar;
	u32 len;

	if (bar != OCTEP_HW_CAPS_BAR) {
		dev_err(dev, "Invalid bar: %u\n", bar);
		return NULL;
	}
	if (offset + length < offset) {
		dev_err(dev, "offset(%u) + length(%u) overflows\n",
			offset, length);
		return NULL;
	}
	len = pci_resource_len(oct_hw->pdev, bar);
	if (offset + length > len) {
		dev_err(dev, "invalid cap: overflows bar space: %u > %u\n",
			offset + length, len);
		return NULL;
	}
	return oct_hw->base[bar] + offset;
}

/* In the Octeon DPU device, the virtio config space is completely
 * emulated by the device's firmware, so the standard PCI config read
 * APIs cannot be used to read the virtio capabilities.
 */
static void octep_pci_caps_read(struct octep_hw *oct_hw, void *buf, size_t len, off_t offset)
{
	u8 __iomem *bar = oct_hw->base[OCTEP_HW_CAPS_BAR];
	u8 *p = buf;
	size_t i;

	for (i = 0; i < len; i++)
		*p++ = ioread8(bar + offset + i);
}

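/* Check the firmware-ready signature words at the start of the capability BAR. */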
static int octep_pci_signature_verify(struct octep_hw *oct_hw)
{
	u32 signature[2];

	octep_pci_caps_read(oct_hw, &signature, sizeof(signature), 0);

	if (signature[0] != OCTEP_FW_READY_SIGNATURE0)
		return -1;

	if (signature[1] != OCTEP_FW_READY_SIGNATURE1)
		return -1;

	return 0;
}

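/*
 * Walk the vendor capability list in the capability BAR, map the common,
 * notify, device and ISR config regions, then set up per-queue notification
 * addresses and initialize the mailbox.
 */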
int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
{
	struct octep_mbox __iomem *mbox;
	struct device *dev = &pdev->dev;
	struct virtio_pci_cap cap;
	u16 notify_off;
	int i, ret;
	u8 pos;

	ret = octep_pci_signature_verify(oct_hw);
	if (ret) {
		dev_err(dev, "Octeon Virtio FW is not initialized\n");
		return -EIO;
	}

	octep_pci_caps_read(oct_hw, &pos, 1, PCI_CAPABILITY_LIST);

	while (pos) {
		octep_pci_caps_read(oct_hw, &cap, 2, pos);

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			dev_err(dev, "Found invalid capability vndr id: %d\n", cap.cap_vndr);
			break;
		}

		octep_pci_caps_read(oct_hw, &cap, sizeof(cap), pos);

		dev_info(dev, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u\n",
			 pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			oct_hw->common_cfg = octep_get_cap_addr(oct_hw, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			octep_pci_caps_read(oct_hw, &oct_hw->notify_off_multiplier,
					    4, pos + sizeof(cap));

			oct_hw->notify_base = octep_get_cap_addr(oct_hw, &cap);
			oct_hw->notify_bar = cap.bar;
			oct_hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
						 le32_to_cpu(cap.offset);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			oct_hw->dev_cfg = octep_get_cap_addr(oct_hw, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			oct_hw->isr = octep_get_cap_addr(oct_hw, &cap);
			break;
		}

		pos = cap.cap_next;
	}
	if (!oct_hw->common_cfg || !oct_hw->notify_base ||
	    !oct_hw->dev_cfg || !oct_hw->isr) {
		dev_err(dev, "Incomplete PCI capabilities");
		return -EIO;
	}
	dev_info(dev, "common cfg mapped at: %p\n", oct_hw->common_cfg);
	dev_info(dev, "device cfg mapped at: %p\n", oct_hw->dev_cfg);
	dev_info(dev, "isr cfg mapped at: %p\n", oct_hw->isr);
	dev_info(dev, "notify base: %p, notify off multiplier: %u\n",
		 oct_hw->notify_base, oct_hw->notify_off_multiplier);

	oct_hw->config_size = octep_get_config_size(oct_hw);
	oct_hw->features = octep_hw_get_dev_features(oct_hw);

	ret = octep_verify_features(oct_hw->features);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't read features from the device FW\n");
		return ret;
	}
	oct_hw->nr_vring = vp_ioread16(&oct_hw->common_cfg->num_queues);

	oct_hw->vqs = devm_kcalloc(&pdev->dev, oct_hw->nr_vring, sizeof(*oct_hw->vqs), GFP_KERNEL);
	if (!oct_hw->vqs)
		return -ENOMEM;

	dev_info(&pdev->dev, "Device features : %llx\n", oct_hw->features);
	dev_info(&pdev->dev, "Maximum queues : %u\n", oct_hw->nr_vring);

	for (i = 0; i < oct_hw->nr_vring; i++) {
		octep_write_queue_select(oct_hw, i);
		notify_off = vp_ioread16(&oct_hw->common_cfg->queue_notify_off);
		oct_hw->vqs[i].notify_addr = oct_hw->notify_base +
			notify_off * oct_hw->notify_off_multiplier;
		oct_hw->vqs[i].cb_notify_addr = (u32 __iomem *)oct_hw->vqs[i].notify_addr + 1;
		oct_hw->vqs[i].notify_pa = oct_hw->notify_base_pa +
			notify_off * oct_hw->notify_off_multiplier;
	}
	mbox = octep_get_mbox(oct_hw);
	octep_mbox_init(mbox);
	dev_info(dev, "mbox mapped at: %p\n", mbox);

	return 0;
}