// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_rq.h"
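
/*
 * Allocate the driver-side buffer bookkeeping for a receive queue: one
 * kzalloc'd block per VNIC_RQ_BUF_BLK_ENTRIES descriptors, with each
 * vnic_rq_buf pointed at its descriptor and chained to the next buffer,
 * so that all blocks together form one circular list around the ring.
 */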
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
	struct vnic_rq_buf *buf;
	unsigned int i, j, count = rq->ring.desc_count;
	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);

	for (i = 0; i < blks; i++) {
		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!rq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc rq_bufs\n");
			return -ENOMEM;
		}
	}

	for (i = 0; i < blks; i++) {
		buf = rq->bufs[i];
		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)rq->ring.descs +
				rq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				/* Last buffer overall: close the ring */
				buf->next = rq->bufs[0];
				break;
			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
				/* Last entry in this block: link to next block */
				buf->next = rq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	rq->to_use = rq->to_clean = rq->bufs[0];
	rq->buf_index = 0;

	return 0;
}
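
/*
 * Free the descriptor ring and every buffer block of a receive queue,
 * and drop the reference to the queue's control registers.
 */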
void vnic_rq_free(struct vnic_rq *rq)
{
	struct vnic_dev *vdev;
	unsigned int i;

	vdev = rq->vdev;

	vnic_dev_free_desc_ring(vdev, &rq->ring);

	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
		kfree(rq->bufs[i]);
		rq->bufs[i] = NULL;
	}

	rq->ctrl = NULL;
}
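
/*
 * Look up the control registers for RQ[index], allocate its descriptor
 * ring and buffer bookkeeping, and leave the queue disabled.  Anything
 * allocated before a failure is freed again.
 *
 * Illustrative lifecycle as a caller might use it (the parameter values
 * and my_buf_clean below are made up for this sketch):
 *
 *	err = vnic_rq_alloc(vdev, &rq, 0, 64, sizeof(struct rq_enet_desc));
 *	if (!err) {
 *		vnic_rq_init(&rq, cq_index, 1, 0);
 *		vnic_rq_enable(&rq);
 *		...
 *		vnic_rq_disable(&rq);
 *		vnic_rq_clean(&rq, my_buf_clean);
 *		vnic_rq_free(&rq);
 *	}
 */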
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	rq->index = index;
	rq->vdev = vdev;

	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
	if (!rq->ctrl) {
		printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
		return -EINVAL;
	}

	vnic_rq_disable(rq);

	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_rq_alloc_bufs(rq);
	if (err) {
		vnic_rq_free(rq);
		return err;
	}

	return 0;
}
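
/*
 * Program the queue's control registers with the ring base address, ring
 * size and completion/error-interrupt configuration, then resynchronize
 * the driver's to_use/to_clean pointers with the hardware fetch_index.
 */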
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	u32 fetch_index;

	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;
}
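
/* Report the queue's hardware error status register (nonzero on error). */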
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
	return ioread32(&rq->ctrl->error_status);
}
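
/* Start the queue by setting its enable register. */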
void vnic_rq_enable(struct vnic_rq *rq)
{
	iowrite32(1, &rq->ctrl->enable);
}
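
/*
 * Request queue disable and poll the running register until the hardware
 * acknowledges, checking up to 100 times at one-microsecond intervals.
 */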
int vnic_rq_disable(struct vnic_rq *rq)
{
	unsigned int wait;

	iowrite32(0, &rq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 100; wait++) {
		if (!(ioread32(&rq->ctrl->running)))
			return 0;
		udelay(1);
	}

	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);

	return -ETIMEDOUT;
}
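
/*
 * Reclaim every in-flight receive buffer through the caller-supplied
 * buf_clean callback, then resynchronize the ring pointers with the
 * hardware fetch_index and clear the descriptor ring.  The queue must
 * already be disabled.
 */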
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;

	WARN_ON(ioread32(&rq->ctrl->enable));

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {
		(*buf_clean)(rq, buf);
		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;

	vnic_dev_clear_desc_ring(&rq->ring);
}