/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>

#include "vnic_dev.h"
#include "vnic_cq.h"
/* Receive queue control */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};
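/*
 * Note: each register sits on an 8-byte boundary (hence the 0x08 stride
 * in the offsets above), so every 32-bit register is followed by a
 * 32-bit pad word to keep the structure in step with the memory-mapped
 * hardware layout.
 */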
/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
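/*
 * For example: a 4096-entry ring uses the 64-entry block size, so
 * VNIC_RQ_BUF_BLKS_NEEDED(4096) = DIV_ROUND_UP(4096, 64) = 64 blocks,
 * which is also VNIC_RQ_BUF_BLKS_MAX.  A small 32-entry ring drops to
 * the 32-entry block size and needs just one block.
 */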
struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
};
struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;
	struct vnic_rq_buf *to_clean;
	void *os_buf_head;
	unsigned int pkts_outstanding;
};
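/*
 * to_use and to_clean form a producer/consumer split over the
 * descriptor ring: vnic_rq_post() advances to_use as buffers are handed
 * to hardware, and vnic_rq_service() advances to_clean as completions
 * come back.
 */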
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
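/*
 * The extra "- 1" reflects that one descriptor is always held back and
 * never posted, presumably so that a completely full ring cannot be
 * confused with an empty one when hardware compares fetch_index
 * against posted_index.
 */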
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}
static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}
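/*
 * Usage sketch (not part of this header): a driver typically posts
 * receive buffers from a buf_fill callback.  The allocation/mapping
 * helper and buffer length below are hypothetical placeholders.
 *
 *	static int my_rq_buf_fill(struct vnic_rq *rq)
 *	{
 *		void *os_buf;
 *		dma_addr_t dma_addr;
 *
 *		if (my_alloc_and_map_buf(rq, &os_buf, &dma_addr))
 *			return -ENOMEM;
 *
 *		vnic_rq_post(rq, os_buf, 0, dma_addr, MY_RQ_BUF_LEN);
 *		return 0;
 *	}
 */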
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}
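/*
 * vnic_rq_return_descs() pairs with the VNIC_RQ_DEFER_RETURN_DESC
 * option below: a caller that defers the per-descriptor return in
 * vnic_rq_service() gives the descriptors back in bulk here.
 */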
enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};
static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {

		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}
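/*
 * Typical call site (a sketch, not from this header): a CQ service
 * loop hands each completed descriptor to a driver-supplied
 * buf_service callback, e.g.:
 *
 *	vnic_rq_service(rq, cq_desc, completed_index,
 *		VNIC_RQ_RETURN_DESC, my_rq_buf_service, NULL);
 *
 * where the hypothetical my_rq_buf_service() unmaps the DMA buffer and
 * passes the received frame up the stack.
 */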
static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {

		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}
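/*
 * vnic_rq_fill() keeps invoking buf_fill (such as the my_rq_buf_fill()
 * sketch above) until the ring is full or the callback fails, so an
 * allocation failure leaves the ring partially filled rather than
 * looping forever.
 */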
void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));

#endif /* _VNIC_RQ_H_ */