/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_

#include <linux/pci.h>

#include "vnic_wq.h"
#include "fcpio.h"

#define VNIC_WQ_COPY_MAX	1
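
/*
 * Per-queue state for the copy work queue: the descriptor ring that carries
 * fcpio_host_req descriptors to the adapter.  to_use_index is the next slot
 * the driver will fill; to_clean_index is the next slot to be reclaimed once
 * the adapter has consumed it.
 */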
struct vnic_wq_copy {
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	unsigned int to_use_index;
	unsigned int to_clean_index;
};

static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_avail;
}
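
/*
 * Note the "- 1": the ring apparently keeps one descriptor slot permanently
 * unused so that a completely full queue can be told apart from an empty one
 * (to_use_index == to_clean_index would otherwise be ambiguous).
 */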
static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
}
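
/*
 * Producer-side usage, sketched (the descriptor contents and any locking are
 * up to the caller; this header only provides the queue accounting):
 *
 *	struct fcpio_host_req *desc;
 *
 *	if (!vnic_wq_copy_desc_avail(wq))
 *		return;				-- ring full, try again later
 *	desc = vnic_wq_copy_next_desc(wq);	-- slot at wq->to_use_index
 *	... fill in *desc ...
 *	vnic_wq_copy_post(wq);			-- advance index, ring doorbell
 */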
static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
{
	struct fcpio_host_req *desc = wq->ring.descs;
	return &desc[wq->to_use_index];
}

static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
{
	((wq->to_use_index + 1) == wq->ring.desc_count) ?
		(wq->to_use_index = 0) : (wq->to_use_index++);
	wq->ring.desc_avail--;

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();

	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
}
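
/*
 * Return descriptors up to and including @index to the free pool.  The two
 * branches below handle the case where the completion index has wrapped past
 * the end of the ring relative to to_clean_index.
 */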
static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
{
	unsigned int cnt;

	if (wq->to_clean_index <= index)
		cnt = (index - wq->to_clean_index) + 1;
	else
		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;

	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
	wq->ring.desc_avail += cnt;
}
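
/*
 * Walk the ring from to_clean_index up to @completed_index, invoking
 * @q_service on each completed descriptor and returning it to the free pool.
 * A @completed_index of (u16)-1 is treated as "clean every outstanding
 * descriptor", i.e. the loop runs until to_clean_index catches up with
 * to_use_index.
 */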
static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
	u16 completed_index,
	void (*q_service)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc))
{
	struct fcpio_host_req *wq_desc = wq->ring.descs;
	unsigned int curr_index;

	while (1) {

		if (q_service)
			(*q_service)(wq, &wq_desc[wq->to_clean_index]);

		wq->ring.desc_avail++;

		curr_index = wq->to_clean_index;

		/* increment the to-clean index so that we start
		 * with an unprocessed index next time we enter the loop
		 */
		((wq->to_clean_index + 1) == wq->ring.desc_count) ?
			(wq->to_clean_index = 0) : (wq->to_clean_index++);

		if (curr_index == completed_index)
			break;

		/* we have cleaned all the entries */
		if ((completed_index == (u16)-1) &&
		    (wq->to_clean_index == wq->to_use_index))
			break;
	}
}
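
/*
 * Queue lifecycle entry points (allocation, init, enable/disable, cleanup),
 * implemented outside this header.
 */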
void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size);
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
	void (*q_clean)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc));

#endif /* _VNIC_WQ_COPY_H_ */