/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */
#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_

#include <linux/pci.h>

#include "vnic_wq.h"
#include "fcpio.h"

#define VNIC_WQ_COPY_MAX	1

struct vnic_wq_copy {
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	unsigned to_use_index;			/* next slot to post */
	unsigned to_clean_index;		/* next slot to reclaim */
};
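
/* Descriptors currently free for posting. */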
static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_avail;
}
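
/* Descriptors currently outstanding; the ring reserves one slot, so at
 * most desc_count - 1 descriptors can be in use at once.
 */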
static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
}
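
/* Return the next free descriptor slot; the caller fills it in and then
 * makes it visible to hardware with vnic_wq_copy_post().
 */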
static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
{
	struct fcpio_host_req *desc = wq->ring.descs;
	return &desc[wq->to_use_index];
}
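
/* Publish the descriptor written at the current to-use slot: advance the
 * index (wrapping at the end of the ring) and write the new value to the
 * hardware posted-index register.
 */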
static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
{
	((wq->to_use_index + 1) == wq->ring.desc_count) ?
		(wq->to_use_index = 0) : (wq->to_use_index++);
	wq->ring.desc_avail--;

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();

	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
}
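
/* Reclaim all descriptors up to and including @index, handling the case
 * where the completion point has wrapped past the end of the ring.
 */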
static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
{
	unsigned int cnt;

	if (wq->to_clean_index <= index)
		cnt = (index - wq->to_clean_index) + 1;
	else	/* the completion index wrapped past the end of the ring */
		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;

	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
	wq->ring.desc_avail += cnt;
}
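
/* Clean the ring from the current to-clean slot through @completed_index,
 * calling @q_service (if any) on each completed descriptor and returning
 * its slot to the free pool; a @completed_index of (u16)-1 drains every
 * outstanding descriptor up to the to-use slot.
 */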
static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
	u16 completed_index,
	void (*q_service)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc))
{
	struct fcpio_host_req *wq_desc = wq->ring.descs;
	unsigned int curr_index;

	while (1) {

		if (q_service)
			(*q_service)(wq, &wq_desc[wq->to_clean_index]);

		wq->ring.desc_avail++;

		curr_index = wq->to_clean_index;

		/* increment the to-clean index so that we start
		 * with an unprocessed index next time we enter the loop
		 */
		((wq->to_clean_index + 1) == wq->ring.desc_count) ?
			(wq->to_clean_index = 0) : (wq->to_clean_index++);

		if (curr_index == completed_index)
			break;

		/* we have cleaned all the entries */
		if ((completed_index == (u16)-1) &&
		    (wq->to_clean_index == wq->to_use_index))
			break;
	}
}
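
/* Queue bring-up and teardown entry points, implemented in vnic_wq_copy.c. */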
void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
	unsigned int index, unsigned int desc_count, unsigned int desc_size);
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
	void (*q_clean)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc));

#endif /* _VNIC_WQ_COPY_H_ */