/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
static void oct_poll_req_completion(struct work_struct *work);

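/* Response lists track soft commands posted to the NIC firmware.
 * octeon_setup_response_list() below initializes one list head, lock,
 * and pending-request counter per list, then creates the delayed-work
 * queue whose worker polls for DMA'd completions.
 */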
int octeon_setup_response_list(struct octeon_device *oct)
{
	int i, ret = 0;
	struct cavium_wq *cwq;

	for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
		INIT_LIST_HEAD(&oct->response_list[i].head);
		spin_lock_init(&oct->response_list[i].lock);
		atomic_set(&oct->response_list[i].pending_req_count, 0);
	}
	spin_lock_init(&oct->cmd_resp_wqlock);

	oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
	if (!oct->dma_comp_wq.wq) {
		dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
		return -ENOMEM;
	}

	cwq = &oct->dma_comp_wq;
	INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
	cwq->wk.ctxptr = oct;
	oct->cmd_resp_state = OCT_DRV_ONLINE;

	return ret;
}

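/* Tear down in reverse order of setup: cancel any queued poll work
 * first, then destroy the workqueue it ran on.
 */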
void octeon_delete_response_list(struct octeon_device *oct)
{
	cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
	destroy_workqueue(oct->dma_comp_wq.wq);
}

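/* Drain completed (or expired) soft commands from the ordered list.
 * Returns 1 when the list is empty, 0 otherwise. A nonzero force_quit
 * fails any still-pending head request as a timeout instead of waiting
 * for its completion word.
 */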
int lio_process_ordered_list(struct octeon_device *octeon_dev,
			     u32 force_quit)
{
	struct octeon_response_list *ordered_sc_list;
	struct octeon_soft_command *sc;
	int request_complete = 0;
	int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
	u32 status;
	u64 status64;

	octeon_free_sc_done_list(octeon_dev);

	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

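	/* Pop requests off the head of the ordered list one at a time.
	 * Note the list lock is released before sc->callback() is invoked
	 * further below.
	 */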
	do {
		spin_lock_bh(&ordered_sc_list->lock);

		if (list_empty(&ordered_sc_list->head)) {
			spin_unlock_bh(&ordered_sc_list->lock);
			return 1;
		}

		sc = list_first_entry(&ordered_sc_list->head,
				      struct octeon_soft_command, node);

		status = OCTEON_REQUEST_PENDING;

		/* check if octeon has finished DMA'ing a response
		 * to where rptr is pointing to
		 */
		status64 = *sc->status_word;

		if (status64 != COMPLETION_WORD_INIT) {
			/* This logic ensures that all 64b have been written.
			 * 1. check byte 0 for non-FF
			 * 2. if non-FF, then swap result from BE to host order
			 * 3. check byte 7 (swapped to 0) for non-FF
			 * 4. if non-FF, use the low 32-bit status code
			 * 5. if either byte 0 or byte 7 is FF, don't use status
			 */
			if ((status64 & 0xff) != 0xff) {
				octeon_swap_8B_data(&status64, 1);
				if (((status64 & 0xff) != 0xff)) {
					/* retrieve 16-bit firmware status */
					status = (u32)(status64 & 0xffffULL);
					if (status)
						status =
						    FIRMWARE_STATUS_CODE(status);
					else
						/* i.e. no error */
						status = OCTEON_REQUEST_DONE;
				}
			}
		} else if (unlikely(force_quit) || (sc->expiry_time &&
			time_after(jiffies, (unsigned long)sc->expiry_time))) {
			struct octeon_instr_irh *irh =
				(struct octeon_instr_irh *)&sc->cmd.cmd3.irh;

			dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__);
			dev_err(&octeon_dev->pci_dev->dev,
				"cmd %x/%x/%llx/%llx failed, ",
				irh->opcode, irh->subcode,
				sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]);
			dev_err(&octeon_dev->pci_dev->dev,
				"timeout (%ld, %ld)\n",
				(long)jiffies, (long)sc->expiry_time);
			status = OCTEON_REQUEST_TIMEOUT;
		}

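		/* status is now OCTEON_REQUEST_DONE, a firmware error code,
		 * OCTEON_REQUEST_TIMEOUT, or still OCTEON_REQUEST_PENDING if
		 * the completion word has not been fully written yet.
		 */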
		if (status != OCTEON_REQUEST_PENDING) {
			sc->sc_status = status;

			/* we have received a response or we have timed out */
			/* remove node from linked list */
			list_del(&sc->node);
			atomic_dec(&octeon_dev->response_list
				   [OCTEON_ORDERED_SC_LIST].
				   pending_req_count);

			if (!sc->callback) {
				atomic_inc(&octeon_dev->response_list
					   [OCTEON_DONE_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node,
					      &octeon_dev->response_list
					      [OCTEON_DONE_SC_LIST].head);

				if (unlikely(READ_ONCE(sc->caller_is_done))) {
					/* caller does not wait for response
					 * from firmware
					 */
					if (status != OCTEON_REQUEST_DONE) {
						struct octeon_instr_irh *irh;

						irh = (struct octeon_instr_irh *)
						      &sc->cmd.cmd3.irh;
						dev_dbg(&octeon_dev->pci_dev->dev,
							"%s: sc failed: opcode=%x, ",
							__func__, irh->opcode);
						dev_dbg(&octeon_dev->pci_dev->dev,
							"subcode=%x, ossp[0]=%llx, ",
							irh->subcode,
							sc->cmd.cmd3.ossp[0]);
						dev_dbg(&octeon_dev->pci_dev->dev,
							"ossp[1]=%llx, status=%d\n",
							sc->cmd.cmd3.ossp[1],
							status);
					}
				} else {
					complete(&sc->complete);
				}

				spin_unlock_bh(&ordered_sc_list->lock);
			} else {
				/* sc with callback function */
				if (status == OCTEON_REQUEST_TIMEOUT) {
					atomic_inc(&octeon_dev->response_list
						   [OCTEON_ZOMBIE_SC_LIST].
						   pending_req_count);
					list_add_tail(&sc->node,
						      &octeon_dev->response_list
						      [OCTEON_ZOMBIE_SC_LIST].
						      head);
				}

				spin_unlock_bh(&ordered_sc_list->lock);

				sc->callback(octeon_dev, status,
					     sc->callback_arg);
				/* sc is freed by caller */
			}

			request_complete++;
		} else {
			/* no response yet */
			request_complete = 0;
			spin_unlock_bh(&ordered_sc_list->lock);
		}

		/* If we hit the Max Ordered requests to process every loop,
		 * we quit
		 * and let this function be invoked the next time the poll
		 * thread runs
		 * to process the remaining requests. This function can take up
		 * the entire CPU if there is no upper limit to the requests
		 * processed.
		 */
		if (request_complete >= resp_to_process)
			break;
	} while (request_complete);

	return 0;
}

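/* Delayed-work handler: drain the ordered list once, then re-arm
 * itself one millisecond later only while ordered requests remain
 * pending.
 */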
static void oct_poll_req_completion(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	struct cavium_wq *cwq = &oct->dma_comp_wq;

	lio_process_ordered_list(oct, 0);

	if (atomic_read(&oct->response_list
			[OCTEON_ORDERED_SC_LIST].pending_req_count))
		queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
}