/*
 * Copyright (C) 2005 - 2009 ServerEngines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 */
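
/*
 * Driver-local headers: struct be_ctrl_info, the SE_DEBUG macro and the
 * AMAP_* helpers used throughout this file are declared in the be2iscsi
 * headers. The exact include list below is an assumption based on how the
 * rest of the driver is organized.
 */
#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"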
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else
		return false;
}
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	/* mark the completion as consumed so it is not treated as new again */
	compl->flags = 0;
}
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* convert the status dword to CPU byte order */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
		dev_err(&ctrl->pdev->dev,
			"error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -1;
	}
	return 0;
}
static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		 ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE);
}
void beiscsi_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
		       u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, ctrl->db + DB_CQ_OFFSET);
}
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	int cnt = 0, wait = 5;	/* in usecs */
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 6000000) {
			dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
			return -1;
		}

		/* back off to millisecond delays once the quick polls fail */
		if (cnt > 50) {
			wait = long_delay;
			mdelay(long_delay / 1000);
		} else
			udelay(wait);
		cnt += wait;
	} while (true);
	return 0;
}
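
/*
 * Mailbox posting, as implemented in be_mbox_notify() below: the DMA address
 * of the mailbox is handed to the MPU with two doorbell writes. The first
 * write sets MPU_MAILBOX_DB_HI_MASK and carries the upper 32 bits of the
 * address; the second carries the lower bits. After each write the driver
 * polls the ready bit with be_mbox_db_ready_wait() before it looks at the
 * completion the firmware places back in the mailbox.
 */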
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* first doorbell write: upper 32 bits of the mailbox DMA address */
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, "be_mbox_db_ready_wait failed 1\n");
		return status;
	}

	/* second doorbell write: lower bits of the mailbox DMA address */
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, "be_mbox_db_ready_wait failed 2\n");
		return status;
	}

	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			SE_DEBUG(DBG_LVL_1, "be_mcc_compl_process failed\n");
			return status;
		}
	} else {
		dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
					MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
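
/*
 * The EQ delay multiplier programmed below is derived from the requested
 * interrupt delay: usec_delay is converted to an interrupt rate in
 * interrupts/sec, and the multiplier is (MAX_INTR_RATE - rate) / rate,
 * rounded to the nearest integer and clamped to the 10-bit field maximum
 * of 1023.
 */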
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;

		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
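
/*
 * The queue create/destroy commands below all follow the same pattern: take
 * mbox_lock, build an embedded WRB in the mailbox, fill in the command
 * header and the AMAP-encoded context, attach the queue's page addresses,
 * post the mailbox with be_mbox_notify() and, on success, read the new
 * queue id out of the response the firmware wrote back into the same WRB.
 */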
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
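
/*
 * FW initialization does not use the normal command header. Instead the
 * first eight bytes of the WRB carry the fixed signature
 * 0xFF 0x12 0x34 0xFF 0xFF 0x56 0x78 0xFF, which presumably also lets the
 * firmware verify the host's byte order (hence the endian_check name).
 */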
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	u8 *endian_check;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize failed\n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));
	if (!q_mem->va)
		SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
		      __ilog2_u32(cq->len / 256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=0x%08x\n",
			 status);
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
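
/*
 * Ring lengths are programmed in encoded form: fls(q_len) gives
 * log2(len) + 1, with an encoding of 16 wrapping to 0 (the largest ring
 * size the field can describe).
 */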
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -ENXIO;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_get_mac_addr(struct be_ctrl_info *ctrl, u8 *mac_addr)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
			   sizeof(*req));

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac_address, ETH_ALEN);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
		      1);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
		      be_encoded_q_len(length / sizeof(struct phys_addr)));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
		      ctxt, entry_size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
		      cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status)
		wrbq->id = le16_to_cpu(resp->cid);
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
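
/*
 * SGL pages are posted in chunks: each mailbox command carries at most
 * BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req, pages) page addresses,
 * so the loop below advances page_offset and q_mem->dma one chunk at a time
 * until num_pages is exhausted. A num_pages value of 0xff appears to be a
 * special marker that is forwarded to the firmware unchanged.
 */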
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			SE_DEBUG(DBG_LVL_1,
				 "FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}