// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 */

#include <linux/bsg-lib.h>
#include <uapi/scsi/scsi_bsg_mpi3mr.h>

/* Counts HDB release events; incremented by the diag buffer code below. */
static atomic64_t event_counter;
/**
 * mpi3mr_alloc_trace_buffer - Allocate trace buffer
 * @mrioc: Adapter instance reference
 * @trace_size: Trace buffer size
 *
 * Allocate a DMA-coherent trace buffer of the requested size.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
{
	struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];

	diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
	    trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
	if (diag_buffer->addr) {
		dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
		return 0;
	}
	return -1;
}
/**
 * mpi3mr_alloc_diag_bufs - Allocate memory for diag buffers
 * @mrioc: Adapter instance reference
 *
 * This function checks whether the driver-defined buffer sizes
 * are greater than the IOCFacts-provided controller local buffer
 * sizes and, if the driver-defined sizes are larger, allocates
 * the specific buffer by reading driver page 1.
 *
 * Return: Nothing.
 */
void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	struct diag_buffer_desc *diag_buffer;
	struct mpi3_driver_page1 driver_pg1;
	u32 trace_dec_size, trace_min_size, fw_dec_size, fw_min_size,
	    trace_size, fw_size;
	u16 pg_sz = sizeof(driver_pg1);
	int retval = 0;
	bool retry = false;

	if (mrioc->diag_buffers[0].addr || mrioc->diag_buffers[1].addr)
		return;

	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
	if (retval) {
		ioc_warn(mrioc,
		    "%s: driver page 1 read failed, allocating trace\n"
		    "and firmware diag buffers of default size\n", __func__);
		trace_size = fw_size = MPI3MR_DEFAULT_HDB_MAX_SZ;
		trace_dec_size = fw_dec_size = MPI3MR_DEFAULT_HDB_DEC_SZ;
		trace_min_size = fw_min_size = MPI3MR_DEFAULT_HDB_MIN_SZ;
	} else {
		trace_size = driver_pg1.host_diag_trace_max_size * 1024;
		trace_dec_size = driver_pg1.host_diag_trace_decrement_size
		    * 1024;
		trace_min_size = driver_pg1.host_diag_trace_min_size * 1024;
		fw_size = driver_pg1.host_diag_fw_max_size * 1024;
		fw_dec_size = driver_pg1.host_diag_fw_decrement_size * 1024;
		fw_min_size = driver_pg1.host_diag_fw_min_size * 1024;
		dprint_init(mrioc,
		    "%s: trace diag buffer sizes read from driver\n"
		    "page1: maximum size = %dKB, decrement size = %dKB\n"
		    ", minimum size = %dKB\n", __func__,
		    driver_pg1.host_diag_trace_max_size,
		    driver_pg1.host_diag_trace_decrement_size,
		    driver_pg1.host_diag_trace_min_size);
		dprint_init(mrioc,
		    "%s: firmware diag buffer sizes read from driver\n"
		    "page1: maximum size = %dKB, decrement size = %dKB\n"
		    ", minimum size = %dKB\n", __func__,
		    driver_pg1.host_diag_fw_max_size,
		    driver_pg1.host_diag_fw_decrement_size,
		    driver_pg1.host_diag_fw_min_size);
		if ((trace_size == 0) && (fw_size == 0))
			return;
	}

retry_trace:
	diag_buffer = &mrioc->diag_buffers[0];
	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_TRACE;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
	if ((mrioc->facts.diag_trace_sz < trace_size) && (trace_size >=
	    trace_min_size)) {
		if (!retry)
			dprint_init(mrioc,
			    "trying to allocate trace diag buffer of size = %dKB\n",
			    trace_size / 1024);
		if (get_order(trace_size) > MAX_PAGE_ORDER ||
		    mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
			retry = true;
			trace_size -= trace_dec_size;
			dprint_init(mrioc, "trace diag buffer allocation failed\n"
			    "retrying smaller size %dKB\n", trace_size / 1024);
			goto retry_trace;
		} else
			diag_buffer->size = trace_size;
	}

	retry = false;
retry_fw:
	diag_buffer = &mrioc->diag_buffers[1];
	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_FW;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
	if ((mrioc->facts.diag_fw_sz < fw_size) && (fw_size >= fw_min_size)) {
		if (get_order(fw_size) <= MAX_PAGE_ORDER) {
			diag_buffer->addr
			    = dma_alloc_coherent(&mrioc->pdev->dev, fw_size,
				&diag_buffer->dma_addr, GFP_KERNEL);
		}
		if (!retry)
			dprint_init(mrioc,
			    "%s: trying to allocate firmware diag buffer of size = %dKB\n",
			    __func__, fw_size / 1024);
		if (diag_buffer->addr) {
			dprint_init(mrioc, "%s: firmware diag buffer allocated successfully\n",
			    __func__);
			diag_buffer->size = fw_size;
		} else {
			retry = true;
			fw_size -= fw_dec_size;
			dprint_init(mrioc, "%s: firmware diag buffer allocation failed,\n"
			    "retrying smaller size %dKB\n",
			    __func__, fw_size / 1024);
			goto retry_fw;
		}
	}
}
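/*
 * Worked example of the decrement-retry scheme above (the numbers are
 * purely illustrative, not values any particular controller reports):
 * with a maximum of 8192KB, a decrement of 2048KB and a minimum of
 * 2048KB from driver page 1, the retry labels walk the allocation
 * attempts through 8192KB, 6144KB, 4096KB and finally 2048KB before
 * giving up, so a host with fragmented DMA memory can still end up
 * with a smaller but usable diag buffer.
 */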
/**
 * mpi3mr_issue_diag_buf_post - Send diag buffer post req
 * @mrioc: Adapter instance reference
 * @diag_buffer: Diagnostic buffer descriptor
 *
 * Issue diagnostic buffer post MPI request through admin queue
 * and wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
	struct diag_buffer_desc *diag_buffer)
{
	struct mpi3_diag_buffer_post_request diag_buf_post_req;
	u8 prev_status;
	int retval = 0;

	memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->init_cmds.mutex);
		return -1;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	diag_buf_post_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	diag_buf_post_req.function = MPI3_FUNCTION_DIAG_BUFFER_POST;
	diag_buf_post_req.type = diag_buffer->type;
	diag_buf_post_req.address = cpu_to_le64(diag_buffer->dma_addr);
	diag_buf_post_req.length = cpu_to_le32(diag_buffer->size);

	dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
	    diag_buffer->type);
	prev_status = diag_buffer->status;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_post_req,
	    sizeof(diag_buf_post_req), 1);
	if (retval) {
		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
		    __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->init_cmds.is_waiting = 0;
		dprint_bsg_err(mrioc, "%s: command timedout\n", __func__);
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, diag_buffer->type,
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	dprint_bsg_info(mrioc, "%s: diag buffer type %d posted successfully\n",
	    __func__, diag_buffer->type);

out_unlock:
	if (retval)
		diag_buffer->status = prev_status;
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
	return retval;
}
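/*
 * The function above is one instance of the driver's synchronous
 * admin-command pattern. A condensed sketch of that pattern
 * (illustrative only; the real steps live in the code above):
 *
 *	mutex_lock(&mrioc->init_cmds.mutex);	// one user at a time
 *	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
 *	init_completion(&mrioc->init_cmds.done);
 *	mpi3mr_admin_request_post(mrioc, &req, sizeof(req), 1);
 *	wait_for_completion_timeout(&mrioc->init_cmds.done, tmo * HZ);
 *	// on timeout: clear is_waiting and escalate via
 *	// mpi3mr_check_rh_fault_ioc(); otherwise check ioc_status
 *	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
 *	mutex_unlock(&mrioc->init_cmds.mutex);
 */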
/**
 * mpi3mr_post_diag_bufs - Post diag buffers to the controller
 * @mrioc: Adapter instance reference
 *
 * This function calls a helper function to post both trace and
 * firmware buffers to the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 i;
	struct diag_buffer_desc *diag_buffer;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		if (!(diag_buffer->addr))
			continue;
		if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer))
			retval = -1;
	}
	return retval;
}
/**
 * mpi3mr_issue_diag_buf_release - Send diag buffer release req
 * @mrioc: Adapter instance reference
 * @diag_buffer: Diagnostic buffer descriptor
 *
 * Issue diagnostic buffer manage MPI request with release
 * action request through admin queue and wait for the
 * completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc,
	struct diag_buffer_desc *diag_buffer)
{
	struct mpi3_diag_buffer_manage_request diag_buf_manage_req;
	int retval = 0;

	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		return retval;

	memset(&diag_buf_manage_req, 0, sizeof(diag_buf_manage_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		dprint_reset(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->init_cmds.mutex);
		return -1;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	diag_buf_manage_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	diag_buf_manage_req.function = MPI3_FUNCTION_DIAG_BUFFER_MANAGE;
	diag_buf_manage_req.type = diag_buffer->type;
	diag_buf_manage_req.action = MPI3_DIAG_BUFFER_ACTION_RELEASE;

	dprint_reset(mrioc, "%s: releasing diag buffer type %d\n", __func__,
	    diag_buffer->type);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_manage_req,
	    sizeof(diag_buf_manage_req), 1);
	if (retval) {
		dprint_reset(mrioc, "%s: admin request post failed\n", __func__);
		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
		    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->init_cmds.is_waiting = 0;
		dprint_reset(mrioc, "%s: command timedout\n", __func__);
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_reset(mrioc,
		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, diag_buffer->type,
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	dprint_reset(mrioc, "%s: diag buffer type %d released successfully\n",
	    __func__, diag_buffer->type);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
	return retval;
}
/**
 * mpi3mr_process_trigger - Generic HDB Trigger handler
 * @mrioc: Adapter instance reference
 * @trigger_type: Trigger type
 * @trigger_data: Trigger data
 * @trigger_flags: Trigger flags
 *
 * This function checks the validity of the HDBs and triggers
 * and, based on the trigger information, creates an event to be
 * processed in the firmware event worker thread.
 *
 * This function should be called with the trigger spinlock held.
 *
 * Return: Nothing.
 */
static void mpi3mr_process_trigger(struct mpi3mr_ioc *mrioc, u8 trigger_type,
	union mpi3mr_trigger_data *trigger_data, u8 trigger_flags)
{
	struct trigger_event_data event_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	u64 global_trigger;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);
	if (trace_hdb &&
	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		trace_hdb = NULL;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
	if (fw_hdb &&
	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		fw_hdb = NULL;

	if (mrioc->snapdump_trigger_active || (mrioc->fw_release_trigger_active
	    && mrioc->trace_release_trigger_active) ||
	    (!trace_hdb && !fw_hdb) || (!mrioc->driver_pg2) ||
	    ((trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
	    && (!mrioc->driver_pg2->num_triggers)))
		return;

	memset(&event_data, 0, sizeof(event_data));
	event_data.trigger_type = trigger_type;
	memcpy(&event_data.trigger_specific_data, trigger_data,
	    sizeof(*trigger_data));
	global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);

	if (global_trigger & MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED) {
		event_data.snapdump = true;
		event_data.trace_hdb = trace_hdb;
		event_data.fw_hdb = fw_hdb;
		mrioc->snapdump_trigger_active = true;
	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_GLOBAL) {
		if ((trace_hdb) && (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE) &&
		    (!mrioc->trace_release_trigger_active)) {
			event_data.trace_hdb = trace_hdb;
			mrioc->trace_release_trigger_active = true;
		}
		if ((fw_hdb) && (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE) &&
		    (!mrioc->fw_release_trigger_active)) {
			event_data.fw_hdb = fw_hdb;
			mrioc->fw_release_trigger_active = true;
		}
	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) {
		if ((trace_hdb) && (trigger_flags &
		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE) &&
		    (!mrioc->trace_release_trigger_active)) {
			event_data.trace_hdb = trace_hdb;
			mrioc->trace_release_trigger_active = true;
		}
		if ((fw_hdb) && (trigger_flags &
		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE) &&
		    (!mrioc->fw_release_trigger_active)) {
			event_data.fw_hdb = fw_hdb;
			mrioc->fw_release_trigger_active = true;
		}
	}

	if (event_data.trace_hdb || event_data.fw_hdb)
		mpi3mr_hdb_trigger_data_event(mrioc, &event_data);
}
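/*
 * Example flow (illustrative): a firmware fault reply arrives,
 * mpi3mr_reply_trigger() below finds a matching driver page 2 element
 * and calls this function with MPI3MR_HDB_TRIGGER_TYPE_ELEMENT; the
 * event_data built here is queued via mpi3mr_hdb_trigger_data_event()
 * and the firmware event worker later releases the selected trace
 * and/or firmware HDBs so the host can capture their contents.
 */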
/**
 * mpi3mr_global_trigger - Global HDB trigger handler
 * @mrioc: Adapter instance reference
 * @trigger_data: Trigger data
 *
 * This function checks whether the given global trigger is
 * enabled in driver page 2 and if so calls the generic trigger
 * handler to queue an event for HDB release.
 *
 * Return: Nothing.
 */
void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data)
{
	unsigned long flags;
	union mpi3mr_trigger_data trigger_specific_data;

	spin_lock_irqsave(&mrioc->trigger_lock, flags);
	if (le64_to_cpu(mrioc->driver_pg2->global_trigger) & trigger_data) {
		memset(&trigger_specific_data, 0,
		    sizeof(trigger_specific_data));
		trigger_specific_data.global = trigger_data;
		mpi3mr_process_trigger(mrioc, MPI3MR_HDB_TRIGGER_TYPE_GLOBAL,
		    &trigger_specific_data, 0);
	}
	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
}
/**
 * mpi3mr_scsisense_trigger - SCSI sense HDB trigger handler
 * @mrioc: Adapter instance reference
 * @sensekey: Sense Key
 * @asc: Additional Sense Code
 * @ascq: Additional Sense Code Qualifier
 *
 * This function compares SCSI sense trigger values with driver
 * page 2 values and calls the generic trigger handler to release
 * HDBs if a match is found.
 *
 * Return: Nothing.
 */
void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 sensekey, u8 asc,
	u8 ascq)
{
	struct mpi3_driver2_trigger_scsi_sense *scsi_sense_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->scsisense_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		scsi_sense_trigger = (struct mpi3_driver2_trigger_scsi_sense *)
		    mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;
		for (i = 0; i < num_triggers; i++, scsi_sense_trigger++) {
			if (scsi_sense_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE)
				continue;
			if (!(scsi_sense_trigger->sense_key ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL
			    || scsi_sense_trigger->sense_key == sensekey))
				continue;
			if (!(scsi_sense_trigger->asc ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL ||
			    scsi_sense_trigger->asc == asc))
				continue;
			if (!(scsi_sense_trigger->ascq ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL ||
			    scsi_sense_trigger->ascq == ascq))
				continue;
			trigger_flags = scsi_sense_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)scsi_sense_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}
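/*
 * Matching example (hypothetical trigger element): a driver page 2
 * entry with sense_key = 0x3 (MEDIUM ERROR), asc set to the
 * ASC_MATCH_ALL encoding and ascq set to the ASCQ_MATCH_ALL encoding
 * fires for any 0x3/xx/xx sense triple, since the MATCH_ALL values
 * make the asc/ascq comparisons above succeed unconditionally.
 */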
/**
 * mpi3mr_event_trigger - MPI event HDB trigger handler
 * @mrioc: Adapter instance reference
 * @event: MPI event ID
 *
 * This function compares event trigger values with driver page
 * 2 values and calls the generic trigger handler to release
 * HDBs if a match is found.
 *
 * Return: Nothing.
 */
void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event)
{
	struct mpi3_driver2_trigger_event *event_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->event_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		event_trigger = (struct mpi3_driver2_trigger_event *)
		    mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;

		for (i = 0; i < num_triggers; i++, event_trigger++) {
			if (event_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_EVENT)
				continue;
			if (event_trigger->event != event)
				continue;
			trigger_flags = event_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)event_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}
/**
 * mpi3mr_reply_trigger - MPI Reply HDB trigger handler
 * @mrioc: Adapter instance reference
 * @ioc_status: Masked value of IOC Status from MPI Reply
 * @ioc_loginfo: IOC Log Info from MPI Reply
 *
 * This function compares IOC status and IOC log info trigger
 * values with driver page 2 values and calls the generic trigger
 * handler to release HDBs if a match is found.
 *
 * Return: Nothing.
 */
void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 ioc_status,
	u32 ioc_loginfo)
{
	struct mpi3_driver2_trigger_reply *reply_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->reply_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		reply_trigger = (struct mpi3_driver2_trigger_reply *)
		    mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;
		for (i = 0; i < num_triggers; i++, reply_trigger++) {
			if (reply_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_REPLY)
				continue;
			if ((le16_to_cpu(reply_trigger->ioc_status) !=
			    ioc_status)
			    && (le16_to_cpu(reply_trigger->ioc_status) !=
			    MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL))
				continue;
			if ((le32_to_cpu(reply_trigger->ioc_log_info) !=
			    (le32_to_cpu(reply_trigger->ioc_log_info_mask) &
			    ioc_loginfo)))
				continue;
			trigger_flags = reply_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)reply_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}
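/*
 * Masked-compare example (hypothetical trigger values): with
 * ioc_log_info_mask = 0xFFFF0000 and ioc_log_info = 0x30030000 in the
 * trigger element, a reply carrying ioc_loginfo 0x3003ABCD matches,
 * because (0xFFFF0000 & 0x3003ABCD) == 0x30030000; the low 16 bits
 * are ignored by design.
 */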
/**
 * mpi3mr_get_num_trigger - Gets number of HDB triggers
 * @mrioc: Adapter instance reference
 * @num_triggers: Number of triggers
 * @page_action: Page action
 *
 * This function reads the number of triggers by reading driver
 * page 2.
 *
 * Return: 0 on success and proper error codes on failure
 */
static int mpi3mr_get_num_trigger(struct mpi3mr_ioc *mrioc, u8 *num_triggers,
	u8 page_action)
{
	struct mpi3_driver_page2 drvr_page2;
	int retval = 0;

	*num_triggers = 0;

	retval = mpi3mr_cfg_get_driver_pg2(mrioc, &drvr_page2,
	    sizeof(struct mpi3_driver_page2), page_action);

	if (retval) {
		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
		return retval;
	}
	*num_triggers = drvr_page2.num_triggers;
	return retval;
}
/**
 * mpi3mr_refresh_trigger - Handler for Refresh trigger BSG
 * @mrioc: Adapter instance reference
 * @page_action: Page action
 *
 * This function caches driver page 2 in the driver's memory
 * by reading driver page 2 from the controller for a given page
 * type and updates the HDB trigger values.
 *
 * Return: 0 on success and proper error codes on failure
 */
int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_action)
{
	u16 pg_sz = sizeof(struct mpi3_driver_page2);
	struct mpi3_driver_page2 *drvr_page2 = NULL;
	u8 trigger_type, num_triggers;
	int retval;
	int i = 0;
	unsigned long flags;

	retval = mpi3mr_get_num_trigger(mrioc, &num_triggers, page_action);
	if (retval)
		goto out;

	pg_sz = offsetof(struct mpi3_driver_page2, trigger) +
	    (num_triggers * sizeof(union mpi3_driver2_trigger_element));
	drvr_page2 = kzalloc(pg_sz, GFP_KERNEL);
	if (!drvr_page2) {
		retval = -ENOMEM;
		goto out;
	}

	retval = mpi3mr_cfg_get_driver_pg2(mrioc, drvr_page2, pg_sz, page_action);
	if (retval) {
		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
		kfree(drvr_page2);
		goto out;
	}
	spin_lock_irqsave(&mrioc->trigger_lock, flags);
	kfree(mrioc->driver_pg2);
	mrioc->driver_pg2 = drvr_page2;
	mrioc->reply_trigger_present = false;
	mrioc->event_trigger_present = false;
	mrioc->scsisense_trigger_present = false;

	for (i = 0; (i < mrioc->driver_pg2->num_triggers); i++) {
		trigger_type = mrioc->driver_pg2->trigger[i].event.type;
		switch (trigger_type) {
		case MPI3_DRIVER2_TRIGGER_TYPE_REPLY:
			mrioc->reply_trigger_present = true;
			break;
		case MPI3_DRIVER2_TRIGGER_TYPE_EVENT:
			mrioc->event_trigger_present = true;
			break;
		case MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE:
			mrioc->scsisense_trigger_present = true;
			break;
		default:
			break;
		}
	}
	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);

out:
	return retval;
}
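/*
 * Sizing example (illustrative): with num_triggers = 3 the page is
 * read as
 *
 *	pg_sz = offsetof(struct mpi3_driver_page2, trigger) +
 *	    3 * sizeof(union mpi3_driver2_trigger_element);
 *
 * i.e. only the fixed page header plus exactly three trailing trigger
 * elements, the usual flexible-array sizing idiom.
 */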
/**
 * mpi3mr_release_diag_bufs - Release diag buffers
 * @mrioc: Adapter instance reference
 * @skip_rel_action: Skip release action and set buffer state
 *
 * This function calls a helper function to release both trace
 * and firmware buffers from the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action)
{
	u8 i;
	struct diag_buffer_desc *diag_buffer;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		if (!(diag_buffer->addr))
			continue;
		if (diag_buffer->status == MPI3MR_HDB_BUFSTATUS_RELEASED)
			continue;
		if (!skip_rel_action)
			mpi3mr_issue_diag_buf_release(mrioc, diag_buffer);
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
		atomic64_inc(&event_counter);
	}
}
/**
 * mpi3mr_set_trigger_data_in_hdb - Updates HDB trigger type and
 * trigger data
 * @hdb: HDB pointer
 * @type: Trigger type
 * @trigger_data: Pointer to trigger data information
 * @force: Trigger overwrite flag
 *
 * Updates trigger type and trigger data based on parameters
 * passed to this function.
 *
 * Return: Nothing.
 */
void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb,
	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
{
	if ((!force) && (hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN))
		return;
	hdb->trigger_type = type;
	if (!trigger_data)
		memset(&hdb->trigger_data, 0, sizeof(*trigger_data));
	else
		memcpy(&hdb->trigger_data, trigger_data, sizeof(*trigger_data));
}
/**
 * mpi3mr_set_trigger_data_in_all_hdb - Updates HDB trigger type
 * and trigger data for all HDBs
 * @mrioc: Adapter instance reference
 * @type: Trigger type
 * @trigger_data: Pointer to trigger data information
 * @force: Trigger overwrite flag
 *
 * Updates trigger type and trigger data based on parameters
 * passed to this function.
 *
 * Return: Nothing.
 */
void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc,
	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
{
	struct diag_buffer_desc *hdb = NULL;

	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_TRACE);
	if (hdb)
		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
	if (hdb)
		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
}
/**
 * mpi3mr_hdbstatuschg_evt_th - HDB status change evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Modifies the status of the applicable diag buffer descriptors.
 *
 * Return: Nothing.
 */
void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_diag_buffer_status_change *evtdata;
	struct diag_buffer_desc *diag_buffer;

	evtdata = (struct mpi3_event_data_diag_buffer_status_change *)
	    event_reply->event_data;

	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, evtdata->type);
	if (!diag_buffer)
		return;
	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		return;
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED:
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		atomic64_inc(&event_counter);
		break;
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED:
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
		break;
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED:
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED;
		break;
	default:
		dprint_event_th(mrioc, "%s: unknown reason_code(%d)\n",
		    __func__, evtdata->reason_code);
		break;
	}
}
/**
 * mpi3mr_diag_buffer_for_type - returns buffer desc for type
 * @mrioc: Adapter instance reference
 * @buf_type: Diagnostic buffer type
 *
 * Identifies the matching diag descriptor from mrioc for the
 * given diag buffer type.
 *
 * Return: diag buffer descriptor on success, NULL on failures.
 */
struct diag_buffer_desc *
mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc, u8 buf_type)
{
	u8 i;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		if (mrioc->diag_buffers[i].type == buf_type)
			return &mrioc->diag_buffers[i];
	}
	return NULL;
}
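/*
 * Usage sketch (illustrative): callers look up a descriptor by type
 * and must tolerate a NULL return, e.g.
 *
 *	struct diag_buffer_desc *hdb =
 *	    mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_TRACE);
 *	if (hdb && hdb->addr)
 *		;	// safe to operate on the trace buffer
 */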
/**
 * mpi3mr_bsg_pel_abort - sends PEL abort request
 * @mrioc: Adapter instance reference
 *
 * This function sends a PEL abort request to the firmware through
 * the admin request queue.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_pel_req_action_abort pel_abort_req;
	struct mpi3_pel_reply *pel_reply;
	int retval = 0;
	u16 pe_log_status;

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -1;
	}
	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -1;
	}

	memset(&pel_abort_req, 0, sizeof(pel_abort_req));
	mutex_lock(&mrioc->pel_abort_cmd.mutex);
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->pel_abort_cmd.mutex);
		return -1;
	}
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
	mrioc->pel_abort_cmd.is_waiting = 1;
	mrioc->pel_abort_cmd.callback = NULL;
	pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
	pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
	pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);

	mrioc->pel_abort_requested = 1;
	init_completion(&mrioc->pel_abort_cmd.done);
	retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
	    sizeof(pel_abort_req), 0);
	if (retval) {
		retval = -1;
		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
		    __func__);
		mrioc->pel_abort_requested = 0;
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->pel_abort_cmd.is_waiting = 0;
		dprint_bsg_err(mrioc, "%s: command timedout\n", __func__);
		if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, (mrioc->pel_abort_cmd.ioc_status &
		    MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->pel_abort_cmd.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
		pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
		pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
		if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
			dprint_bsg_err(mrioc,
			    "%s: command failed, pel_status(0x%04x)\n",
			    __func__, pe_log_status);
			retval = -1;
		}
	}

out_unlock:
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->pel_abort_cmd.mutex);
	return retval;
}
/**
 * mpi3mr_bsg_verify_adapter - verify adapter number is valid
 * @ioc_number: Adapter number
 *
 * This function returns the adapter instance pointer for the given
 * adapter number. If the adapter number does not match any adapter
 * in the driver's adapter list, NULL is returned.
 *
 * Return: adapter instance reference
 */
static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
{
	struct mpi3mr_ioc *mrioc = NULL;

	spin_lock(&mrioc_list_lock);
	list_for_each_entry(mrioc, &mrioc_list, list) {
		if (mrioc->id == ioc_number) {
			spin_unlock(&mrioc_list_lock);
			return mrioc;
		}
	}
	spin_unlock(&mrioc_list_lock);
	return NULL;
}
/**
 * mpi3mr_bsg_refresh_hdb_triggers - Refresh HDB trigger data
 * @mrioc: Adapter instance reference
 * @job: BSG Job pointer
 *
 * This function reads the controller trigger config page as
 * defined by the input page type and refreshes the driver's
 * local trigger information structures with the controller's
 * trigger page values.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_bsg_refresh_hdb_triggers(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_bsg_out_refresh_hdb_triggers refresh_triggers;
	uint32_t data_out_sz;
	u8 page_action;
	long rval = -EINVAL;

	data_out_sz = job->request_payload.payload_len;

	if (data_out_sz != sizeof(refresh_triggers)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return rval;
	}

	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &refresh_triggers, sizeof(refresh_triggers));

	switch (refresh_triggers.page_type) {
	case MPI3MR_HDB_REFRESH_TYPE_CURRENT:
		page_action = MPI3_CONFIG_ACTION_READ_CURRENT;
		break;
	case MPI3MR_HDB_REFRESH_TYPE_DEFAULT:
		page_action = MPI3_CONFIG_ACTION_READ_DEFAULT;
		break;
	case MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT:
		page_action = MPI3_CONFIG_ACTION_READ_PERSISTENT;
		break;
	default:
		dprint_bsg_err(mrioc,
		    "%s: unsupported refresh trigger, page_type %d\n",
		    __func__, refresh_triggers.page_type);
		return rval;
	}
	rval = mpi3mr_refresh_trigger(mrioc, page_action);

	return rval;
}
/**
 * mpi3mr_bsg_upload_hdb - Upload a specific HDB to user space
 * @mrioc: Adapter instance reference
 * @job: BSG Job pointer
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_upload_hdb(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_bsg_out_upload_hdb upload_hdb;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_out_size;
	uint32_t data_in_size;

	data_out_size = job->request_payload.payload_len;
	data_in_size = job->reply_payload.payload_len;

	if (data_out_size != sizeof(upload_hdb)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &upload_hdb, sizeof(upload_hdb));

	if ((!upload_hdb.length) || (data_in_size != upload_hdb.length)) {
		dprint_bsg_err(mrioc, "%s: invalid length argument\n",
		    __func__);
		return -EINVAL;
	}
	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, upload_hdb.buf_type);
	if ((!diag_buffer) || (!diag_buffer->addr)) {
		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
		    __func__, upload_hdb.buf_type);
		return -EINVAL;
	}

	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) {
		dprint_bsg_err(mrioc,
		    "%s: invalid buffer status %d for type %d\n",
		    __func__, diag_buffer->status, upload_hdb.buf_type);
		return -EINVAL;
	}

	if ((upload_hdb.start_offset + upload_hdb.length) > diag_buffer->size) {
		dprint_bsg_err(mrioc,
		    "%s: invalid start offset %d, length %d for type %d\n",
		    __func__, upload_hdb.start_offset, upload_hdb.length,
		    upload_hdb.buf_type);
		return -EINVAL;
	}
	sg_copy_from_buffer(job->reply_payload.sg_list,
	    job->reply_payload.sg_cnt,
	    (diag_buffer->addr + upload_hdb.start_offset),
	    data_in_size);
	return 0;
}
/**
 * mpi3mr_bsg_repost_hdb - Re-post HDB
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function retrieves the HDB descriptor corresponding to a
 * given buffer type and, if the HDB is in released status, posts
 * the HDB with the firmware.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_repost_hdb(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_bsg_out_repost_hdb repost_hdb;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_out_sz;

	data_out_sz = job->request_payload.payload_len;

	if (data_out_sz != sizeof(repost_hdb)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}
	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &repost_hdb, sizeof(repost_hdb));

	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, repost_hdb.buf_type);
	if ((!diag_buffer) || (!diag_buffer->addr)) {
		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
		    __func__, repost_hdb.buf_type);
		return -EINVAL;
	}

	if (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) {
		dprint_bsg_err(mrioc,
		    "%s: invalid buffer status %d for type %d\n",
		    __func__, diag_buffer->status, repost_hdb.buf_type);
		return -EINVAL;
	}

	if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) {
		dprint_bsg_err(mrioc, "%s: post failed for type %d\n",
		    __func__, repost_hdb.buf_type);
		return -EFAULT;
	}
	mpi3mr_set_trigger_data_in_hdb(diag_buffer,
	    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);

	return 0;
}
/**
 * mpi3mr_bsg_query_hdb - Handler for query HDB command
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function prepares and copies the host diagnostic buffer
 * entries to the user buffer.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_query_hdb(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = 0;
	struct mpi3mr_bsg_in_hdb_status *hbd_status;
	struct mpi3mr_hdb_entry *hbd_status_entry;
	u32 length, min_length;
	u8 i;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_in_sz = 0;

	data_in_sz = job->request_payload.payload_len;

	length = (sizeof(*hbd_status) + ((MPI3MR_MAX_NUM_HDB - 1) *
	    sizeof(*hbd_status_entry)));
	hbd_status = kmalloc(length, GFP_KERNEL);
	if (!hbd_status)
		return -ENOMEM;
	hbd_status_entry = &hbd_status->entry[0];

	hbd_status->num_hdb_types = MPI3MR_MAX_NUM_HDB;
	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		hbd_status_entry->buf_type = diag_buffer->type;
		hbd_status_entry->status = diag_buffer->status;
		hbd_status_entry->trigger_type = diag_buffer->trigger_type;
		memcpy(&hbd_status_entry->trigger_data,
		    &diag_buffer->trigger_data,
		    sizeof(hbd_status_entry->trigger_data));
		hbd_status_entry->size = (diag_buffer->size / 1024);
		hbd_status_entry++;
	}
	hbd_status->element_trigger_format =
	    MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA;

	if (data_in_sz < 4) {
		dprint_bsg_err(mrioc, "%s: invalid size passed\n", __func__);
		rval = -EINVAL;
		goto out;
	}
	min_length = min(data_in_sz, length);
	if (job->request_payload.payload_len >= min_length) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    hbd_status, min_length);
		rval = 0;
	}
out:
	kfree(hbd_status);
	return rval;
}
/**
 * mpi3mr_enable_logdata - Handler for log data enable
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function enables log data caching in the driver if it is
 * not already enabled and returns the maximum number of log data
 * entries that can be cached in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_logdata_enable logdata_enable;

	if (!mrioc->logdata_buf) {
		mrioc->logdata_entry_sz =
		    (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
		    + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
		mrioc->logdata_buf_idx = 0;
		mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
		    mrioc->logdata_entry_sz, GFP_KERNEL);

		if (!mrioc->logdata_buf)
			return -ENOMEM;
	}

	memset(&logdata_enable, 0, sizeof(logdata_enable));
	logdata_enable.max_entries =
	    MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    &logdata_enable, sizeof(logdata_enable));
		return 0;
	}

	return -EINVAL;
}
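/*
 * Worked example (the reply size and struct size are assumed purely
 * for illustration): with a 64-byte reply frame and a notification
 * reply struct of 36 bytes (of which the last 4 bytes are the start
 * of event data), logdata_entry_sz = (64 - (36 - 4)) +
 * MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ = 32 + header, i.e. the event
 * payload of one reply plus the BSG entry header.
 */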
/**
 * mpi3mr_get_logdata - Handler for get log data
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function copies the log data entries to the user buffer
 * when log caching is enabled in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;

	if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
		return -EINVAL;

	num_entries = job->request_payload.payload_len / entry_sz;
	if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
		num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	sz = num_entries * entry_sz;

	if (job->request_payload.payload_len >= sz) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    mrioc->logdata_buf, sz);
		return 0;
	}
	return -EINVAL;
}
/**
 * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function is the handler for the PEL enable driver command.
 * It validates the application-given class and locale and, if
 * required, aborts the existing PEL wait request and/or issues a
 * new PEL wait request to the firmware and returns.
 *
 * Return: 0 on success and proper error codes on failure.
 */
static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_bsg_out_pel_enable pel_enable;
	u8 issue_pel_wait;
	u8 tmp_class;
	u16 tmp_locale;

	if (job->request_payload.payload_len != sizeof(pel_enable)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return rval;
	}

	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	if (mrioc->stop_bsgs) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &pel_enable, sizeof(pel_enable));

	if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
		dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
		    __func__, pel_enable.pel_class);
		rval = 0;
		goto out;
	}
	if (!mrioc->pel_enabled)
		issue_pel_wait = 1;
	else {
		if ((mrioc->pel_class <= pel_enable.pel_class) &&
		    !((mrioc->pel_locale & pel_enable.pel_locale) ^
		    pel_enable.pel_locale)) {
			issue_pel_wait = 0;
			rval = 0;
		} else {
			pel_enable.pel_locale |= mrioc->pel_locale;

			if (mrioc->pel_class < pel_enable.pel_class)
				pel_enable.pel_class = mrioc->pel_class;

			rval = mpi3mr_bsg_pel_abort(mrioc);
			if (rval) {
				dprint_bsg_err(mrioc,
				    "%s: pel_abort failed, status(%ld)\n",
				    __func__, rval);
				goto out;
			}
			issue_pel_wait = 1;
		}
	}
	if (issue_pel_wait) {
		tmp_class = mrioc->pel_class;
		tmp_locale = mrioc->pel_locale;
		mrioc->pel_class = pel_enable.pel_class;
		mrioc->pel_locale = pel_enable.pel_locale;
		mrioc->pel_enabled = 1;
		rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
		if (rval) {
			mrioc->pel_class = tmp_class;
			mrioc->pel_locale = tmp_locale;
			mrioc->pel_enabled = 0;
			dprint_bsg_err(mrioc,
			    "%s: pel get sequence number failed, status(%ld)\n",
			    __func__, rval);
		}
	}

out:
	return rval;
}
/**
 * mpi3mr_get_all_tgt_info - Get all target information
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the driver-managed target devices' device
 * handle, persistent ID, bus ID and target ID to the user
 * provided buffer for the specific controller. This function
 * also provides the number of devices managed by the driver for
 * the specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_devices = 0, i = 0, size;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_device_map_info *devmap_info = NULL;
	struct mpi3mr_all_tgt_info *alltgt_info = NULL;
	uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;

	if (job->request_payload.payload_len < sizeof(u32)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		num_devices++;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if ((job->request_payload.payload_len <= sizeof(u64)) ||
	    list_empty(&mrioc->tgtdev_list)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    &num_devices, sizeof(num_devices));
		return 0;
	}

	kern_entrylen = num_devices * sizeof(*devmap_info);
	size = sizeof(u64) + kern_entrylen;
	alltgt_info = kzalloc(size, GFP_KERNEL);
	if (!alltgt_info)
		return -ENOMEM;

	devmap_info = alltgt_info->dmi;
	memset((u8 *)devmap_info, 0xFF, kern_entrylen);
	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (i < num_devices) {
			devmap_info[i].handle = tgtdev->dev_handle;
			devmap_info[i].perst_id = tgtdev->perst_id;
			if (tgtdev->host_exposed && tgtdev->starget) {
				devmap_info[i].target_id = tgtdev->starget->id;
				devmap_info[i].bus_id =
				    tgtdev->starget->channel;
			}
		}
		i++;
	}
	num_devices = i;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	alltgt_info->num_devices = num_devices;

	usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
	    sizeof(*devmap_info);
	usr_entrylen *= sizeof(*devmap_info);
	min_entrylen = min(usr_entrylen, kern_entrylen);

	sg_copy_from_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    alltgt_info, (min_entrylen + sizeof(u64)));
	kfree(alltgt_info);
	return 0;
}
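/*
 * Copy-length example (sizeof(*devmap_info) == 16 is assumed for
 * illustration): with a 1000-byte user payload, usr_entrylen becomes
 * ((1000 - 8) / 16) * 16 = 992, i.e. the payload is rounded down to a
 * whole number of entries after the leading 8-byte count field, and
 * min(usr_entrylen, kern_entrylen) bounds the copy on both sides.
 */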
/**
 * mpi3mr_get_change_count - Get topology change count
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the topology change count provided by the
 * driver in events and cached in the driver to the user provided
 * buffer for the specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_change_count chgcnt;

	memset(&chgcnt, 0, sizeof(chgcnt));
	chgcnt.change_count = mrioc->change_count;
	if (job->request_payload.payload_len >= sizeof(chgcnt)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    &chgcnt, sizeof(chgcnt));
		return 0;
	}
	return -EINVAL;
}
/**
 * mpi3mr_bsg_adp_reset - Issue controller reset
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function identifies the user-provided reset type, issues
 * the appropriate reset to the controller, waits for it to
 * complete, reinitializes the controller and then returns.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = -EINVAL;
	u8 save_snapdump;
	struct mpi3mr_bsg_adp_reset adpreset;

	if (job->request_payload.payload_len !=
	    sizeof(adpreset)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		goto out;
	}

	if (mrioc->unrecoverable || mrioc->block_on_pci_err)
		goto out;

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &adpreset, sizeof(adpreset));

	switch (adpreset.reset_type) {
	case MPI3MR_BSG_ADPRESET_SOFT:
		save_snapdump = 0;
		break;
	case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
		save_snapdump = 1;
		break;
	default:
		dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
		    __func__, adpreset.reset_type);
		goto out;
	}

	rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
	    save_snapdump);

	if (rval)
		dprint_bsg_err(mrioc,
		    "%s: reset handler returned error(%ld) for reset type %d\n",
		    __func__, rval, adpreset.reset_type);
out:
	return rval;
}
/**
 * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function provides adapter information for the given
 * controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	enum mpi3mr_iocstate ioc_state;
	struct mpi3mr_bsg_in_adpinfo adpinfo;

	memset(&adpinfo, 0, sizeof(adpinfo));
	adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
	adpinfo.pci_dev_id = mrioc->pdev->device;
	adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
	adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
	adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
	adpinfo.pci_bus = mrioc->pdev->bus->number;
	adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
	adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
	adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
	adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
	    sizeof(adpinfo.driver_info));

	if (job->request_payload.payload_len >= sizeof(adpinfo)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
		    job->request_payload.sg_cnt,
		    &adpinfo, sizeof(adpinfo));
		return 0;
	}
	return -EINVAL;
}
/**
 * mpi3mr_bsg_process_drv_cmds - Driver Command handler
 * @job: BSG job reference
 *
 * This function is the top-level handler for driver commands. It
 * does basic validation of the buffer, identifies the opcode and
 * switches to the correct sub-handler.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_ioc *mrioc = NULL;
	struct mpi3mr_bsg_packet *bsg_req = NULL;
	struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;

	bsg_req = job->request;
	drvrcmd = &bsg_req->cmd.drvrcmd;

	mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
	if (!mrioc)
		return -ENODEV;

	if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
		rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
		return rval;
	}

	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
		return -ERESTARTSYS;

	switch (drvrcmd->opcode) {
	case MPI3MR_DRVBSG_OPCODE_ADPRESET:
		rval = mpi3mr_bsg_adp_reset(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
		rval = mpi3mr_get_all_tgt_info(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
		rval = mpi3mr_get_change_count(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
		rval = mpi3mr_enable_logdata(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
		rval = mpi3mr_get_logdata(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_PELENABLE:
		rval = mpi3mr_bsg_pel_enable(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_QUERY_HDB:
		rval = mpi3mr_bsg_query_hdb(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_REPOST_HDB:
		rval = mpi3mr_bsg_repost_hdb(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB:
		rval = mpi3mr_bsg_upload_hdb(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS:
		rval = mpi3mr_bsg_refresh_hdb_triggers(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
	default:
		pr_err("%s: unsupported driver command opcode %d\n",
		    MPI3MR_DRIVER_NAME, drvrcmd->opcode);
		break;
	}
	mutex_unlock(&mrioc->bsg_cmds.mutex);
	return rval;
}
/**
 * mpi3mr_total_num_ioctl_sges - Count number of SGEs required
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 *
 * This function returns the total number of data SGEs required,
 * including zero-length SGEs and excluding the management request
 * and response buffer, for the given list of data buffer
 * mappings.
 *
 * Return: Number of SGE elements needed
 */
static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs,
	u8 bufcnt)
{
	u16 i, sge_count = 0;

	for (i = 0; i < bufcnt; i++, drv_bufs++) {
		if (drv_bufs->data_dir == DMA_NONE ||
		    drv_bufs->kern_buf)
			continue;
		sge_count += drv_bufs->num_dma_desc;
		if (!drv_bufs->num_dma_desc)
			sge_count++;
	}
	return sge_count;
}
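/*
 * Counting example (illustrative): for three data buffers mapped with
 * 4, 0 and 2 DMA descriptors respectively (none DMA_NONE or
 * kernel-buffer backed), the helper returns 4 + 1 + 2 = 7, the
 * zero-descriptor buffer still consuming one zero-length SGE.
 */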
/**
 * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
 * @mrioc: Adapter instance reference
 * @mpi_req: MPI request
 * @sgl_offset: offset to start sgl in the MPI request
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 * @is_rmc: Whether the buffer list has a management command buffer
 * @is_rmr: Whether the buffer list has a management response buffer
 * @num_datasges: Number of data buffers in the list
 *
 * This function places the DMA addresses of the given buffers in
 * proper format as SGEs in the given MPI request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req,
	u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs,
	u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges)
{
	struct mpi3_request_header *mpi_header =
	    (struct mpi3_request_header *)mpi_req;
	u8 *sgl = (mpi_req + sgl_offset), count = 0;
	struct mpi3_mgmt_passthrough_request *rmgmt_req =
	    (struct mpi3_mgmt_passthrough_request *)mpi_req;
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag;
	u16 available_sges, i, sges_needed;
	u32 sge_element_size = sizeof(struct mpi3_sge_common);
	bool chain_used = false;

	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
	sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt);

	if (is_rmc) {
		mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
		    sgl_flags_last, drv_buf_iter->kern_buf_len,
		    drv_buf_iter->kern_buf_dma);
		sgl = (u8 *)drv_buf_iter->kern_buf +
		    drv_buf_iter->bsg_buf_len;
		available_sges = (drv_buf_iter->kern_buf_len -
		    drv_buf_iter->bsg_buf_len) / sge_element_size;

		if (sges_needed > available_sges)
			return -1;

		chain_used = true;
		drv_buf_iter++;
		count++;
		if (is_rmr) {
			mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
			    sgl_flags_last, drv_buf_iter->kern_buf_len,
			    drv_buf_iter->kern_buf_dma);
			drv_buf_iter++;
			count++;
		} else
			mpi3mr_build_zero_len_sge(
			    &rmgmt_req->response_sgl);
		if (num_datasges) {
			i = 0;
			goto build_sges;
		}
	} else {
		if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ)
			return -1;
		available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
		    sge_element_size;
		if (!available_sges)
			return -1;
	}
	if (!num_datasges) {
		mpi3mr_build_zero_len_sge(sgl);
		return 0;
	}
	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
		if ((sges_needed > 2) || (sges_needed > available_sges))
			return -1;
		for (; count < bufcnt; count++, drv_buf_iter++) {
			if (drv_buf_iter->data_dir == DMA_NONE ||
			    !drv_buf_iter->num_dma_desc)
				continue;
			mpi3mr_add_sg_single(sgl, sgl_flags_last,
			    drv_buf_iter->dma_desc[0].size,
			    drv_buf_iter->dma_desc[0].dma_addr);
			sgl += sge_element_size;
		}
		return 0;
	}
	i = 0;

build_sges:
	for (; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		if (!drv_buf_iter->num_dma_desc) {
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flag_eob;
			if (num_datasges == 1)
				flag = sgl_flags_last;
			mpi3mr_add_sg_single(sgl, flag, 0, 0);
			sgl += sge_element_size;
			sges_needed--;
			available_sges--;
			num_datasges--;
			continue;
		}
		for (; i < drv_buf_iter->num_dma_desc; i++) {
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags;
			if (i == (drv_buf_iter->num_dma_desc - 1)) {
				if (num_datasges == 1)
					flag = sgl_flags_last;
				else
					flag = sgl_flag_eob;
			}
			mpi3mr_add_sg_single(sgl, flag,
			    drv_buf_iter->dma_desc[i].size,
			    drv_buf_iter->dma_desc[i].dma_addr);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
		}
		num_datasges--;
		i = 0;
	}
	return 0;

setup_chain:
	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
	if (sges_needed > available_sges)
		return -1;
	mpi3mr_add_sg_single(sgl, last_chain_sgl_flag,
	    (sges_needed * sge_element_size),
	    mrioc->ioctl_chain_sge.dma_addr);
	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
	sgl = (u8 *)mrioc->ioctl_chain_sge.addr;
	chain_used = true;
	goto build_sges;
}
/**
 * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
 * @nvme_encap_request: NVMe encapsulated MPI request
 *
 * This function returns the type of the data format specified in
 * the user-provided NVMe command in the NVMe encapsulated
 * request.
 *
 * Return: Data format of the NVMe command (PRP/SGL etc)
 */
static unsigned int mpi3mr_get_nvme_data_fmt(
	struct mpi3_nvme_encapsulated_request *nvme_encap_request)
{
	u8 format = 0;

	format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
	return format;
}
/**
 * mpi3mr_build_nvme_sgl - SGL constructor for NVME
 *                         encapsulated request
 * @mrioc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 *
 * This function places the DMA addresses of the given buffers in
 * proper format as SGEs in the given NVMe encapsulated request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
{
	struct mpi3mr_nvme_pt_sge *nvme_sgl;
	__le64 sgl_dma;
	u8 count;
	size_t length = 0;
	u16 available_sges = 0, i;
	u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
	    mrioc->facts.sge_mod_shift) << 32);
	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
	    mrioc->facts.sge_mod_shift) << 32;
	u32 size;

	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
	    ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any sgl.
	 */
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		length = drv_buf_iter->kern_buf_len;
		break;
	}
	if (!length || !drv_buf_iter->num_dma_desc)
		return 0;

	if (drv_buf_iter->num_dma_desc == 1) {
		available_sges = 1;
		goto build_sges;
	}

	sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr);
	if (sgl_dma & sgemod_mask) {
		dprint_bsg_err(mrioc,
		    "%s: SGL chain address collides with SGE modifier\n",
		    __func__);
		return -1;
	}

	sgl_dma &= ~sgemod_mask;
	sgl_dma |= sgemod_val;

	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
	if (available_sges < drv_buf_iter->num_dma_desc)
		return -1;
	memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
	nvme_sgl->base_addr = sgl_dma;
	size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
	nvme_sgl->length = cpu_to_le32(size);
	nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;
	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr;

build_sges:
	for (i = 0; i < drv_buf_iter->num_dma_desc; i++) {
		sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr);
		if (sgl_dma & sgemod_mask) {
			dprint_bsg_err(mrioc,
			    "%s: SGL address collides with SGE modifier\n",
			    __func__);
			return -1;
		}

		sgl_dma &= ~sgemod_mask;
		sgl_dma |= sgemod_val;

		nvme_sgl->base_addr = sgl_dma;
		nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size);
		nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
		nvme_sgl++;
		available_sges--;
	}

	return 0;
}
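/*
 * Modifier arithmetic example (hypothetical IOCFacts values): with
 * sge_mod_mask = 0xF and sge_mod_shift = 28, the 64-bit mask computed
 * above is 0xF000000000000000. A DMA address whose top nibble is
 * already non-zero "collides" and is rejected; otherwise that nibble
 * is cleared and replaced with sge_mod_value so the firmware can
 * recognize host-built SGL addresses.
 */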
/**
 * mpi3mr_build_nvme_prp - PRP constructor for NVME
 *			       encapsulated request
 * @mrioc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @drv_bufs: DMA address of the buffers to be placed in SGL
 * @bufcnt: Number of DMA buffers
 *
 * This function places the DMA address of the given buffers in
 * proper format as PRP entries in the given NVMe encapsulated
 * request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
{
	int prp_size = MPI3MR_NVME_PRP_SIZE;
	__le64 *prp_entry, *prp1_entry, *prp2_entry;
	__le64 *prp_page;
	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
	u32 offset, entry_len, dev_pgsz;
	u32 page_mask_result, page_mask;
	size_t length = 0, desc_len;
	u8 count;
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
			    mrioc->facts.sge_mod_shift) << 32);
	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
			  mrioc->facts.sge_mod_shift) << 32;
	u16 dev_handle = nvme_encap_request->dev_handle;
	struct mpi3mr_tgt_dev *tgtdev;
	u16 desc_count = 0;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
			__func__, dev_handle);
		return -1;
	}

	if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
		dprint_bsg_err(mrioc,
		    "%s: NVMe device page size is zero for handle 0x%04x\n",
		    __func__, dev_handle);
		mpi3mr_tgtdev_put(tgtdev);
		return -1;
	}

	dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
	mpi3mr_tgtdev_put(tgtdev);
	page_mask = dev_pgsz - 1;

	if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n",
		    __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle);
		return -1;
	}

	if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) {
		dprint_bsg_err(mrioc,
		    "%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n",
		    __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle);
		return -1;
	}

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		length = drv_buf_iter->kern_buf_len;
		break;
	}

	if (!length || !drv_buf_iter->num_dma_desc)
		return 0;

	for (count = 0; count < drv_buf_iter->num_dma_desc; count++) {
		dma_addr = drv_buf_iter->dma_desc[count].dma_addr;
		if (dma_addr & page_mask) {
			dprint_bsg_err(mrioc,
			    "%s:dma_addr %pad is not aligned with page size 0x%x\n",
			    __func__, &dma_addr, dev_pgsz);
			return -1;
		}
	}

	dma_addr = drv_buf_iter->dma_desc[0].dma_addr;
	desc_len = drv_buf_iter->dma_desc[0].size;

	mrioc->prp_sz = 0;
	mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
	    dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);

	if (!mrioc->prp_list_virt)
		return -1;
	mrioc->prp_sz = dev_pgsz;

	/*
	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
	 * PRP1 is located at a 24 byte offset from the start of the NVMe
	 * command. Then set the current PRP entry pointer to PRP1.
	 */
	prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
	    MPI3MR_NVME_CMD_PRP1_OFFSET);
	prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
	    MPI3MR_NVME_CMD_PRP2_OFFSET);
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mrioc->prp_list_virt;
	prp_page_dma = mrioc->prp_list_dma;

	/*
	 * Check if we are within 1 entry of a page boundary we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
		    __func__);
		goto err_out;
	}

	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;

	/* Loop while the length is not zero. */
	while (length) {
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result && (length > dev_pgsz)) {
			dprint_bsg_err(mrioc,
			    "%s: single PRP page is not sufficient\n",
			    __func__);
			goto err_out;
		}

		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = dev_pgsz - offset;

		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);
			if (*prp1_entry & sgemod_mask) {
				dprint_bsg_err(mrioc,
				    "%s: PRP1 address collides with SGE modifier\n",
				    __func__);
				goto err_out;
			}
			*prp1_entry &= ~sgemod_mask;
			*prp1_entry |= sgemod_val;

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer? If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > dev_pgsz) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);
				if (*prp2_entry & sgemod_mask) {
					dprint_bsg_err(mrioc,
					    "%s: PRP list address collides with SGE modifier\n",
					    __func__);
					goto err_out;
				}
				*prp2_entry &= ~sgemod_mask;
				*prp2_entry |= sgemod_val;

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
				continue;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
				if (*prp2_entry & sgemod_mask) {
					dprint_bsg_err(mrioc,
					    "%s: PRP2 collides with SGE modifier\n",
					    __func__);
					goto err_out;
				}
				*prp2_entry &= ~sgemod_mask;
				*prp2_entry |= sgemod_val;
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			if (*prp_entry & sgemod_mask) {
				dprint_bsg_err(mrioc,
				    "%s: PRP address collides with SGE modifier\n",
				    __func__);
				goto err_out;
			}
			*prp_entry &= ~sgemod_mask;
			*prp_entry |= sgemod_val;
			prp_entry++;
			prp_entry_dma += prp_size;
		}

		/* decrement length accounting for last partial page. */
		if (entry_len >= length) {
			length = 0;
		} else {
			if (entry_len <= desc_len) {
				dma_addr += entry_len;
				desc_len -= entry_len;
			}
			if (!desc_len) {
				if ((++desc_count) >=
				    drv_buf_iter->num_dma_desc) {
					dprint_bsg_err(mrioc,
					    "%s: Invalid len %zd while building PRP\n",
					    __func__, length);
					goto err_out;
				}
				dma_addr =
				    drv_buf_iter->dma_desc[desc_count].dma_addr;
				desc_len =
				    drv_buf_iter->dma_desc[desc_count].size;
			}
			length -= entry_len;
		}
	}

	return 0;
err_out:
	if (mrioc->prp_list_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
		    mrioc->prp_list_virt, mrioc->prp_list_dma);
		mrioc->prp_list_virt = NULL;
	}
	return -1;
}
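/*
 * A minimal sketch of the PRP layout the builder above produces, assuming
 * a 4KiB device page size: for a page-aligned 8KiB transfer, PRP1 points
 * at the first data page and PRP2 at the second (two PRPs, no list). For
 * a page-aligned 12KiB transfer, PRP1 points at page 0 and PRP2 points at
 * the PRP list allocated above, whose first two entries point at pages 1
 * and 2.
 */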
/**
 * mpi3mr_map_data_buffer_dma - build dma descriptors for data
 *                              buffers
 * @mrioc: Adapter instance reference
 * @drv_buf: buffer map descriptor
 * @desc_count: Number of already consumed dma descriptors
 *
 * This function computes how many pre-allocated DMA descriptors
 * are required for the given data buffer and, if that many
 * descriptors are free, sets up the mapping of the scattered
 * DMA addresses to the given data buffer; if the data direction
 * of the buffer is DMA_TO_DEVICE, the actual data is copied to
 * the DMA buffers.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc,
				      struct mpi3mr_buf_map *drv_buf,
				      u16 desc_count)
{
	u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE;
	u32 buf_len = drv_buf->kern_buf_len, copied_len = 0;

	if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
		needed_desc++;
	if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
		dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n",
			       __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
		return -1;
	}
	drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc,
				    GFP_KERNEL);
	if (!drv_buf->dma_desc)
		return -1;
	for (i = 0; i < needed_desc; i++, desc_count++) {
		drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr;
		drv_buf->dma_desc[i].dma_addr =
		    mrioc->ioctl_sge[desc_count].dma_addr;
		if (buf_len < mrioc->ioctl_sge[desc_count].size)
			drv_buf->dma_desc[i].size = buf_len;
		else
			drv_buf->dma_desc[i].size =
			    mrioc->ioctl_sge[desc_count].size;
		buf_len -= drv_buf->dma_desc[i].size;
		memset(drv_buf->dma_desc[i].addr, 0,
		       mrioc->ioctl_sge[desc_count].size);
		if (drv_buf->data_dir == DMA_TO_DEVICE) {
			memcpy(drv_buf->dma_desc[i].addr,
			       drv_buf->bsg_buf + copied_len,
			       drv_buf->dma_desc[i].size);
			copied_len += drv_buf->dma_desc[i].size;
		}
	}
	drv_buf->num_dma_desc = needed_desc;
	return 0;
}
/**
 * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
 * @job: BSG job reference
 *
 * This function is the top level handler for MPI Pass through
 * commands. It does basic validation of the input data buffers,
 * identifies the given buffer types and MPI command, allocates
 * DMAable memory for user given buffers, constructs SGLs
 * properly and passes the command to the firmware.
 *
 * Once the MPI command is completed the driver copies the data,
 * if any, and the reply and sense information to the user
 * provided buffers. If the command times out, a controller reset
 * is issued prior to returning.
 *
 * Return: 0 on success and proper error codes on failure
 */

static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_ioc *mrioc = NULL;
	u8 *mpi_req = NULL, *sense_buff_k = NULL;
	u8 mpi_msg_size = 0;
	struct mpi3mr_bsg_packet *bsg_req = NULL;
	struct mpi3mr_bsg_mptcmd *karg;
	struct mpi3mr_buf_entry *buf_entries = NULL;
	struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
	u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0;
	u8 din_cnt = 0, dout_cnt = 0;
	u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
	u8 block_io = 0, nvme_fmt = 0, resp_code = 0;
	struct mpi3_request_header *mpi_header = NULL;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_scsi_task_mgmt_request *tm_req;
	u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
	u16 dev_handle;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
	struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
	u32 din_size = 0, dout_size = 0;
	u8 *din_buf = NULL, *dout_buf = NULL;
	u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
	u16 rmc_size = 0, desc_count = 0;

	bsg_req = job->request;
	karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;

	mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
	if (!mrioc)
		return -ENODEV;

	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
		return -ERESTARTSYS;

	if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		return -EAGAIN;
	}

	if (!mrioc->ioctl_sges_allocated) {
		dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
			       __func__);
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		return -ENOMEM;
	}

	if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
		karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;

	mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
	if (!mpi_req) {
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		return -ENOMEM;
	}
	mpi_header = (struct mpi3_request_header *)mpi_req;

	bufcnt = karg->buf_entry_list.num_of_entries;
	drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
	if (!drv_bufs) {
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		rval = -ENOMEM;
		goto out;
	}

	dout_buf = kzalloc(job->request_payload.payload_len,
			   GFP_KERNEL);
	if (!dout_buf) {
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		rval = -ENOMEM;
		goto out;
	}

	din_buf = kzalloc(job->reply_payload.payload_len,
			  GFP_KERNEL);
	if (!din_buf) {
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		rval = -ENOMEM;
		goto out;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  dout_buf, job->request_payload.payload_len);

	buf_entries = karg->buf_entry_list.buf_entry;
	sgl_din_iter = din_buf;
	sgl_dout_iter = dout_buf;
	drv_buf_iter = drv_bufs;
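	/*
	 * Classify each user buffer entry. The validity rules encoded below
	 * imply, for example, that a RAID management pass-through must place
	 * its command buffer at entry 0 and its response buffer at entry 1,
	 * and that multiple DATA_IN or DATA_OUT buffers are only accepted
	 * for RAID management commands.
	 */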
	for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
		switch (buf_entries->buf_type) {
		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_TO_DEVICE;
			is_rmcb = 1;
			if ((count != 0) || !buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
			is_rmrb = 1;
			if (count != 1 || !is_rmcb || !buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_DATA_IN:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
			din_cnt++;
			din_size += buf_entries->buf_len;
			if ((din_cnt > 1) && !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_DATA_OUT:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_TO_DEVICE;
			dout_cnt++;
			dout_size += buf_entries->buf_len;
			if ((dout_cnt > 1) && !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			mpirep_offset = count;
			if (!buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			erb_offset = count;
			if (!buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			mpi_msg_size = buf_entries->buf_len;
			if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
			    (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
				dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
					__func__);
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				rval = -EINVAL;
				goto out;
			}
			memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
			break;
		default:
			invalid_be = 1;
			break;
		}
		if (invalid_be) {
			dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
				__func__);
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			rval = -EINVAL;
			goto out;
		}

		if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
			dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
				       __func__);
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			rval = -EINVAL;
			goto out;
		}
		if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
			dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
				       __func__);
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			rval = -EINVAL;
			goto out;
		}

		drv_buf_iter->bsg_buf = sgl_iter;
		drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
	}
	if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) {
		dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
			       __func__, __LINE__, mpi_header->function, din_size,
			       dout_size);
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		rval = -EINVAL;
		goto out;
	}

	if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
		    __func__, __LINE__, mpi_header->function, din_size);
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		rval = -EINVAL;
		goto out;
	}
	if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
		    __func__, __LINE__, mpi_header->function, dout_size);
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		rval = -EINVAL;
		goto out;
	}

	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
		if (din_size > MPI3MR_IOCTL_SGE_SIZE ||
		    dout_size > MPI3MR_IOCTL_SGE_SIZE) {
			dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
				       __func__, __LINE__, din_cnt, dout_cnt, din_size,
				       dout_size);
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			rval = -EINVAL;
			goto out;
		}
	}
	drv_buf_iter = drv_bufs;
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;

		drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
		if (is_rmcb && !count) {
			drv_buf_iter->kern_buf_len =
			    mrioc->ioctl_chain_sge.size;
			drv_buf_iter->kern_buf =
			    mrioc->ioctl_chain_sge.addr;
			drv_buf_iter->kern_buf_dma =
			    mrioc->ioctl_chain_sge.dma_addr;
			drv_buf_iter->dma_desc = NULL;
			drv_buf_iter->num_dma_desc = 0;
			memset(drv_buf_iter->kern_buf, 0,
			       drv_buf_iter->kern_buf_len);
			tmplen = min(drv_buf_iter->kern_buf_len,
				     drv_buf_iter->bsg_buf_len);
			rmc_size = tmplen;
			memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
		} else if (is_rmrb && (count == 1)) {
			drv_buf_iter->kern_buf_len =
			    mrioc->ioctl_resp_sge.size;
			drv_buf_iter->kern_buf =
			    mrioc->ioctl_resp_sge.addr;
			drv_buf_iter->kern_buf_dma =
			    mrioc->ioctl_resp_sge.dma_addr;
			drv_buf_iter->dma_desc = NULL;
			drv_buf_iter->num_dma_desc = 0;
			memset(drv_buf_iter->kern_buf, 0,
			       drv_buf_iter->kern_buf_len);
			tmplen = min(drv_buf_iter->kern_buf_len,
				     drv_buf_iter->bsg_buf_len);
			drv_buf_iter->kern_buf_len = tmplen;
			memset(drv_buf_iter->bsg_buf, 0,
			       drv_buf_iter->bsg_buf_len);
		} else {
			if (!drv_buf_iter->kern_buf_len)
				continue;
			if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
				rval = -ENOMEM;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
					       __func__, __LINE__);
				goto out;
			}
			desc_count += drv_buf_iter->num_dma_desc;
		}
	}
	if (erb_offset != 0xFF) {
		sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
		if (!sense_buff_k) {
			rval = -ENOMEM;
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			goto out;
		}
	}

	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
			       __func__);
		rval = -EFAULT;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		rval = -EAGAIN;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}

	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		rval = -EAGAIN;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
		nvme_fmt = mpi3mr_get_nvme_data_fmt(
			(struct mpi3_nvme_encapsulated_request *)mpi_req);
		if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
			if (mpi3mr_build_nvme_prp(mrioc,
			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
			    drv_bufs, bufcnt)) {
				rval = -ENOMEM;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				goto out;
			}
		} else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
			nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
			if (mpi3mr_build_nvme_sgl(mrioc,
			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
			    drv_bufs, bufcnt)) {
				rval = -EINVAL;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				goto out;
			}
		} else {
			dprint_bsg_err(mrioc,
			    "%s:invalid NVMe command format\n", __func__);
			rval = -EINVAL;
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			goto out;
		}
	} else {
		if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size,
					 drv_bufs, bufcnt, is_rmcb, is_rmrb,
					 (dout_cnt + din_cnt))) {
			dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__);
			rval = -EAGAIN;
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			goto out;
		}
	}
	if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
		tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
		if (tm_req->task_type !=
		    MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			dev_handle = tm_req->dev_handle;
			block_io = 1;
		}
	}
	if (block_io) {
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
			stgt_priv = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
			atomic_inc(&stgt_priv->block_io);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->bsg_cmds.is_waiting = 1;
	mrioc->bsg_cmds.callback = NULL;
	mrioc->bsg_cmds.is_sense = 0;
	mrioc->bsg_cmds.sensebuf = sense_buff_k;
	memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
	mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
	if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
		dprint_bsg_info(mrioc,
		    "%s: posting bsg request to the controller\n", __func__);
		dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
		    "bsg_mpi3_req");
		if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
			drv_buf_iter = &drv_bufs[0];
			dprint_dump(drv_buf_iter->kern_buf,
			    rmc_size, "mpi3_mgmt_req");
		}
	}

	init_completion(&mrioc->bsg_cmds.done);
	rval = mpi3mr_admin_request_post(mrioc, mpi_req,
	    MPI3MR_ADMIN_REQ_FRAME_SZ, 0);
	if (rval) {
		mrioc->bsg_cmds.is_waiting = 0;
		dprint_bsg_err(mrioc,
		    "%s: posting bsg request is failed\n", __func__);
		rval = -EAGAIN;
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->bsg_cmds.done,
	    (karg->timeout * HZ));
	if (block_io && stgt_priv)
		atomic_dec(&stgt_priv->block_io);
	if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->bsg_cmds.is_waiting = 0;
		rval = -EAGAIN;
		if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
			goto out_unlock;
		if (((mpi_header->function != MPI3_FUNCTION_SCSI_IO) &&
		    (mpi_header->function != MPI3_FUNCTION_NVME_ENCAPSULATED))
		    || (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR)) {
			ioc_info(mrioc, "%s: bsg request timedout after %d seconds\n",
				 __func__, karg->timeout);
			if (!(mrioc->logging_level & MPI3_DEBUG_BSG_INFO)) {
				dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
				    "bsg_mpi3_req");
				if (mpi_header->function ==
				    MPI3_FUNCTION_MGMT_PASSTHROUGH) {
					drv_buf_iter = &drv_bufs[0];
					dprint_dump(drv_buf_iter->kern_buf,
					    rmc_size, "mpi3_mgmt_req");
				}
			}
		}
		if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
		    (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) {
			dprint_bsg_err(mrioc, "%s: bsg request timedout after %d seconds,\n"
				"issuing target reset to (0x%04x)\n", __func__,
				karg->timeout, mpi_header->function_dependent);
			mpi3mr_issue_tm(mrioc,
			    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
			    mpi_header->function_dependent, 0,
			    MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
			    &mrioc->host_tm_cmds, &resp_code, NULL);
		}
		if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
		    !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
		goto out_unlock;
	}
	dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);
	if (mrioc->prp_list_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
		    mrioc->prp_list_virt, mrioc->prp_list_dma);
		mrioc->prp_list_virt = NULL;
	}

	if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_info(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__,
		    (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->bsg_cmds.ioc_loginfo);
	}

	if ((mpirep_offset != 0xFF) &&
	    drv_bufs[mpirep_offset].bsg_buf_len) {
		drv_buf_iter = &drv_bufs[mpirep_offset];
		drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) +
					      mrioc->reply_sz);
		bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);

		if (!bsg_reply_buf) {
			rval = -ENOMEM;
			goto out_unlock;
		}
		if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
			bsg_reply_buf->mpi_reply_type =
				MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
			memcpy(bsg_reply_buf->reply_buf,
			    mrioc->bsg_cmds.reply, mrioc->reply_sz);
		} else {
			bsg_reply_buf->mpi_reply_type =
				MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
			status_desc = (struct mpi3_status_reply_descriptor *)
			    bsg_reply_buf->reply_buf;
			status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
			status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
		}
		tmplen = min(drv_buf_iter->kern_buf_len,
			     drv_buf_iter->bsg_buf_len);
		memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
	}

	if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
	    mrioc->bsg_cmds.is_sense) {
		drv_buf_iter = &drv_bufs[erb_offset];
		tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
		memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
	}

	drv_buf_iter = drv_bufs;
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		if ((count == 1) && is_rmrb) {
			memcpy(drv_buf_iter->bsg_buf,
			    drv_buf_iter->kern_buf,
			    drv_buf_iter->kern_buf_len);
		} else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
			tmplen = 0;
			for (desc_count = 0;
			    desc_count < drv_buf_iter->num_dma_desc;
			    desc_count++) {
				memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen),
				    drv_buf_iter->dma_desc[desc_count].addr,
				    drv_buf_iter->dma_desc[desc_count].size);
				tmplen +=
				    drv_buf_iter->dma_desc[desc_count].size;
			}
		}
	}

out_unlock:
	if (din_buf) {
		job->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    din_buf, job->reply_payload.payload_len);
	}
	mrioc->bsg_cmds.is_sense = 0;
	mrioc->bsg_cmds.sensebuf = NULL;
	mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->bsg_cmds.mutex);
out:
	kfree(sense_buff_k);
	kfree(dout_buf);
	kfree(din_buf);
	kfree(mpi_req);
	if (drv_bufs) {
		drv_buf_iter = drv_bufs;
		for (count = 0; count < bufcnt; count++, drv_buf_iter++)
			kfree(drv_buf_iter->dma_desc);
		kfree(drv_bufs);
	}
	kfree(bsg_reply_buf);
	return rval;
}
/**
 * mpi3mr_app_save_logdata - Save Log Data events
 * @mrioc: Adapter instance reference
 * @event_data: event data associated with log data event
 * @event_data_size: event data size to copy
 *
 * If log data event caching is enabled by the applications,
 * then this function saves the log data in the circular queue
 * and sends the async signal SIGIO to indicate there is an async
 * event from the firmware to the event monitoring applications.
 *
 * Return: Nothing.
 */
void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
	u16 event_data_size)
{
	u32 index = mrioc->logdata_buf_idx, sz;
	struct mpi3mr_logdata_entry *entry;

	if (!(mrioc->logdata_buf))
		return;

	entry = (struct mpi3mr_logdata_entry *)
		(mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
	entry->valid_entry = 1;
	sz = min(mrioc->logdata_entry_sz, event_data_size);
	memcpy(entry->data, event_data, sz);
	mrioc->logdata_buf_idx =
		((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
	atomic64_inc(&event_counter);
}
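/*
 * A small worked example of the circular indexing above, assuming an
 * illustrative MPI3MR_BSG_LOGDATA_MAX_ENTRIES of 128: after entry 126 is
 * written, logdata_buf_idx becomes 127; after entry 127 it wraps to
 * (128 % 128) = 0, so the oldest cached log data entry is overwritten
 * next.
 */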
/**
 * mpi3mr_bsg_request - bsg request entry point
 * @job: BSG job reference
 *
 * This is the driver's entry point for bsg requests
 *
 * Return: 0 on success and proper error codes on failure
 */
static int mpi3mr_bsg_request(struct bsg_job *job)
{
	long rval = -EINVAL;
	unsigned int reply_payload_rcv_len = 0;

	struct mpi3mr_bsg_packet *bsg_req = job->request;

	switch (bsg_req->cmd_type) {
	case MPI3MR_DRV_CMD:
		rval = mpi3mr_bsg_process_drv_cmds(job);
		break;
	case MPI3MR_MPT_CMD:
		rval = mpi3mr_bsg_process_mpt_cmds(job);
		break;
	default:
		pr_err("%s: unsupported BSG command(0x%08x)\n",
		    MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
		break;
	}

	bsg_job_done(job, rval, reply_payload_rcv_len);

	return 0;
}
/**
 * mpi3mr_bsg_exit - de-registration from bsg layer
 * @mrioc: Adapter instance reference
 *
 * This will be called during driver unload and all
 * bsg resources allocated during load will be freed.
 *
 * Return: Nothing.
 */
void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;

	if (!mrioc->bsg_queue)
		return;

	bsg_remove_queue(mrioc->bsg_queue);
	mrioc->bsg_queue = NULL;

	device_del(bsg_dev);
	put_device(bsg_dev);
}
/**
 * mpi3mr_bsg_node_release - release bsg device node
 * @dev: bsg device node
 *
 * Decrements the bsg dev parent reference count
 *
 * Return: Nothing.
 */
static void mpi3mr_bsg_node_release(struct device *dev)
{
	put_device(dev->parent);
}
/**
 * mpi3mr_bsg_init - registration with bsg layer
 * @mrioc: Adapter instance reference
 *
 * This will be called during driver load and it will
 * register the driver with the bsg layer
 *
 * Return: Nothing.
 */
void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;
	struct device *parent = &mrioc->shost->shost_gendev;
	struct queue_limits lim = {
		.max_hw_sectors		= MPI3MR_MAX_APP_XFER_SECTORS,
		.max_segments		= MPI3MR_MAX_APP_XFER_SEGMENTS,
	};
	struct request_queue *q;

	device_initialize(bsg_dev);

	bsg_dev->parent = get_device(parent);
	bsg_dev->release = mpi3mr_bsg_node_release;

	dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);

	if (device_add(bsg_dev)) {
		ioc_err(mrioc, "%s: bsg device add failed\n",
		    dev_name(bsg_dev));
		put_device(bsg_dev);
		return;
	}

	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
			mpi3mr_bsg_request, NULL, 0);
	if (IS_ERR(q)) {
		ioc_err(mrioc, "%s: bsg registration failed\n",
		    dev_name(bsg_dev));
		device_del(bsg_dev);
		put_device(bsg_dev);
		return;
	}

	mrioc->bsg_queue = q;
}
/**
 * version_fw_show - SysFS callback for firmware version read
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware version
 */
static ssize_t
version_fw_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;

	return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
}
static DEVICE_ATTR_RO(version_fw);
/**
 * fw_queue_depth_show - SysFS callback for firmware max cmds
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware max commands
 */
static ssize_t
fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
}
static DEVICE_ATTR_RO(fw_queue_depth);
/**
 * op_req_q_count_show - SysFS callback for request queue count
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying request queue count
 */
static ssize_t
op_req_q_count_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
}
static DEVICE_ATTR_RO(op_req_q_count);
/**
 * reply_queue_count_show - SysFS callback for reply queue count
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying reply queue count
 */
static ssize_t
reply_queue_count_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
}
static DEVICE_ATTR_RO(reply_queue_count);
/**
 * logging_level_show - Show controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * A sysfs 'read/write' shost attribute, to show the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: sysfs_emit() return
 */
static ssize_t
logging_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
}

/**
 * logging_level_store - Change controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 * @count: size of the buffer
 *
 * A sysfs 'read/write' shost attribute, to change the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: strlen() return
 */
static ssize_t
logging_level_store(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int val = 0;

	if (kstrtoint(buf, 0, &val) != 0)
		return -EINVAL;

	mrioc->logging_level = val;
	ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
	return strlen(buf);
}
static DEVICE_ATTR_RW(logging_level);
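/*
 * Usage sketch (paths assume the usual Scsi_Host sysfs layout): reading
 * /sys/class/scsi_host/host<N>/logging_level prints the current mask, and
 * e.g. "echo 0x01 > /sys/class/scsi_host/host<N>/logging_level" stores a
 * new one via kstrtoint() with base 0, which accepts decimal, octal and
 * 0x-prefixed hex values.
 */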
/**
 * adp_state_show() - SysFS callback for adapter state show
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying adapter state
 */
static ssize_t
adp_state_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	enum mpi3mr_iocstate ioc_state;
	u8 adp_state;

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if (mrioc->reset_in_progress || mrioc->stop_bsgs ||
		 mrioc->block_on_pci_err)
		adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	return sysfs_emit(buf, "%u\n", adp_state);
}

static DEVICE_ATTR_RO(adp_state);
static struct attribute *mpi3mr_host_attrs[] = {
	&dev_attr_version_fw.attr,
	&dev_attr_fw_queue_depth.attr,
	&dev_attr_op_req_q_count.attr,
	&dev_attr_reply_queue_count.attr,
	&dev_attr_logging_level.attr,
	&dev_attr_adp_state.attr,
	NULL,
};

static const struct attribute_group mpi3mr_host_attr_group = {
	.attrs = mpi3mr_host_attrs
};

const struct attribute_group *mpi3mr_host_groups[] = {
	&mpi3mr_host_attr_group,
	NULL,
};
/*
 * SCSI Device attributes under sysfs
 */

/**
 * sas_address_show - SysFS callback for dev SAS address display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying SAS address of the
 * specific SAS/SATA end device.
 */
static ssize_t
sas_address_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
		return 0;
	return sysfs_emit(buf, "0x%016llx\n",
	    (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
}

static DEVICE_ATTR_RO(sas_address);
/**
 * device_handle_show - SysFS callback for device handle display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware internal
 * device handle of the specific device.
 */
static ssize_t
device_handle_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;
	return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
}

static DEVICE_ATTR_RO(device_handle);
/**
 * persistent_id_show - SysFS callback for persistent ID display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying persistent ID of the
 * specific device.
 */
static ssize_t
persistent_id_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;
	return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
}
static DEVICE_ATTR_RO(persistent_id);
/**
 * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
 * @dev: pointer to embedded device
 * @attr: sas_ncq_prio_supported attribute descriptor
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' sdev attribute, only works with SATA devices
 *
 * Return: sysfs_emit() return
 */
static ssize_t
sas_ncq_prio_supported_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);
/**
 * sas_ncq_prio_enable_show - send prioritized io commands to device
 * @dev: pointer to embedded device
 * @attr: sas_ncq_prio_enable attribute descriptor
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' sdev attribute, only works with SATA devices
 *
 * Return: sysfs_emit() return
 */
static ssize_t
sas_ncq_prio_enable_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;

	if (!sdev_priv_data)
		return 0;

	return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
}

static ssize_t
sas_ncq_prio_enable_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	bool ncq_prio_enable = 0;

	if (kstrtobool(buf, &ncq_prio_enable))
		return -EINVAL;

	if (!sas_ata_ncq_prio_supported(sdev))
		return -EINVAL;

	sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
	return strlen(buf);
}
static DEVICE_ATTR_RW(sas_ncq_prio_enable);

static struct attribute *mpi3mr_dev_attrs[] = {
	&dev_attr_sas_address.attr,
	&dev_attr_device_handle.attr,
	&dev_attr_persistent_id.attr,
	&dev_attr_sas_ncq_prio_supported.attr,
	&dev_attr_sas_ncq_prio_enable.attr,
	NULL,
};

static const struct attribute_group mpi3mr_dev_attr_group = {
	.attrs = mpi3mr_dev_attrs
};

const struct attribute_group *mpi3mr_dev_groups[] = {
	&mpi3mr_dev_attr_group,
	NULL,
};
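/*
 * These device attributes are attached through the host template's
 * sdev_groups, so they typically appear per device as, for example,
 * /sys/bus/scsi/devices/<h:c:t:l>/sas_address and
 * /sys/bus/scsi/devices/<h:c:t:l>/device_handle.
 */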