/*
 * linux/drivers/scsi/esas2r/esas2r_vda.c
 *      esas2r driver VDA firmware interface functions
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "esas2r.h"
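
/*
 * Highest VDA IOCTL interface version supported for each VDA function code.
 * esas2r_process_vda_ioctl() indexes this table by the requested function
 * and rejects requests that ask for a newer version than the entry here;
 * functions with no IOCTL pass-through are marked ATTO_VDA_VER_UNSUPPORTED.
 */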
static u8 esas2r_vdaioctl_versions[] = {
        ATTO_VDA_VER_UNSUPPORTED,
        ATTO_VDA_FLASH_VER,
        ATTO_VDA_VER_UNSUPPORTED,
        ATTO_VDA_VER_UNSUPPORTED,
        ATTO_VDA_CLI_VER,
        ATTO_VDA_VER_UNSUPPORTED,
        ATTO_VDA_CFG_VER,
        ATTO_VDA_MGT_VER,
        ATTO_VDA_GSV_VER
};

static void clear_vda_request(struct esas2r_request *rq);

static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
                                      struct esas2r_request *rq);

/* Prepare a VDA IOCTL request to be sent to the firmware. */
bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
                              struct atto_ioctl_vda *vi,
                              struct esas2r_request *rq,
                              struct esas2r_sg_context *sgc)
{
        u32 datalen = 0;
        struct atto_vda_sge *firstsg = NULL;
        u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions);

        vi->status = ATTO_STS_SUCCESS;
        vi->vda_status = RS_PENDING;

        if (vi->function >= vercnt) {
                vi->status = ATTO_STS_INV_FUNC;
                return false;
        }

        if (vi->version > esas2r_vdaioctl_versions[vi->function]) {
                vi->status = ATTO_STS_INV_VERSION;
                return false;
        }

        if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
                vi->status = ATTO_STS_DEGRADED;
                return false;
        }

        if (vi->function != VDA_FUNC_SCSI)
                clear_vda_request(rq);

        rq->vrq->scsi.function = vi->function;
        rq->interrupt_cb = esas2r_complete_vda_ioctl;
        rq->interrupt_cx = vi;

        switch (vi->function) {
        case VDA_FUNC_FLASH:

                if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD
                    && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE
                    && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) {
                        vi->status = ATTO_STS_INV_FUNC;
                        return false;
                }

                if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO)
                        datalen = vi->data_length;

                rq->vrq->flash.length = cpu_to_le32(datalen);
                rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;

                memcpy(rq->vrq->flash.data.file.file_name,
                       vi->cmd.flash.data.file.file_name,
                       sizeof(vi->cmd.flash.data.file.file_name));

                firstsg = rq->vrq->flash.data.file.sge;
                break;

        case VDA_FUNC_CLI:

                datalen = vi->data_length;

                rq->vrq->cli.cmd_rsp_len =
                        cpu_to_le32(vi->cmd.cli.cmd_rsp_len);
                rq->vrq->cli.length = cpu_to_le32(datalen);

                firstsg = rq->vrq->cli.sge;
                break;

        case VDA_FUNC_MGT:
        {
                u8 *cmdcurr_offset = sgc->cur_offset
                                     - offsetof(struct atto_ioctl_vda, data)
                                     + offsetof(struct atto_ioctl_vda, cmd)
                                     + offsetof(struct atto_ioctl_vda_mgt_cmd,
                                                data);
                /*
                 * build the data payload SGL here first since
                 * esas2r_sgc_init() will modify the S/G list offset for the
                 * management SGL (which is built below where the data SGL is
                 * usually built).
                 */

                if (vi->data_length) {
                        u32 payldlen = 0;

                        if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ
                            || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) {
                                rq->vrq->mgt.payld_sglst_offset =
                                        (u8)offsetof(struct atto_vda_mgmt_req,
                                                     payld_sge);

                                payldlen = vi->data_length;
                                datalen = vi->cmd.mgt.data_length;
                        } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2
                                   || vi->cmd.mgt.mgt_func ==
                                   VDAMGT_DEV_INFO2_BYADDR) {
                                datalen = vi->data_length;
                                cmdcurr_offset = sgc->cur_offset;
                        } else {
                                vi->status = ATTO_STS_INV_PARAM;
                                return false;
                        }

                        /* Setup the length so building the payload SGL works */
                        rq->vrq->mgt.length = cpu_to_le32(datalen);

                        if (payldlen) {
                                rq->vrq->mgt.payld_length =
                                        cpu_to_le32(payldlen);

                                esas2r_sgc_init(sgc, a, rq,
                                                rq->vrq->mgt.payld_sge);
                                sgc->length = payldlen;

                                if (!esas2r_build_sg_list(a, rq, sgc)) {
                                        vi->status = ATTO_STS_OUT_OF_RSRC;
                                        return false;
                                }
                        }
                } else {
                        datalen = vi->cmd.mgt.data_length;

                        rq->vrq->mgt.length = cpu_to_le32(datalen);
                }

                /*
                 * Now that the payload SGL is built, if any, setup to build
                 * the management SGL.
                 */
                firstsg = rq->vrq->mgt.sge;
                sgc->cur_offset = cmdcurr_offset;

                /* Finish initializing the management request. */
                rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
                rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
                rq->vrq->mgt.dev_index =
                        cpu_to_le32(vi->cmd.mgt.dev_index);

                esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
                break;
        }

        case VDA_FUNC_CFG:

                if (vi->data_length
                    || vi->cmd.cfg.data_length == 0) {
                        vi->status = ATTO_STS_INV_PARAM;
                        return false;
                }

                if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) {
                        vi->status = ATTO_STS_INV_FUNC;
                        return false;
                }

                rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
                rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);

                if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
                        memcpy(&rq->vrq->cfg.data,
                               &vi->cmd.cfg.data,
                               vi->cmd.cfg.data_length);

                        esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
                                             &rq->vrq->cfg.data);
                } else {
                        vi->status = ATTO_STS_INV_FUNC;

                        return false;
                }

                break;

        case VDA_FUNC_GSV:

                vi->cmd.gsv.rsp_len = vercnt;

                memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions,
                       vercnt);

                vi->vda_status = RS_SUCCESS;
                break;

        default:

                vi->status = ATTO_STS_INV_FUNC;
                return false;
        }

        if (datalen) {
                esas2r_sgc_init(sgc, a, rq, firstsg);
                sgc->length = datalen;

                if (!esas2r_build_sg_list(a, rq, sgc)) {
                        vi->status = ATTO_STS_OUT_OF_RSRC;
                        return false;
                }
        }

        esas2r_start_request(a, rq);

        return true;
}
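
/*
 * Rough calling sequence for esas2r_process_vda_ioctl() (illustrative only;
 * the local names below are hypothetical and not part of this driver):
 *
 *      struct atto_ioctl_vda *vi;      VDA IOCTL header from the application
 *      struct esas2r_request *rq;      driver request to carry the command
 *      struct esas2r_sg_context sgc;   S/G context describing the data buffer
 *
 *      if (!esas2r_process_vda_ioctl(a, vi, rq, &sgc)) {
 *              the request was rejected up front; vi->status holds the
 *              ATTO_STS_* reason and nothing was sent to the firmware
 *      }
 *
 * When true is returned, the request has already been handed to the firmware
 * via esas2r_start_request(), and esas2r_complete_vda_ioctl() later runs as
 * the interrupt callback to copy the response back into *vi.
 */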

static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
                                      struct esas2r_request *rq)
{
        struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;

        vi->vda_status = rq->req_stat;

        switch (vi->function) {
        case VDA_FUNC_FLASH:

                if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO
                    || vi->cmd.flash.sub_func == VDA_FLASH_FREAD)
                        vi->cmd.flash.data.file.file_size =
                                le32_to_cpu(rq->func_rsp.flash_rsp.file_size);

                break;

        case VDA_FUNC_MGT:

                vi->cmd.mgt.scan_generation =
                        rq->func_rsp.mgt_rsp.scan_generation;
                vi->cmd.mgt.dev_index = le16_to_cpu(
                        rq->func_rsp.mgt_rsp.dev_index);

                if (vi->data_length == 0)
                        vi->cmd.mgt.data_length =
                                le32_to_cpu(rq->func_rsp.mgt_rsp.length);

                esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
                break;

        case VDA_FUNC_CFG:

                if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
                        struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
                        struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
                        char buf[sizeof(cfg->data.init.fw_release) + 1];

                        cfg->data_length =
                                cpu_to_le32(sizeof(struct atto_vda_cfg_init));
                        cfg->data.init.vda_version =
                                le32_to_cpu(rsp->vda_version);
                        cfg->data.init.fw_build = rsp->fw_build;

                        snprintf(buf, sizeof(buf), "%1.1u.%2.2u",
                                 (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
                                 (int)HIBYTE(le16_to_cpu(rsp->fw_release)));

                        memcpy(&cfg->data.init.fw_release, buf,
                               sizeof(cfg->data.init.fw_release));

                        if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
                                cfg->data.init.fw_version =
                                        cfg->data.init.fw_build;
                        else
                                cfg->data.init.fw_version =
                                        cfg->data.init.fw_release;
                } else {
                        esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
                                             &vi->cmd.cfg.data);
                }

                break;

        case VDA_FUNC_CLI:

                vi->cmd.cli.cmd_rsp_len =
                        le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
                break;

        default:

                break;
        }
}
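
/*
 * For VDA_CFG_GET_INIT the completion handler above repackages the raw
 * firmware response into the caller-visible atto_vda_cfg_init layout:
 * fw_release is rewritten as a "major.minor" string taken from the low and
 * high bytes of the little-endian release word, and fw_version is picked from
 * either fw_build or fw_release (the 'A' check presumably identifies
 * alpha/engineering builds, which report the build identifier instead of the
 * release string).
 */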

/* Build a flash VDA request. */
void esas2r_build_flash_req(struct esas2r_adapter *a,
                            struct esas2r_request *rq,
                            u8 sub_func,
                            u8 cksum,
                            u32 addr,
                            u32 length)
{
        struct atto_vda_flash_req *vrq = &rq->vrq->flash;

        clear_vda_request(rq);

        rq->vrq->scsi.function = VDA_FUNC_FLASH;

        if (sub_func == VDA_FLASH_BEGINW
            || sub_func == VDA_FLASH_WRITE
            || sub_func == VDA_FLASH_READ)
                vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
                                                   data.sge);

        vrq->length = cpu_to_le32(length);
        vrq->flash_addr = cpu_to_le32(addr);
        vrq->checksum = cksum;
        vrq->sub_func = sub_func;
}
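
/*
 * Note that esas2r_build_flash_req() only fills in vrq->sg_list_offset for
 * the sub-functions that actually move data through an SGL (VDA_FLASH_BEGINW,
 * VDA_FLASH_WRITE and VDA_FLASH_READ); for the remaining sub-functions the
 * offset stays zero from clear_vda_request().  Building the SGL itself is
 * presumably left to the caller, since only the offset is recorded here.
 */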

/* Build a VDA management request. */
void esas2r_build_mgt_req(struct esas2r_adapter *a,
                          struct esas2r_request *rq,
                          u8 sub_func,
                          u8 scan_gen,
                          u16 dev_index,
                          u32 length,
                          void *data)
{
        struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;

        clear_vda_request(rq);

        rq->vrq->scsi.function = VDA_FUNC_MGT;

        vrq->mgt_func = sub_func;
        vrq->scan_generation = scan_gen;
        vrq->dev_index = cpu_to_le16(dev_index);
        vrq->length = cpu_to_le32(length);

        if (vrq->length) {
                if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
                        vrq->sg_list_offset = (u8)offsetof(
                                struct atto_vda_mgmt_req, sge);

                        vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
                        vrq->sge[0].address = cpu_to_le64(
                                rq->vrq_md->phys_addr +
                                sizeof(union atto_vda_req));
                } else {
                        vrq->sg_list_offset = (u8)offsetof(
                                struct atto_vda_mgmt_req, prde);

                        vrq->prde[0].ctl_len = cpu_to_le32(length);
                        vrq->prde[0].address = cpu_to_le64(
                                rq->vrq_md->phys_addr +
                                sizeof(union atto_vda_req));
                }
        }

        if (data) {
                esas2r_nuxi_mgt_data(sub_func, data);

                memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
                       length);
        }
}
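
/*
 * esas2r_build_mgt_req() above and esas2r_build_ae_req() below embed a single
 * scatter/gather element instead of building a full SGL: in legacy SGE mode
 * (AF_LEGACY_SGE_MODE) it is an SGE marked SGE_LAST, otherwise a PRDE entry.
 * In both cases the element points just past the VDA request within the same
 * DMA allocation (rq->vrq_md->phys_addr + sizeof(union atto_vda_req)), which
 * appears to be where the firmware is expected to place the response data for
 * these internally generated requests.
 */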

/* Build a VDA asynchronous event (AE) request. */
void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
{
        struct atto_vda_ae_req *vrq = &rq->vrq->ae;

        clear_vda_request(rq);

        rq->vrq->scsi.function = VDA_FUNC_AE;

        vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));

        if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
                vrq->sg_list_offset =
                        (u8)offsetof(struct atto_vda_ae_req, sge);
                vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
                vrq->sge[0].address = cpu_to_le64(
                        rq->vrq_md->phys_addr +
                        sizeof(union atto_vda_req));
        } else {
                vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
                                                   prde);
                vrq->prde[0].ctl_len = cpu_to_le32(vrq->length);
                vrq->prde[0].address = cpu_to_le64(
                        rq->vrq_md->phys_addr +
                        sizeof(union atto_vda_req));
        }
}

/* Build a VDA CLI request. */
void esas2r_build_cli_req(struct esas2r_adapter *a,
                          struct esas2r_request *rq,
                          u32 length,
                          u32 cmd_rsp_len)
{
        struct atto_vda_cli_req *vrq = &rq->vrq->cli;

        clear_vda_request(rq);

        rq->vrq->scsi.function = VDA_FUNC_CLI;

        vrq->length = cpu_to_le32(length);
        vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
        vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
}

/* Build a VDA IOCTL request. */
void esas2r_build_ioctl_req(struct esas2r_adapter *a,
                            struct esas2r_request *rq,
                            u32 length,
                            u8 sub_func)
{
        struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;

        clear_vda_request(rq);

        rq->vrq->scsi.function = VDA_FUNC_IOCTL;

        vrq->length = cpu_to_le32(length);
        vrq->sub_func = sub_func;
        vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
}

/* Build a VDA configuration request. */
void esas2r_build_cfg_req(struct esas2r_adapter *a,
                          struct esas2r_request *rq,
                          u8 sub_func,
                          u32 length,
                          void *data)
{
        struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;

        clear_vda_request(rq);

        rq->vrq->scsi.function = VDA_FUNC_CFG;

        vrq->sub_func = sub_func;
        vrq->length = cpu_to_le32(length);

        if (data) {
                esas2r_nuxi_cfg_data(sub_func, data);

                memcpy(&vrq->data, data, length);
        }
}
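
/*
 * clear_vda_request() below zeroes the whole VDA request but deliberately
 * preserves rq->vrq->scsi.handle: the handle identifies the request to the
 * firmware and presumably must remain stable for the life of the
 * esas2r_request, so only the command-specific fields are reset between uses.
 */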

static void clear_vda_request(struct esas2r_request *rq)
{
        u32 handle = rq->vrq->scsi.handle;

        memset(rq->vrq, 0, sizeof(*rq->vrq));

        rq->vrq->scsi.handle = handle;

        rq->req_stat = RS_PENDING;

        /* since the data buffer is separate, clear that too */

        memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);

        /*
         * Set up the next and prev pointers in case the request does not go
         * through esas2r_start_request().
         */

        INIT_LIST_HEAD(&rq->req_list);
}