drivers/scsi/osd/osd_initiator.c
1 /*
2 * osd_initiator - Main body of the osd initiator library.
4 * Note: The file does not contain the advanced security functionality which
5 * is only needed by the security_manager's initiators.
7 * Copyright (C) 2008 Panasas Inc. All rights reserved.
9 * Authors:
10 * Boaz Harrosh <bharrosh@panasas.com>
11 * Benny Halevy <bhalevy@panasas.com>
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. Neither the name of the Panasas company nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
30 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
31 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
32 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
37 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
38 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
39 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <linux/slab.h>
44 #include <scsi/osd_initiator.h>
45 #include <scsi/osd_sec.h>
46 #include <scsi/osd_attributes.h>
47 #include <scsi/osd_sense.h>
49 #include <scsi/scsi_device.h>
51 #include "osd_debug.h"
53 #ifndef __unused
54 # define __unused __attribute__((unused))
55 #endif
57 enum { OSD_REQ_RETRIES = 1 };
59 MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
60 MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
61 MODULE_LICENSE("GPL");
63 static inline void build_test(void)
65 /* structures were not packed */
66 BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN);
67 BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN);
68 BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN);
71 static const char *_osd_ver_desc(struct osd_request *or)
73 return osd_req_is_ver1(or) ? "OSD1" : "OSD2";
76 #define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len)
78 static int _osd_get_print_system_info(struct osd_dev *od,
79 void *caps, struct osd_dev_info *odi)
81 struct osd_request *or;
82 struct osd_attr get_attrs[] = {
83 ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8),
84 ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16),
85 ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32),
86 ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4),
87 ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/),
88 ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/),
89 ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8),
90 ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8),
91 ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8),
92 ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6),
93 /* IBM-OSD-SIM has a bug with this one, put it last */
94 ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20),
96 void *iter = NULL, *pFirst;
97 int nelem = ARRAY_SIZE(get_attrs), a = 0;
98 int ret;
100 or = osd_start_request(od, GFP_KERNEL);
101 if (!or)
102 return -ENOMEM;
104 /* get attrs */
105 osd_req_get_attributes(or, &osd_root_object);
106 osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs));
108 ret = osd_finalize_request(or, 0, caps, NULL);
109 if (ret)
110 goto out;
112 ret = osd_execute_request(or);
113 if (ret) {
114 OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret);
115 goto out;
118 osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
120 OSD_INFO("Detected %s device\n",
121 _osd_ver_desc(or));
123 pFirst = get_attrs[a++].val_ptr;
124 OSD_INFO("VENDOR_IDENTIFICATION [%s]\n",
125 (char *)pFirst);
127 pFirst = get_attrs[a++].val_ptr;
128 OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
129 (char *)pFirst);
131 pFirst = get_attrs[a++].val_ptr;
132 OSD_INFO("PRODUCT_MODEL [%s]\n",
133 (char *)pFirst);
135 pFirst = get_attrs[a++].val_ptr;
136 OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
137 pFirst ? get_unaligned_be32(pFirst) : ~0U);
139 pFirst = get_attrs[a++].val_ptr;
140 OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n",
141 (char *)pFirst);
143 odi->osdname_len = get_attrs[a].len;
144 /* Avoid NULL for memcmp optimization; 0-length is good enough */
145 odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
146 if (odi->osdname_len)
147 memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
148 OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
149 a++;
151 pFirst = get_attrs[a++].val_ptr;
152 OSD_INFO("TOTAL_CAPACITY [0x%llx]\n",
153 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
155 pFirst = get_attrs[a++].val_ptr;
156 OSD_INFO("USED_CAPACITY [0x%llx]\n",
157 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
159 pFirst = get_attrs[a++].val_ptr;
160 OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n",
161 pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
163 if (a >= nelem)
164 goto out;
166 /* FIXME: Where are the time utilities */
167 pFirst = get_attrs[a++].val_ptr;
168 OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
169 ((char *)pFirst)[0], ((char *)pFirst)[1],
170 ((char *)pFirst)[2], ((char *)pFirst)[3],
171 ((char *)pFirst)[4], ((char *)pFirst)[5]);
173 if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */
174 unsigned len = get_attrs[a].len;
175 char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */
177 hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
178 sid_dump, sizeof(sid_dump), true);
179 OSD_INFO("OSD_SYSTEM_ID(%d)\n"
180 " [%s]\n", len, sid_dump);
182 if (unlikely(len > sizeof(odi->systemid))) {
183 OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). "
184 "device idetification might not work\n", len);
185 len = sizeof(odi->systemid);
187 odi->systemid_len = len;
188 memcpy(odi->systemid, get_attrs[a].val_ptr, len);
189 a++;
191 out:
192 osd_end_request(or);
193 return ret;
196 int osd_auto_detect_ver(struct osd_dev *od,
197 void *caps, struct osd_dev_info *odi)
199 int ret;
201 /* Auto-detect the osd version */
202 ret = _osd_get_print_system_info(od, caps, odi);
203 if (ret) {
204 osd_dev_set_ver(od, OSD_VER1);
205 OSD_DEBUG("converting to OSD1\n");
206 ret = _osd_get_print_system_info(od, caps, odi);
209 return ret;
211 EXPORT_SYMBOL(osd_auto_detect_ver);
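/*
 * Illustrative usage sketch (hypothetical caller, not from this file): an
 * upper-layer driver would typically probe a newly found OSD LUN roughly as
 * below, using no-security all-capabilities credentials. Error handling and
 * the surrounding driver context are omitted.
 *
 *	u8 caps[OSD_CAP_LEN];
 *	struct osd_dev_info odi;
 *
 *	osd_dev_init(od, scsi_device);
 *	osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true);
 *	if (osd_auto_detect_ver(od, caps, &odi))
 *		return -ENODEV;
 *	(on success, odi.osdname and odi.systemid identify the target)
 */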
213 static unsigned _osd_req_cdb_len(struct osd_request *or)
215 return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN;
218 static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len)
220 return osd_req_is_ver1(or) ?
221 osdv1_attr_list_elem_size(len) :
222 osdv2_attr_list_elem_size(len);
225 static void _osd_req_alist_elem_encode(struct osd_request *or,
226 void *attr_last, const struct osd_attr *oa)
228 if (osd_req_is_ver1(or)) {
229 struct osdv1_attributes_list_element *attr = attr_last;
231 attr->attr_page = cpu_to_be32(oa->attr_page);
232 attr->attr_id = cpu_to_be32(oa->attr_id);
233 attr->attr_bytes = cpu_to_be16(oa->len);
234 memcpy(attr->attr_val, oa->val_ptr, oa->len);
235 } else {
236 struct osdv2_attributes_list_element *attr = attr_last;
238 attr->attr_page = cpu_to_be32(oa->attr_page);
239 attr->attr_id = cpu_to_be32(oa->attr_id);
240 attr->attr_bytes = cpu_to_be16(oa->len);
241 memcpy(attr->attr_val, oa->val_ptr, oa->len);
245 static int _osd_req_alist_elem_decode(struct osd_request *or,
246 void *cur_p, struct osd_attr *oa, unsigned max_bytes)
248 unsigned inc;
249 if (osd_req_is_ver1(or)) {
250 struct osdv1_attributes_list_element *attr = cur_p;
252 if (max_bytes < sizeof(*attr))
253 return -1;
255 oa->len = be16_to_cpu(attr->attr_bytes);
256 inc = _osd_req_alist_elem_size(or, oa->len);
257 if (inc > max_bytes)
258 return -1;
260 oa->attr_page = be32_to_cpu(attr->attr_page);
261 oa->attr_id = be32_to_cpu(attr->attr_id);
263 /* OSD1: On empty attributes we return a pointer to 2 bytes
264 * of zeros. This keeps similar behaviour with OSD2.
265 * (See below)
267 oa->val_ptr = likely(oa->len) ? attr->attr_val :
268 (u8 *)&attr->attr_bytes;
269 } else {
270 struct osdv2_attributes_list_element *attr = cur_p;
272 if (max_bytes < sizeof(*attr))
273 return -1;
275 oa->len = be16_to_cpu(attr->attr_bytes);
276 inc = _osd_req_alist_elem_size(or, oa->len);
277 if (inc > max_bytes)
278 return -1;
280 oa->attr_page = be32_to_cpu(attr->attr_page);
281 oa->attr_id = be32_to_cpu(attr->attr_id);
283 /* OSD2: For convenience, on empty attributes, we return 8 bytes
284 * of zeros here. This keeps the same behaviour with OSD2r04,
285 * and is nice with null terminating ASCII fields.
286 * oa->val_ptr == NULL marks the end-of-list, or error.
288 oa->val_ptr = likely(oa->len) ? attr->attr_val : attr->reserved;
290 return inc;
293 static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head)
295 return osd_req_is_ver1(or) ?
296 osdv1_list_size(list_head) :
297 osdv2_list_size(list_head);
300 static unsigned _osd_req_sizeof_alist_header(struct osd_request *or)
302 return osd_req_is_ver1(or) ?
303 sizeof(struct osdv1_attributes_list_header) :
304 sizeof(struct osdv2_attributes_list_header);
307 static void _osd_req_set_alist_type(struct osd_request *or,
308 void *list, int list_type)
310 if (osd_req_is_ver1(or)) {
311 struct osdv1_attributes_list_header *attr_list = list;
313 memset(attr_list, 0, sizeof(*attr_list));
314 attr_list->type = list_type;
315 } else {
316 struct osdv2_attributes_list_header *attr_list = list;
318 memset(attr_list, 0, sizeof(*attr_list));
319 attr_list->type = list_type;
323 static bool _osd_req_is_alist_type(struct osd_request *or,
324 void *list, int list_type)
326 if (!list)
327 return false;
329 if (osd_req_is_ver1(or)) {
330 struct osdv1_attributes_list_header *attr_list = list;
332 return attr_list->type == list_type;
333 } else {
334 struct osdv2_attributes_list_header *attr_list = list;
336 return attr_list->type == list_type;
340 /* This is for List-objects not Attributes-Lists */
341 static void _osd_req_encode_olist(struct osd_request *or,
342 struct osd_obj_id_list *list)
344 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
346 if (osd_req_is_ver1(or)) {
347 cdbh->v1.list_identifier = list->list_identifier;
348 cdbh->v1.start_address = list->continuation_id;
349 } else {
350 cdbh->v2.list_identifier = list->list_identifier;
351 cdbh->v2.start_address = list->continuation_id;
355 static osd_cdb_offset osd_req_encode_offset(struct osd_request *or,
356 u64 offset, unsigned *padding)
358 return __osd_encode_offset(offset, padding,
359 osd_req_is_ver1(or) ?
360 OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT,
361 OSD_OFFSET_MAX_SHIFT);
364 static struct osd_security_parameters *
365 _osd_req_sec_params(struct osd_request *or)
367 struct osd_cdb *ocdb = &or->cdb;
369 if (osd_req_is_ver1(or))
370 return (struct osd_security_parameters *)&ocdb->v1.sec_params;
371 else
372 return (struct osd_security_parameters *)&ocdb->v2.sec_params;
375 void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device)
377 memset(osdd, 0, sizeof(*osdd));
378 osdd->scsi_device = scsi_device;
379 osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT;
380 #ifdef OSD_VER1_SUPPORT
381 osdd->version = OSD_VER2;
382 #endif
383 /* TODO: Allocate pools for osd_request attributes ... */
385 EXPORT_SYMBOL(osd_dev_init);
387 void osd_dev_fini(struct osd_dev *osdd)
389 /* TODO: De-allocate pools */
391 osdd->scsi_device = NULL;
393 EXPORT_SYMBOL(osd_dev_fini);
395 static struct osd_request *_osd_request_alloc(gfp_t gfp)
397 struct osd_request *or;
399 /* TODO: Use mempool with one saved request */
400 or = kzalloc(sizeof(*or), gfp);
401 return or;
404 static void _osd_request_free(struct osd_request *or)
406 kfree(or);
409 struct osd_request *osd_start_request(struct osd_dev *dev, gfp_t gfp)
411 struct osd_request *or;
413 or = _osd_request_alloc(gfp);
414 if (!or)
415 return NULL;
417 or->osd_dev = dev;
418 or->alloc_flags = gfp;
419 or->timeout = dev->def_timeout;
420 or->retries = OSD_REQ_RETRIES;
422 return or;
424 EXPORT_SYMBOL(osd_start_request);
426 static void _osd_free_seg(struct osd_request *or __unused,
427 struct _osd_req_data_segment *seg)
429 if (!seg->buff || !seg->alloc_size)
430 return;
432 kfree(seg->buff);
433 seg->buff = NULL;
434 seg->alloc_size = 0;
437 static void _put_request(struct request *rq)
440 * If osd_finalize_request() was called but the request was not
441 * executed through the block layer, then we must release BIOs.
442 * TODO: Keep error code in or->async_error. Need to audit all
443 * code paths.
445 if (unlikely(rq->bio))
446 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
447 else
448 blk_put_request(rq);
451 void osd_end_request(struct osd_request *or)
453 struct request *rq = or->request;
455 if (rq) {
456 if (rq->next_rq) {
457 _put_request(rq->next_rq);
458 rq->next_rq = NULL;
461 _put_request(rq);
464 _osd_free_seg(or, &or->get_attr);
465 _osd_free_seg(or, &or->enc_get_attr);
466 _osd_free_seg(or, &or->set_attr);
467 _osd_free_seg(or, &or->cdb_cont);
469 _osd_request_free(or);
471 EXPORT_SYMBOL(osd_end_request);
473 static void _set_error_resid(struct osd_request *or, struct request *req,
474 int error)
476 or->async_error = error;
477 or->req_errors = req->errors ? : error;
478 or->sense_len = req->sense_len;
479 if (or->out.req)
480 or->out.residual = or->out.req->resid_len;
481 if (or->in.req)
482 or->in.residual = or->in.req->resid_len;
485 int osd_execute_request(struct osd_request *or)
487 int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
489 _set_error_resid(or, or->request, error);
490 return error;
492 EXPORT_SYMBOL(osd_execute_request);
494 static void osd_request_async_done(struct request *req, int error)
496 struct osd_request *or = req->end_io_data;
498 _set_error_resid(or, req, error);
499 if (req->next_rq) {
500 __blk_put_request(req->q, req->next_rq);
501 req->next_rq = NULL;
504 __blk_put_request(req->q, req);
505 or->request = NULL;
506 or->in.req = NULL;
507 or->out.req = NULL;
509 if (or->async_done)
510 or->async_done(or, or->async_private);
511 else
512 osd_end_request(or);
515 int osd_execute_request_async(struct osd_request *or,
516 osd_req_done_fn *done, void *private)
518 or->request->end_io_data = or;
519 or->async_private = private;
520 or->async_done = done;
522 blk_execute_rq_nowait(or->request->q, NULL, or->request, 0,
523 osd_request_async_done);
524 return 0;
526 EXPORT_SYMBOL(osd_execute_request_async);
528 u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
529 u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT];
531 static int _osd_realloc_seg(struct osd_request *or,
532 struct _osd_req_data_segment *seg, unsigned max_bytes)
534 void *buff;
536 if (seg->alloc_size >= max_bytes)
537 return 0;
539 buff = krealloc(seg->buff, max_bytes, or->alloc_flags);
540 if (!buff) {
541 OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes,
542 seg->alloc_size);
543 return -ENOMEM;
546 memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size);
547 seg->buff = buff;
548 seg->alloc_size = max_bytes;
549 return 0;
552 static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
554 OSD_DEBUG("total_bytes=%d\n", total_bytes);
555 return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
558 static int _alloc_set_attr_list(struct osd_request *or,
559 const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
561 unsigned total_bytes = add_bytes;
563 for (; nelem; --nelem, ++oa)
564 total_bytes += _osd_req_alist_elem_size(or, oa->len);
566 OSD_DEBUG("total_bytes=%d\n", total_bytes);
567 return _osd_realloc_seg(or, &or->set_attr, total_bytes);
570 static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes)
572 OSD_DEBUG("total_bytes=%d\n", max_bytes);
573 return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes);
576 static int _alloc_get_attr_list(struct osd_request *or)
578 OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes);
579 return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes);
583 * Common to all OSD commands
586 static void _osdv1_req_encode_common(struct osd_request *or,
587 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
589 struct osdv1_cdb *ocdb = &or->cdb.v1;
592 * For speed, the commands
593 * OSD_ACT_PERFORM_SCSI_COMMAND , V1 0x8F7E, V2 0x8F7C
594 * OSD_ACT_SCSI_TASK_MANAGEMENT , V1 0x8F7F, V2 0x8F7D
595 * are not supported here; callers should pass zero and set the action after the call
597 act &= cpu_to_be16(~0x0080); /* V1 action code */
599 OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act));
601 ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
602 ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
603 ocdb->h.varlen_cdb.service_action = act;
605 ocdb->h.partition = cpu_to_be64(obj->partition);
606 ocdb->h.object = cpu_to_be64(obj->id);
607 ocdb->h.v1.length = cpu_to_be64(len);
608 ocdb->h.v1.start_address = cpu_to_be64(offset);
611 static void _osdv2_req_encode_common(struct osd_request *or,
612 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
614 struct osdv2_cdb *ocdb = &or->cdb.v2;
616 OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act));
618 ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD;
619 ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH;
620 ocdb->h.varlen_cdb.service_action = act;
622 ocdb->h.partition = cpu_to_be64(obj->partition);
623 ocdb->h.object = cpu_to_be64(obj->id);
624 ocdb->h.v2.length = cpu_to_be64(len);
625 ocdb->h.v2.start_address = cpu_to_be64(offset);
628 static void _osd_req_encode_common(struct osd_request *or,
629 __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len)
631 if (osd_req_is_ver1(or))
632 _osdv1_req_encode_common(or, act, obj, offset, len);
633 else
634 _osdv2_req_encode_common(or, act, obj, offset, len);
638 * Device commands
640 /*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */
641 /*TODO: void osd_req_set_master_key(struct osd_request *, ...); */
643 void osd_req_format(struct osd_request *or, u64 tot_capacity)
645 _osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0,
646 tot_capacity);
648 EXPORT_SYMBOL(osd_req_format);
650 int osd_req_list_dev_partitions(struct osd_request *or,
651 osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem)
653 return osd_req_list_partition_objects(or, 0, initial_id, list, nelem);
655 EXPORT_SYMBOL(osd_req_list_dev_partitions);
657 static void _osd_req_encode_flush(struct osd_request *or,
658 enum osd_options_flush_scope_values op)
660 struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb);
662 ocdb->command_specific_options = op;
665 void osd_req_flush_obsd(struct osd_request *or,
666 enum osd_options_flush_scope_values op)
668 _osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0);
669 _osd_req_encode_flush(or, op);
671 EXPORT_SYMBOL(osd_req_flush_obsd);
673 /*TODO: void osd_req_perform_scsi_command(struct osd_request *,
674 const u8 *cdb, ...); */
675 /*TODO: void osd_req_task_management(struct osd_request *, ...); */
678 * Partition commands
680 static void _osd_req_encode_partition(struct osd_request *or,
681 __be16 act, osd_id partition)
683 struct osd_obj_id par = {
684 .partition = partition,
685 .id = 0,
688 _osd_req_encode_common(or, act, &par, 0, 0);
691 void osd_req_create_partition(struct osd_request *or, osd_id partition)
693 _osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition);
695 EXPORT_SYMBOL(osd_req_create_partition);
697 void osd_req_remove_partition(struct osd_request *or, osd_id partition)
699 _osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition);
701 EXPORT_SYMBOL(osd_req_remove_partition);
703 /*TODO: void osd_req_set_partition_key(struct osd_request *,
704 osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE],
705 u8 seed[OSD_CRYPTO_SEED_SIZE]); */
707 static int _osd_req_list_objects(struct osd_request *or,
708 __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
709 struct osd_obj_id_list *list, unsigned nelem)
711 struct request_queue *q = osd_request_queue(or->osd_dev);
712 u64 len = nelem * sizeof(osd_id) + sizeof(*list);
713 struct bio *bio;
715 _osd_req_encode_common(or, action, obj, (u64)initial_id, len);
717 if (list->list_identifier)
718 _osd_req_encode_olist(or, list);
720 WARN_ON(or->in.bio);
721 bio = bio_map_kern(q, list, len, or->alloc_flags);
722 if (IS_ERR(bio)) {
723 OSD_ERR("!!! Failed to allocate list_objects BIO\n");
724 return PTR_ERR(bio);
727 bio->bi_rw &= ~REQ_WRITE;
728 or->in.bio = bio;
729 or->in.total_bytes = bio->bi_size;
730 return 0;
733 int osd_req_list_partition_collections(struct osd_request *or,
734 osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
735 unsigned nelem)
737 struct osd_obj_id par = {
738 .partition = partition,
739 .id = 0,
742 return osd_req_list_collection_objects(or, &par, initial_id, list,
743 nelem);
745 EXPORT_SYMBOL(osd_req_list_partition_collections);
747 int osd_req_list_partition_objects(struct osd_request *or,
748 osd_id partition, osd_id initial_id, struct osd_obj_id_list *list,
749 unsigned nelem)
751 struct osd_obj_id par = {
752 .partition = partition,
753 .id = 0,
756 return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list,
757 nelem);
759 EXPORT_SYMBOL(osd_req_list_partition_objects);
761 void osd_req_flush_partition(struct osd_request *or,
762 osd_id partition, enum osd_options_flush_scope_values op)
764 _osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition);
765 _osd_req_encode_flush(or, op);
767 EXPORT_SYMBOL(osd_req_flush_partition);
770 * Collection commands
772 /*TODO: void osd_req_create_collection(struct osd_request *,
773 const struct osd_obj_id *); */
774 /*TODO: void osd_req_remove_collection(struct osd_request *,
775 const struct osd_obj_id *); */
777 int osd_req_list_collection_objects(struct osd_request *or,
778 const struct osd_obj_id *obj, osd_id initial_id,
779 struct osd_obj_id_list *list, unsigned nelem)
781 return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj,
782 initial_id, list, nelem);
784 EXPORT_SYMBOL(osd_req_list_collection_objects);
786 /*TODO: void query(struct osd_request *, ...); V2 */
788 void osd_req_flush_collection(struct osd_request *or,
789 const struct osd_obj_id *obj, enum osd_options_flush_scope_values op)
791 _osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0);
792 _osd_req_encode_flush(or, op);
794 EXPORT_SYMBOL(osd_req_flush_collection);
796 /*TODO: void get_member_attrs(struct osd_request *, ...); V2 */
797 /*TODO: void set_member_attrs(struct osd_request *, ...); V2 */
800 * Object commands
802 void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj)
804 _osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0);
806 EXPORT_SYMBOL(osd_req_create_object);
808 void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj)
810 _osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0);
812 EXPORT_SYMBOL(osd_req_remove_object);
815 /*TODO: void osd_req_create_multi(struct osd_request *or,
816 struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem);
819 void osd_req_write(struct osd_request *or,
820 const struct osd_obj_id *obj, u64 offset,
821 struct bio *bio, u64 len)
823 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
824 WARN_ON(or->out.bio || or->out.total_bytes);
825 WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
826 or->out.bio = bio;
827 or->out.total_bytes = len;
829 EXPORT_SYMBOL(osd_req_write);
831 int osd_req_write_kern(struct osd_request *or,
832 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
834 struct request_queue *req_q = osd_request_queue(or->osd_dev);
835 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
837 if (IS_ERR(bio))
838 return PTR_ERR(bio);
840 bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
841 osd_req_write(or, obj, offset, bio, len);
842 return 0;
844 EXPORT_SYMBOL(osd_req_write_kern);
846 /*TODO: void osd_req_append(struct osd_request *,
847 const struct osd_obj_id *, struct bio *data_out); */
848 /*TODO: void osd_req_create_write(struct osd_request *,
849 const struct osd_obj_id *, struct bio *data_out, u64 offset); */
850 /*TODO: void osd_req_clear(struct osd_request *,
851 const struct osd_obj_id *, u64 offset, u64 len); */
852 /*TODO: void osd_req_punch(struct osd_request *,
853 const struct osd_obj_id *, u64 offset, u64 len); V2 */
855 void osd_req_flush_object(struct osd_request *or,
856 const struct osd_obj_id *obj, enum osd_options_flush_scope_values op,
857 /*V2*/ u64 offset, /*V2*/ u64 len)
859 if (unlikely(osd_req_is_ver1(or) && (offset || len))) {
860 OSD_DEBUG("OSD Ver1 flush on specific range ignored\n");
861 offset = 0;
862 len = 0;
865 _osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len);
866 _osd_req_encode_flush(or, op);
868 EXPORT_SYMBOL(osd_req_flush_object);
870 void osd_req_read(struct osd_request *or,
871 const struct osd_obj_id *obj, u64 offset,
872 struct bio *bio, u64 len)
874 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
875 WARN_ON(or->in.bio || or->in.total_bytes);
876 WARN_ON(bio->bi_rw & REQ_WRITE);
877 or->in.bio = bio;
878 or->in.total_bytes = len;
880 EXPORT_SYMBOL(osd_req_read);
882 int osd_req_read_kern(struct osd_request *or,
883 const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
885 struct request_queue *req_q = osd_request_queue(or->osd_dev);
886 struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
888 if (IS_ERR(bio))
889 return PTR_ERR(bio);
891 osd_req_read(or, obj, offset, bio, len);
892 return 0;
894 EXPORT_SYMBOL(osd_req_read_kern);
896 static int _add_sg_continuation_descriptor(struct osd_request *or,
897 const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
899 struct osd_sg_continuation_descriptor *oscd;
900 u32 oscd_size;
901 unsigned i;
902 int ret;
904 oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
906 if (!or->cdb_cont.total_bytes) {
907 /* First time, jump over the header, we will write to:
908 * cdb_cont.buff + cdb_cont.total_bytes
910 or->cdb_cont.total_bytes =
911 sizeof(struct osd_continuation_segment_header);
914 ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
915 if (unlikely(ret))
916 return ret;
918 oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
919 oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
920 oscd->hdr.pad_length = 0;
921 oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
923 *len = 0;
924 /* copy the sg entries and convert to network byte order */
925 for (i = 0; i < numentries; i++) {
926 oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
927 oscd->entries[i].len = cpu_to_be64(sglist[i].len);
928 *len += sglist[i].len;
931 or->cdb_cont.total_bytes += oscd_size;
932 OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
933 or->cdb_cont.total_bytes, oscd_size, numentries);
934 return 0;
937 static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
939 struct request_queue *req_q = osd_request_queue(or->osd_dev);
940 struct bio *bio;
941 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
942 struct osd_continuation_segment_header *cont_seg_hdr;
944 if (!or->cdb_cont.total_bytes)
945 return 0;
947 cont_seg_hdr = or->cdb_cont.buff;
948 cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
949 cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
951 /* create a bio for continuation segment */
952 bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
953 GFP_KERNEL);
954 if (IS_ERR(bio))
955 return PTR_ERR(bio);
957 bio->bi_rw |= REQ_WRITE;
959 /* integrity check the continuation before the bio is linked
960 * with the other data segments since the continuation
961 * integrity is separate from the other data segments.
963 osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
965 cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
967 /* we can't use _req_append_segment, because we need to link in the
968 * continuation bio to the head of the bio list - the
969 * continuation segment (if it exists) is always the first segment in
970 * the out data buffer.
972 bio->bi_next = or->out.bio;
973 or->out.bio = bio;
974 or->out.total_bytes += or->cdb_cont.total_bytes;
976 return 0;
979 /* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
980 * @sglist that has the scatter gather entries. Scatter-gather enables a write
981 * of multiple non-contiguous areas of an object, in a single call. The extents
982 * may overlap and/or be in any order. The only constraint is that:
983 * total_bytes(sglist) >= total_bytes(bio)
985 int osd_req_write_sg(struct osd_request *or,
986 const struct osd_obj_id *obj, struct bio *bio,
987 const struct osd_sg_entry *sglist, unsigned numentries)
989 u64 len;
990 int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
992 if (ret)
993 return ret;
994 osd_req_write(or, obj, 0, bio, len);
996 return 0;
998 EXPORT_SYMBOL(osd_req_write_sg);
1000 /* osd_req_read_sg: Read multiple extents of an object into @bio
1001 * See osd_req_write_sg
1003 int osd_req_read_sg(struct osd_request *or,
1004 const struct osd_obj_id *obj, struct bio *bio,
1005 const struct osd_sg_entry *sglist, unsigned numentries)
1007 u64 len;
1008 int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
1010 if (ret)
1011 return ret;
1012 osd_req_read(or, obj, 0, bio, len);
1014 return 0;
1016 EXPORT_SYMBOL(osd_req_read_sg);
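/*
 * Illustrative sketch (hypothetical caller, error handling omitted): write
 * one bio worth of data into two non-contiguous 4K extents of an object.
 * The byte total of the sglist (8K here) must cover the bio, per the
 * constraint documented above; osd_req_read_sg() is used symmetrically for
 * the read direction.
 *
 *	struct osd_sg_entry sge[2] = {
 *		{ .offset = 0,           .len = 4096 },
 *		{ .offset = 1024 * 1024, .len = 4096 },
 *	};
 *
 *	ret = osd_req_write_sg(or, obj, bio, sge, ARRAY_SIZE(sge));
 */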
1018 /* SG-list write/read Kern API
1020 * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
1021 * of sg_entries. @numentries indicates how many pointers and sg_entries there
1022 * are. Requiring an array of buff pointers allows a caller to do a
1023 * single write/read and scatter into multiple buffers.
1024 * NOTE: Each buffer + len should not cross a page boundary.
1026 static struct bio *_create_sg_bios(struct osd_request *or,
1027 void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
1029 struct request_queue *q = osd_request_queue(or->osd_dev);
1030 struct bio *bio;
1031 unsigned i;
1033 bio = bio_kmalloc(GFP_KERNEL, numentries);
1034 if (unlikely(!bio)) {
1035 OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
1036 return ERR_PTR(-ENOMEM);
1039 for (i = 0; i < numentries; i++) {
1040 unsigned offset = offset_in_page(buff[i]);
1041 struct page *page = virt_to_page(buff[i]);
1042 unsigned len = sglist[i].len;
1043 unsigned added_len;
1045 BUG_ON(offset + len > PAGE_SIZE);
1046 added_len = bio_add_pc_page(q, bio, page, len, offset);
1047 if (unlikely(len != added_len)) {
1048 OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
1049 len, added_len);
1050 bio_put(bio);
1051 return ERR_PTR(-ENOMEM);
1055 return bio;
1058 int osd_req_write_sg_kern(struct osd_request *or,
1059 const struct osd_obj_id *obj, void **buff,
1060 const struct osd_sg_entry *sglist, unsigned numentries)
1062 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1063 if (IS_ERR(bio))
1064 return PTR_ERR(bio);
1066 bio->bi_rw |= REQ_WRITE;
1067 osd_req_write_sg(or, obj, bio, sglist, numentries);
1069 return 0;
1071 EXPORT_SYMBOL(osd_req_write_sg_kern);
1073 int osd_req_read_sg_kern(struct osd_request *or,
1074 const struct osd_obj_id *obj, void **buff,
1075 const struct osd_sg_entry *sglist, unsigned numentries)
1077 struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
1078 if (IS_ERR(bio))
1079 return PTR_ERR(bio);
1081 osd_req_read_sg(or, obj, bio, sglist, numentries);
1083 return 0;
1085 EXPORT_SYMBOL(osd_req_read_sg_kern);
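/*
 * Illustrative sketch (hypothetical caller): the *_sg_kern variants take an
 * array of kernel pointers instead of a ready-made bio. As the NOTE above
 * says, each buff[i] plus its sglist length must not cross a page boundary,
 * so single-page buffers are used here. Allocation and error handling are
 * omitted.
 *
 *	void *buff[2] = { page_address(page0), page_address(page1) };
 *	struct osd_sg_entry sge[2] = {
 *		{ .offset = 0,             .len = PAGE_SIZE },
 *		{ .offset = 4 * PAGE_SIZE, .len = PAGE_SIZE },
 *	};
 *
 *	ret = osd_req_write_sg_kern(or, obj, buff, sge, ARRAY_SIZE(sge));
 */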
1089 void osd_req_get_attributes(struct osd_request *or,
1090 const struct osd_obj_id *obj)
1092 _osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0);
1094 EXPORT_SYMBOL(osd_req_get_attributes);
1096 void osd_req_set_attributes(struct osd_request *or,
1097 const struct osd_obj_id *obj)
1099 _osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0);
1101 EXPORT_SYMBOL(osd_req_set_attributes);
1104 * Attributes List-mode
1107 int osd_req_add_set_attr_list(struct osd_request *or,
1108 const struct osd_attr *oa, unsigned nelem)
1110 unsigned total_bytes = or->set_attr.total_bytes;
1111 void *attr_last;
1112 int ret;
1114 if (or->attributes_mode &&
1115 or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1116 WARN_ON(1);
1117 return -EINVAL;
1119 or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1121 if (!total_bytes) { /* first-time: allocate and put list header */
1122 total_bytes = _osd_req_sizeof_alist_header(or);
1123 ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1124 if (ret)
1125 return ret;
1126 _osd_req_set_alist_type(or, or->set_attr.buff,
1127 OSD_ATTR_LIST_SET_RETRIEVE);
1129 attr_last = or->set_attr.buff + total_bytes;
1131 for (; nelem; --nelem) {
1132 unsigned elem_size = _osd_req_alist_elem_size(or, oa->len);
1134 total_bytes += elem_size;
1135 if (unlikely(or->set_attr.alloc_size < total_bytes)) {
1136 or->set_attr.total_bytes = total_bytes - elem_size;
1137 ret = _alloc_set_attr_list(or, oa, nelem, total_bytes);
1138 if (ret)
1139 return ret;
1140 attr_last =
1141 or->set_attr.buff + or->set_attr.total_bytes;
1144 _osd_req_alist_elem_encode(or, attr_last, oa);
1146 attr_last += elem_size;
1147 ++oa;
1150 or->set_attr.total_bytes = total_bytes;
1151 return 0;
1153 EXPORT_SYMBOL(osd_req_add_set_attr_list);
1155 static int _req_append_segment(struct osd_request *or,
1156 unsigned padding, struct _osd_req_data_segment *seg,
1157 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
1159 void *pad_buff;
1160 int ret;
1162 if (padding) {
1163 /* check if we can just add it to last buffer */
1164 if (last_seg &&
1165 (padding <= last_seg->alloc_size - last_seg->total_bytes))
1166 pad_buff = last_seg->buff + last_seg->total_bytes;
1167 else
1168 pad_buff = io->pad_buff;
1170 ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
1171 or->alloc_flags);
1172 if (ret)
1173 return ret;
1174 io->total_bytes += padding;
1177 ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
1178 or->alloc_flags);
1179 if (ret)
1180 return ret;
1182 io->total_bytes += seg->total_bytes;
1183 OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff,
1184 seg->total_bytes);
1185 return 0;
1188 static int _osd_req_finalize_set_attr_list(struct osd_request *or)
1190 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1191 unsigned padding;
1192 int ret;
1194 if (!or->set_attr.total_bytes) {
1195 cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED;
1196 return 0;
1199 cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes);
1200 cdbh->attrs_list.set_attr_offset =
1201 osd_req_encode_offset(or, or->out.total_bytes, &padding);
1203 ret = _req_append_segment(or, padding, &or->set_attr,
1204 or->out.last_seg, &or->out);
1205 if (ret)
1206 return ret;
1208 or->out.last_seg = &or->set_attr;
1209 return 0;
1212 int osd_req_add_get_attr_list(struct osd_request *or,
1213 const struct osd_attr *oa, unsigned nelem)
1215 unsigned total_bytes = or->enc_get_attr.total_bytes;
1216 void *attr_last;
1217 int ret;
1219 if (or->attributes_mode &&
1220 or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) {
1221 WARN_ON(1);
1222 return -EINVAL;
1224 or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1226 /* first time calc data-in list header size */
1227 if (!or->get_attr.total_bytes)
1228 or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or);
1230 /* calc data-out info */
1231 if (!total_bytes) { /* first-time: allocate and put list header */
1232 unsigned max_bytes;
1234 total_bytes = _osd_req_sizeof_alist_header(or);
1235 max_bytes = total_bytes +
1236 nelem * sizeof(struct osd_attributes_list_attrid);
1237 ret = _alloc_get_attr_desc(or, max_bytes);
1238 if (ret)
1239 return ret;
1241 _osd_req_set_alist_type(or, or->enc_get_attr.buff,
1242 OSD_ATTR_LIST_GET);
1244 attr_last = or->enc_get_attr.buff + total_bytes;
1246 for (; nelem; --nelem) {
1247 struct osd_attributes_list_attrid *attrid;
1248 const unsigned cur_size = sizeof(*attrid);
1250 total_bytes += cur_size;
1251 if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) {
1252 or->enc_get_attr.total_bytes = total_bytes - cur_size;
1253 ret = _alloc_get_attr_desc(or,
1254 total_bytes + nelem * sizeof(*attrid));
1255 if (ret)
1256 return ret;
1257 attr_last = or->enc_get_attr.buff +
1258 or->enc_get_attr.total_bytes;
1261 attrid = attr_last;
1262 attrid->attr_page = cpu_to_be32(oa->attr_page);
1263 attrid->attr_id = cpu_to_be32(oa->attr_id);
1265 attr_last += cur_size;
1267 /* calc data-in size */
1268 or->get_attr.total_bytes +=
1269 _osd_req_alist_elem_size(or, oa->len);
1270 ++oa;
1273 or->enc_get_attr.total_bytes = total_bytes;
1275 OSD_DEBUG(
1276 "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%Zu)\n",
1277 or->get_attr.total_bytes,
1278 or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or),
1279 or->enc_get_attr.total_bytes,
1280 (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or))
1281 / sizeof(struct osd_attributes_list_attrid));
1283 return 0;
1285 EXPORT_SYMBOL(osd_req_add_get_attr_list);
1287 static int _osd_req_finalize_get_attr_list(struct osd_request *or)
1289 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1290 unsigned out_padding;
1291 unsigned in_padding;
1292 int ret;
1294 if (!or->enc_get_attr.total_bytes) {
1295 cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED;
1296 cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED;
1297 return 0;
1300 ret = _alloc_get_attr_list(or);
1301 if (ret)
1302 return ret;
1304 /* The out-going buffer info update */
1305 OSD_DEBUG("out-going\n");
1306 cdbh->attrs_list.get_attr_desc_bytes =
1307 cpu_to_be32(or->enc_get_attr.total_bytes);
1309 cdbh->attrs_list.get_attr_desc_offset =
1310 osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1312 ret = _req_append_segment(or, out_padding, &or->enc_get_attr,
1313 or->out.last_seg, &or->out);
1314 if (ret)
1315 return ret;
1316 or->out.last_seg = &or->enc_get_attr;
1318 /* The incoming buffer info update */
1319 OSD_DEBUG("in-coming\n");
1320 cdbh->attrs_list.get_attr_alloc_length =
1321 cpu_to_be32(or->get_attr.total_bytes);
1323 cdbh->attrs_list.get_attr_offset =
1324 osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1326 ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1327 &or->in);
1328 if (ret)
1329 return ret;
1330 or->in.last_seg = &or->get_attr;
1332 return 0;
1335 int osd_req_decode_get_attr_list(struct osd_request *or,
1336 struct osd_attr *oa, int *nelem, void **iterator)
1338 unsigned cur_bytes, returned_bytes;
1339 int n;
1340 const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or);
1341 void *cur_p;
1343 if (!_osd_req_is_alist_type(or, or->get_attr.buff,
1344 OSD_ATTR_LIST_SET_RETRIEVE)) {
1345 oa->attr_page = 0;
1346 oa->attr_id = 0;
1347 oa->val_ptr = NULL;
1348 oa->len = 0;
1349 *iterator = NULL;
1350 return 0;
1353 if (*iterator) {
1354 BUG_ON((*iterator < or->get_attr.buff) ||
1355 (or->get_attr.buff + or->get_attr.alloc_size < *iterator));
1356 cur_p = *iterator;
1357 cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list;
1358 returned_bytes = or->get_attr.total_bytes;
1359 } else { /* first time decode the list header */
1360 cur_bytes = sizeof_attr_list;
1361 returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) +
1362 sizeof_attr_list;
1364 cur_p = or->get_attr.buff + sizeof_attr_list;
1366 if (returned_bytes > or->get_attr.alloc_size) {
1367 OSD_DEBUG("target report: space was not big enough! "
1368 "Allocate=%u Needed=%u\n",
1369 or->get_attr.alloc_size,
1370 returned_bytes + sizeof_attr_list);
1372 returned_bytes =
1373 or->get_attr.alloc_size - sizeof_attr_list;
1375 or->get_attr.total_bytes = returned_bytes;
1378 for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) {
1379 int inc = _osd_req_alist_elem_decode(or, cur_p, oa,
1380 returned_bytes - cur_bytes);
1382 if (inc < 0) {
1383 OSD_ERR("BAD FOOD from target. list not valid! "
1384 "c=%d r=%d n=%d\n",
1385 cur_bytes, returned_bytes, n);
1386 oa->val_ptr = NULL;
1387 cur_bytes = returned_bytes; /* break the caller loop */
1388 break;
1391 cur_bytes += inc;
1392 cur_p += inc;
1393 ++oa;
1396 *iterator = (returned_bytes - cur_bytes) ? cur_p : NULL;
1397 *nelem = n;
1398 return returned_bytes - cur_bytes;
1400 EXPORT_SYMBOL(osd_req_decode_get_attr_list);
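/*
 * Illustrative decode loop (sketch): the caller reuses the same get_attrs
 * array it passed to osd_req_add_get_attr_list(). *nelem is updated to the
 * number of attributes decoded in this pass, iter is left NULL once the
 * returned list is fully consumed, and a decoded element with
 * val_ptr == NULL marks end-of-list or an error. See
 * _osd_get_print_system_info() above for a complete in-file user.
 *
 *	void *iter = NULL;
 *	int nelem;
 *
 *	do {
 *		nelem = ARRAY_SIZE(get_attrs);
 *		osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter);
 *		(consume get_attrs[0 .. nelem-1] here)
 *	} while (iter);
 */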
1403 * Attributes Page-mode
1406 int osd_req_add_get_attr_page(struct osd_request *or,
1407 u32 page_id, void *attar_page, unsigned max_page_len,
1408 const struct osd_attr *set_one_attr)
1410 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1412 if (or->attributes_mode &&
1413 or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1414 WARN_ON(1);
1415 return -EINVAL;
1417 or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE;
1419 or->get_attr.buff = attar_page;
1420 or->get_attr.total_bytes = max_page_len;
1422 cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
1423 cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
1425 if (!set_one_attr || !set_one_attr->attr_page)
1426 return 0; /* The set is optional */
1428 or->set_attr.buff = set_one_attr->val_ptr;
1429 or->set_attr.total_bytes = set_one_attr->len;
1431 cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
1432 cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
1433 cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
1434 return 0;
1436 EXPORT_SYMBOL(osd_req_add_get_attr_page);
1438 static int _osd_req_finalize_attr_page(struct osd_request *or)
1440 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1441 unsigned in_padding, out_padding;
1442 int ret;
1444 /* returned page */
1445 cdbh->attrs_page.get_attr_offset =
1446 osd_req_encode_offset(or, or->in.total_bytes, &in_padding);
1448 ret = _req_append_segment(or, in_padding, &or->get_attr, NULL,
1449 &or->in);
1450 if (ret)
1451 return ret;
1453 if (or->set_attr.total_bytes == 0)
1454 return 0;
1456 /* set one value */
1457 cdbh->attrs_page.set_attr_offset =
1458 osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
1460 ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
1461 &or->out);
1462 return ret;
1465 static inline void osd_sec_parms_set_out_offset(bool is_v1,
1466 struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1468 if (is_v1)
1469 sec_parms->v1.data_out_integrity_check_offset = offset;
1470 else
1471 sec_parms->v2.data_out_integrity_check_offset = offset;
1474 static inline void osd_sec_parms_set_in_offset(bool is_v1,
1475 struct osd_security_parameters *sec_parms, osd_cdb_offset offset)
1477 if (is_v1)
1478 sec_parms->v1.data_in_integrity_check_offset = offset;
1479 else
1480 sec_parms->v2.data_in_integrity_check_offset = offset;
1483 static int _osd_req_finalize_data_integrity(struct osd_request *or,
1484 bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
1485 const u8 *cap_key)
1487 struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
1488 int ret;
1490 if (!osd_is_sec_alldata(sec_parms))
1491 return 0;
1493 if (has_out) {
1494 struct _osd_req_data_segment seg = {
1495 .buff = &or->out_data_integ,
1496 .total_bytes = sizeof(or->out_data_integ),
1498 unsigned pad;
1500 or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
1501 or->out_data_integ.set_attributes_bytes = cpu_to_be64(
1502 or->set_attr.total_bytes);
1503 or->out_data_integ.get_attributes_bytes = cpu_to_be64(
1504 or->enc_get_attr.total_bytes);
1506 osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms,
1507 osd_req_encode_offset(or, or->out.total_bytes, &pad));
1509 ret = _req_append_segment(or, pad, &seg, or->out.last_seg,
1510 &or->out);
1511 if (ret)
1512 return ret;
1513 or->out.last_seg = NULL;
1515 /* they are now all chained to the request; sign them all together */
1516 osd_sec_sign_data(&or->out_data_integ, out_data_bio,
1517 cap_key);
1520 if (has_in) {
1521 struct _osd_req_data_segment seg = {
1522 .buff = &or->in_data_integ,
1523 .total_bytes = sizeof(or->in_data_integ),
1525 unsigned pad;
1527 osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms,
1528 osd_req_encode_offset(or, or->in.total_bytes, &pad));
1530 ret = _req_append_segment(or, pad, &seg, or->in.last_seg,
1531 &or->in);
1532 if (ret)
1533 return ret;
1535 or->in.last_seg = NULL;
1538 return 0;
1542 * osd_finalize_request and helpers
1544 static struct request *_make_request(struct request_queue *q, bool has_write,
1545 struct _osd_io_info *oii, gfp_t flags)
1547 if (oii->bio)
1548 return blk_make_request(q, oii->bio, flags);
1549 else {
1550 struct request *req;
1552 req = blk_get_request(q, has_write ? WRITE : READ, flags);
1553 if (unlikely(!req))
1554 return ERR_PTR(-ENOMEM);
1556 return req;
1560 static int _init_blk_request(struct osd_request *or,
1561 bool has_in, bool has_out)
1563 gfp_t flags = or->alloc_flags;
1564 struct scsi_device *scsi_device = or->osd_dev->scsi_device;
1565 struct request_queue *q = scsi_device->request_queue;
1566 struct request *req;
1567 int ret;
1569 req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
1570 if (IS_ERR(req)) {
1571 ret = PTR_ERR(req);
1572 goto out;
1575 or->request = req;
1576 req->cmd_type = REQ_TYPE_BLOCK_PC;
1577 req->cmd_flags |= REQ_QUIET;
1579 req->timeout = or->timeout;
1580 req->retries = or->retries;
1581 req->sense = or->sense;
1582 req->sense_len = 0;
1584 if (has_out) {
1585 or->out.req = req;
1586 if (has_in) {
1587 /* allocate bidi request */
1588 req = _make_request(q, false, &or->in, flags);
1589 if (IS_ERR(req)) {
1590 OSD_DEBUG("blk_get_request for bidi failed\n");
1591 ret = PTR_ERR(req);
1592 goto out;
1594 req->cmd_type = REQ_TYPE_BLOCK_PC;
1595 or->in.req = or->request->next_rq = req;
1597 } else if (has_in)
1598 or->in.req = req;
1600 ret = 0;
1601 out:
1602 OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n",
1603 or, has_in, has_out, ret, or->request);
1604 return ret;
1607 int osd_finalize_request(struct osd_request *or,
1608 u8 options, const void *cap, const u8 *cap_key)
1610 struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
1611 bool has_in, has_out;
1612 /* Save for data_integrity without the cdb_continuation */
1613 struct bio *out_data_bio = or->out.bio;
1614 u64 out_data_bytes = or->out.total_bytes;
1615 int ret;
1617 if (options & OSD_REQ_FUA)
1618 cdbh->options |= OSD_CDB_FUA;
1620 if (options & OSD_REQ_DPO)
1621 cdbh->options |= OSD_CDB_DPO;
1623 if (options & OSD_REQ_BYPASS_TIMESTAMPS)
1624 cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS;
1626 osd_set_caps(&or->cdb, cap);
1628 has_in = or->in.bio || or->get_attr.total_bytes;
1629 has_out = or->out.bio || or->cdb_cont.total_bytes ||
1630 or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
1632 ret = _osd_req_finalize_cdb_cont(or, cap_key);
1633 if (ret) {
1634 OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
1635 return ret;
1637 ret = _init_blk_request(or, has_in, has_out);
1638 if (ret) {
1639 OSD_DEBUG("_init_blk_request failed\n");
1640 return ret;
1643 or->out.pad_buff = sg_out_pad_buffer;
1644 or->in.pad_buff = sg_in_pad_buffer;
1646 if (!or->attributes_mode)
1647 or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS;
1648 cdbh->command_specific_options |= or->attributes_mode;
1649 if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
1650 ret = _osd_req_finalize_attr_page(or);
1651 if (ret) {
1652 OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
1653 return ret;
1655 } else {
1656 /* TODO: I think that for the GET_ATTR command these 2 should
1657 * be reversed to keep them in execution order (for embedded
1658 * targets with low memory footprint)
1660 ret = _osd_req_finalize_set_attr_list(or);
1661 if (ret) {
1662 OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n");
1663 return ret;
1666 ret = _osd_req_finalize_get_attr_list(or);
1667 if (ret) {
1668 OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n");
1669 return ret;
1673 ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
1674 out_data_bio, out_data_bytes,
1675 cap_key);
1676 if (ret)
1677 return ret;
1679 osd_sec_sign_cdb(&or->cdb, cap_key);
1681 or->request->cmd = or->cdb.buff;
1682 or->request->cmd_len = _osd_req_cdb_len(or);
1684 return 0;
1686 EXPORT_SYMBOL(osd_finalize_request);
1688 static bool _is_osd_security_code(int code)
1690 return (code == osd_security_audit_value_frozen) ||
1691 (code == osd_security_working_key_frozen) ||
1692 (code == osd_nonce_not_unique) ||
1693 (code == osd_nonce_timestamp_out_of_range) ||
1694 (code == osd_invalid_dataout_buffer_integrity_check_value);
1697 #define OSD_SENSE_PRINT1(fmt, a...) \
1698 do { \
1699 if (__cur_sense_need_output) \
1700 OSD_ERR(fmt, ##a); \
1701 } while (0)
1703 #define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1(" " fmt, ##a)
1705 int osd_req_decode_sense_full(struct osd_request *or,
1706 struct osd_sense_info *osi, bool silent,
1707 struct osd_obj_id *bad_obj_list __unused, int max_obj __unused,
1708 struct osd_attr *bad_attr_list, int max_attr)
1710 int sense_len, original_sense_len;
1711 struct osd_sense_info local_osi;
1712 struct scsi_sense_descriptor_based *ssdb;
1713 void *cur_descriptor;
1714 #if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0)
1715 const bool __cur_sense_need_output = false;
1716 #else
1717 bool __cur_sense_need_output = !silent;
1718 #endif
1719 int ret;
1721 if (likely(!or->req_errors))
1722 return 0;
1724 osi = osi ? : &local_osi;
1725 memset(osi, 0, sizeof(*osi));
1727 ssdb = (typeof(ssdb))or->sense;
1728 sense_len = or->sense_len;
1729 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1730 OSD_ERR("Block-layer returned error(0x%x) but "
1731 "sense_len(%u) || key(%d) is empty\n",
1732 or->req_errors, sense_len, ssdb->sense_key);
1733 goto analyze;
1736 if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) {
1737 OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n",
1738 ssdb->response_code, sense_len);
1739 goto analyze;
1742 osi->key = ssdb->sense_key;
1743 osi->additional_code = be16_to_cpu(ssdb->additional_sense_code);
1744 original_sense_len = ssdb->additional_sense_length + 8;
1746 #if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1)
1747 if (__cur_sense_need_output)
1748 __cur_sense_need_output = (osi->key > scsi_sk_recovered_error);
1749 #endif
1750 OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) "
1751 "additional_code=0x%x async_error=%d errors=0x%x\n",
1752 osi->key, original_sense_len, sense_len,
1753 osi->additional_code, or->async_error,
1754 or->req_errors);
1756 if (original_sense_len < sense_len)
1757 sense_len = original_sense_len;
1759 cur_descriptor = ssdb->ssd;
1760 sense_len -= sizeof(*ssdb);
1761 while (sense_len > 0) {
1762 struct scsi_sense_descriptor *ssd = cur_descriptor;
1763 int cur_len = ssd->additional_length + 2;
1765 sense_len -= cur_len;
1767 if (sense_len < 0)
1768 break; /* sense was truncated */
1770 switch (ssd->descriptor_type) {
1771 case scsi_sense_information:
1772 case scsi_sense_command_specific_information:
1774 struct scsi_sense_command_specific_data_descriptor
1775 *sscd = cur_descriptor;
1777 osi->command_info =
1778 get_unaligned_be64(&sscd->information) ;
1779 OSD_SENSE_PRINT2(
1780 "command_specific_information 0x%llx \n",
1781 _LLU(osi->command_info));
1782 break;
1784 case scsi_sense_key_specific:
1786 struct scsi_sense_key_specific_data_descriptor
1787 *ssks = cur_descriptor;
1789 osi->sense_info = get_unaligned_be16(&ssks->value);
1790 OSD_SENSE_PRINT2(
1791 "sense_key_specific_information %u"
1792 "sksv_cd_bpv_bp (0x%x)\n",
1793 osi->sense_info, ssks->sksv_cd_bpv_bp);
1794 break;
1796 case osd_sense_object_identification:
1797 { /*FIXME: Keep first not last, Store in array*/
1798 struct osd_sense_identification_data_descriptor
1799 *osidd = cur_descriptor;
1801 osi->not_initiated_command_functions =
1802 le32_to_cpu(osidd->not_initiated_functions);
1803 osi->completed_command_functions =
1804 le32_to_cpu(osidd->completed_functions);
1805 osi->obj.partition = be64_to_cpu(osidd->partition_id);
1806 osi->obj.id = be64_to_cpu(osidd->object_id);
1807 OSD_SENSE_PRINT2(
1808 "object_identification pid=0x%llx oid=0x%llx\n",
1809 _LLU(osi->obj.partition), _LLU(osi->obj.id));
1810 OSD_SENSE_PRINT2(
1811 "not_initiated_bits(%x) "
1812 "completed_command_bits(%x)\n",
1813 osi->not_initiated_command_functions,
1814 osi->completed_command_functions);
1815 break;
1817 case osd_sense_response_integrity_check:
1819 struct osd_sense_response_integrity_check_descriptor
1820 *osricd = cur_descriptor;
1821 const unsigned len =
1822 sizeof(osricd->integrity_check_value);
1823 char key_dump[len*4 + 2]; /* 2nibbles+space+ASCII */
1825 hex_dump_to_buffer(osricd->integrity_check_value, len,
1826 32, 1, key_dump, sizeof(key_dump), true);
1827 OSD_SENSE_PRINT2("response_integrity [%s]\n", key_dump);
1829 case osd_sense_attribute_identification:
1831 struct osd_sense_attributes_data_descriptor
1832 *osadd = cur_descriptor;
1833 unsigned len = min(cur_len, sense_len);
1834 struct osd_sense_attr *pattr = osadd->sense_attrs;
1836 while (len >= sizeof(*pattr)) {
1837 u32 attr_page = be32_to_cpu(pattr->attr_page);
1838 u32 attr_id = be32_to_cpu(pattr->attr_id);
1840 if (!osi->attr.attr_page) {
1841 osi->attr.attr_page = attr_page;
1842 osi->attr.attr_id = attr_id;
1845 if (bad_attr_list && max_attr) {
1846 bad_attr_list->attr_page = attr_page;
1847 bad_attr_list->attr_id = attr_id;
1848 bad_attr_list++;
1849 max_attr--;
1852 len -= sizeof(*pattr);
1853 OSD_SENSE_PRINT2(
1854 "osd_sense_attribute_identification"
1855 "attr_page=0x%x attr_id=0x%x\n",
1856 attr_page, attr_id);
1859 /*These are not legal for OSD*/
1860 case scsi_sense_field_replaceable_unit:
1861 OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n");
1862 break;
1863 case scsi_sense_stream_commands:
1864 OSD_SENSE_PRINT2("scsi_sense_stream_commands\n");
1865 break;
1866 case scsi_sense_block_commands:
1867 OSD_SENSE_PRINT2("scsi_sense_block_commands\n");
1868 break;
1869 case scsi_sense_ata_return:
1870 OSD_SENSE_PRINT2("scsi_sense_ata_return\n");
1871 break;
1872 default:
1873 if (ssd->descriptor_type <= scsi_sense_Reserved_last)
1874 OSD_SENSE_PRINT2(
1875 "scsi_sense Reserved descriptor (0x%x)",
1876 ssd->descriptor_type);
1877 else
1878 OSD_SENSE_PRINT2(
1879 "scsi_sense Vendor descriptor (0x%x)",
1880 ssd->descriptor_type);
1883 cur_descriptor += cur_len;
1886 analyze:
1887 if (!osi->key) {
1888 /* scsi sense is empty, the request was never issued to the target;
1889 * the linux return code might tell us what happened.
1891 if (or->async_error == -ENOMEM)
1892 osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
1893 else
1894 osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
1895 ret = or->async_error;
1896 } else if (osi->key <= scsi_sk_recovered_error) {
1897 osi->osd_err_pri = 0;
1898 ret = 0;
1899 } else if (osi->additional_code == scsi_invalid_field_in_cdb) {
1900 if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) {
1901 osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES;
1902 ret = -EFAULT; /* caller should recover from this */
1903 } else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) {
1904 osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND;
1905 ret = -ENOENT;
1906 } else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) {
1907 osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS;
1908 ret = -EACCES;
1909 } else {
1910 osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1911 ret = -EINVAL;
1913 } else if (osi->additional_code == osd_quota_error) {
1914 osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE;
1915 ret = -ENOSPC;
1916 } else if (_is_osd_security_code(osi->additional_code)) {
1917 osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED;
1918 ret = -EINVAL;
1919 } else {
1920 osi->osd_err_pri = OSD_ERR_PRI_EIO;
1921 ret = -EIO;
1924 if (!or->out.residual)
1925 or->out.residual = or->out.total_bytes;
1926 if (!or->in.residual)
1927 or->in.residual = or->in.total_bytes;
1929 return ret;
1931 EXPORT_SYMBOL(osd_req_decode_sense_full);
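/*
 * Illustrative error-handling sketch (hypothetical caller): after a failed
 * osd_execute_request() the sense data is decoded to choose a recovery
 * strategy. The silent=true / NULL bad-lists call form simply asks for the
 * classified osd_err_pri and errno.
 *
 *	ret = osd_execute_request(or);
 *	if (ret) {
 *		struct osd_sense_info osi;
 *
 *		ret = osd_req_decode_sense_full(or, &osi, true,
 *						NULL, 0, NULL, 0);
 *		if (osi.osd_err_pri == OSD_ERR_PRI_NOT_FOUND)
 *			(object does not exist; ret is -ENOENT)
 *	}
 */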
1934 * Implementation of osd_sec.h API
1935 * TODO: Move to a separate osd_sec.c file at a later stage.
1938 enum { OSD_SEC_CAP_V1_ALL_CAPS =
1939 OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE |
1940 OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR |
1941 OSD_SEC_CAP_WRITE | OSD_SEC_CAP_READ | OSD_SEC_CAP_POL_SEC |
1942 OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT
1945 enum { OSD_SEC_CAP_V2_ALL_CAPS =
1946 OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT
1949 void osd_sec_init_nosec_doall_caps(void *caps,
1950 const struct osd_obj_id *obj, bool is_collection, const bool is_v1)
1952 struct osd_capability *cap = caps;
1953 u8 type;
1954 u8 descriptor_type;
1956 if (likely(obj->id)) {
1957 if (unlikely(is_collection)) {
1958 type = OSD_SEC_OBJ_COLLECTION;
1959 descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ :
1960 OSD_SEC_OBJ_DESC_COL;
1961 } else {
1962 type = OSD_SEC_OBJ_USER;
1963 descriptor_type = OSD_SEC_OBJ_DESC_OBJ;
1965 WARN_ON(!obj->partition);
1966 } else {
1967 type = obj->partition ? OSD_SEC_OBJ_PARTITION :
1968 OSD_SEC_OBJ_ROOT;
1969 descriptor_type = OSD_SEC_OBJ_DESC_PAR;
1972 memset(cap, 0, sizeof(*cap));
1974 cap->h.format = OSD_SEC_CAP_FORMAT_VER1;
1975 cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */
1976 cap->h.security_method = OSD_SEC_NOSEC;
1977 /* cap->expiration_time;
1978 cap->AUDIT[30-10];
1979 cap->discriminator[42-30];
1980 cap->object_created_time; */
1981 cap->h.object_type = type;
1982 osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS);
1983 cap->h.object_descriptor_type = descriptor_type;
1984 cap->od.obj_desc.policy_access_tag = 0;
1985 cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition);
1986 cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id);
1988 EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
1990 /* FIXME: Extract version from caps pointer.
1991 * Also Pete's target only supports caps from OSDv1 for now
1993 void osd_set_caps(struct osd_cdb *cdb, const void *caps)
1995 bool is_ver1 = true;
1996 /* NOTE: They start at same address */
1997 memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
2000 bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
2002 return false;
2005 void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused)
2009 void osd_sec_sign_data(void *data_integ __unused,
2010 struct bio *bio __unused, const u8 *cap_key __unused)
2015 * Declared in osd_protocol.h
2016 * 4.12.5 Data-In and Data-Out buffer offsets
2017 * byte offset = mantissa * (2^(exponent+8))
2018 * Returns the smallest allowed encoded offset that contains given @offset
2019 * The actual encoded offset returned is @offset + *@padding.
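 *
 * Worked example (assuming a minimum shift of 8, i.e. exponent 0): an
 * offset of 4096 encodes as mantissa 4096 >> 8 = 16 with *padding = 0.
 * An offset of 4100 is not 256-byte aligned, so the mantissa is rounded
 * up to 17 and *padding becomes 252; the encoded offset then addresses
 * byte 17 * 256 = 4352 = 4100 + 252.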
2021 osd_cdb_offset __osd_encode_offset(
2022 u64 offset, unsigned *padding, int min_shift, int max_shift)
2024 u64 try_offset = -1, mod, align;
2025 osd_cdb_offset be32_offset;
2026 int shift;
2028 *padding = 0;
2029 if (!offset)
2030 return 0;
2032 for (shift = min_shift; shift < max_shift; ++shift) {
2033 try_offset = offset >> shift;
2034 if (try_offset < (1 << OSD_OFFSET_MAX_BITS))
2035 break;
2038 BUG_ON(shift == max_shift);
2040 align = 1 << shift;
2041 mod = offset & (align - 1);
2042 if (mod) {
2043 *padding = align - mod;
2044 try_offset += 1;
2047 try_offset |= ((shift - 8) & 0xf) << 28;
2048 be32_offset = cpu_to_be32((u32)try_offset);
2050 OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n",
2051 _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift,
2052 be32_offset, *padding);
2053 return be32_offset;