// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 * Based on.......: linux/drivers/s390/block/mdisk.c
 * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2000
 */
#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>

#include <asm/asm-extable.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_diag.h"
MODULE_DESCRIPTION("S/390 Support for DIAG access to DASD Disks");
MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
 * amount of storage that is available in the static I/O buffer for each
 * device. Currently each device gets 2 pages. We want to fit two requests
 * into the available memory so that we can immediately start the next if one
 * finishes. */
#define DIAG_MAX_BLOCKS	(((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
			   sizeof(struct dasd_diag_req)) / \
			   sizeof(struct dasd_diag_bio)) / 2)
#define DIAG_MAX_RETRIES	32
#define DIAG_TIMEOUT		50
static struct dasd_discipline dasd_diag_discipline;
struct dasd_diag_private {
	struct dasd_diag_characteristics rdc_data;
	struct dasd_diag_rw_io iob;
	struct dasd_diag_init_io iib;
	blocknum_t pt_block;
	struct ccw_dev_id dev_id;
};
struct dasd_diag_req {
	unsigned int block_count;
	struct dasd_diag_bio bio[];
};
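
/*
 * The flexible bio[] array above is sized per request with struct_size() in
 * dasd_diag_build_cp(), one entry for each block covered by the request.
 */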
static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };	/* EBCDIC CMS1 */
/* Perform DIAG250 call with block I/O parameter list iob (input and output)
 * and function code cmd.
 * In case of an exception return 3. Otherwise return result of bitwise OR of
 * resulting condition code and DIAG return code. */
static inline int __dia250(void *iob, int cmd)
{
	union register_pair rx = { .even = (unsigned long)iob, };
	typedef union {
		struct dasd_diag_init_io init_io;
		struct dasd_diag_rw_io rw_io;
	} addr_type;
	int cc, exception;

	exception = 1;
	asm volatile(
		"	diag	%[rx],%[cmd],0x250\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [rx] "+d" (rx.pair),
		  "+m" (*(addr_type *)iob), [exc] "+d" (exception)
		: [cmd] "d" (cmd)
		: CC_CLOBBER);
	cc = exception ? 3 : CC_TRANSFORM(cc);
	return cc | rx.odd;
}
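
/* Account the call in the s390 DIAG statistics, then issue DIAG X'250'. */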
static inline int dia250(void *iob, int cmd)
{
	diag_stat_inc(DIAG_STAT_X250);
	return __dia250(iob, cmd);
}
/* Initialize block I/O to DIAG device using the specified blocksize and
 * block offset. On success, return zero and set end_block to contain the
 * number of blocks on the device minus the specified offset. Return non-zero
 * otherwise. */
static inline int
mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
	     blocknum_t offset, blocknum_t *end_block)
{
	struct dasd_diag_private *private = device->private;
	struct dasd_diag_init_io *iib = &private->iib;
	int rc;

	memset(iib, 0, sizeof (struct dasd_diag_init_io));

	iib->dev_nr = private->dev_id.devno;
	iib->block_size = blocksize;
	iib->offset = offset;
	iib->flaga = DASD_DIAG_FLAGA_DEFAULT;

	rc = dia250(iib, INIT_BIO);

	if ((rc & 3) == 0 && end_block)
		*end_block = iib->end_block;

	return rc;
}
/* Remove block I/O environment for device. Return zero on success, non-zero
 * otherwise. */
static inline int
mdsk_term_io(struct dasd_device * device)
{
	struct dasd_diag_private *private = device->private;
	struct dasd_diag_init_io *iib = &private->iib;
	int rc;

	memset(iib, 0, sizeof (struct dasd_diag_init_io));
	iib->dev_nr = private->dev_id.devno;
	rc = dia250(iib, TERM_BIO);
	return rc;
}
/* Error recovery for failed DIAG requests - try to reestablish the DIAG
 * environment. */
static void
dasd_diag_erp(struct dasd_device *device)
{
	int rc;

	mdsk_term_io(device);
	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
	if (rc == 4) {
		if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
			pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
				dev_name(&device->cdev->dev));
		rc = 0;
	}
	if (rc)
		pr_warn("%s: DIAG ERP failed with rc=%d\n",
			dev_name(&device->cdev->dev), rc);
}
/* Start a given request at the device. Return zero on success, non-zero
 * otherwise. */
static int
dasd_start_diag(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	struct dasd_diag_private *private;
	struct dasd_diag_req *dreq;
	int rc;

	device = cqr->startdev;
	if (cqr->retries < 0) {
		DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
			      "- no retry left)", cqr);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	private = device->private;
	dreq = (struct dasd_diag_req *) cqr->data;

	private->iob.dev_nr = private->dev_id.devno;
	private->iob.key = 0;
	private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
	private->iob.block_count = dreq->block_count;
	private->iob.interrupt_params = (addr_t) cqr;
	private->iob.bio_list = dreq->bio;
	private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;

	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;

	rc = dia250(&private->iob, RW_BIO);
	switch (rc) {
	case 0: /* Synchronous I/O finished successfully */
		cqr->stopclk = get_tod_clock();
		cqr->status = DASD_CQR_SUCCESS;
		/* Indicate to calling function that only a dasd_schedule_bh()
		   and no timer is needed */
		rc = -EACCES;
		break;
	case 8: /* Asynchronous I/O was started */
		cqr->status = DASD_CQR_IN_IO;
		rc = 0;
		break;
	default: /* Error condition */
		cqr->status = DASD_CQR_QUEUED;
		DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
		dasd_diag_erp(device);
		rc = -EIO;
		break;
	}
	cqr->intrc = rc;
	return rc;
}
/* Terminate given request at the device. */
static int
dasd_diag_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;

	device = cqr->startdev;
	mdsk_term_io(device);
	mdsk_init_io(device, device->block->bp_block, 0, NULL);
	cqr->status = DASD_CQR_CLEAR_PENDING;
	cqr->stopclk = get_tod_clock();
	dasd_schedule_device_bh(device);
	return 0;
}
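
/*
 * Completion of an asynchronous DIAG X'250' request is signalled by an
 * external interrupt; its interrupt parameter carries the address of the
 * dasd_ccw_req that was stored in iob.interrupt_params when the request
 * was started.
 */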
/* Handle external interruption. */
static void dasd_ext_handler(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long expires;
	unsigned long flags;
	addr_t ip;
	int rc;

	switch (ext_code.subcode >> 8) {
	case DASD_DIAG_CODE_31BIT:
		ip = (addr_t) param32;
		break;
	case DASD_DIAG_CODE_64BIT:
		ip = (addr_t) param64;
		break;
	default:
		return;
	}
	inc_irq_stat(IRQEXT_DSD);
	if (!ip) {		/* no intparm: unsolicited interrupt */
		DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
			  "interrupt");
		return;
	}
	cqr = (struct dasd_ccw_req *) ip;
	device = (struct dasd_device *) cqr->startdev;
	if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " magic number of dasd_ccw_req 0x%08X doesn't"
			      " match discipline 0x%08X",
			      cqr->magic, *(int *) (&device->discipline->name));
		return;
	}

	/* get irq lock to modify request queue */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);

	/* Check for a pending clear operation */
	if (cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return;
	}

	cqr->stopclk = get_tod_clock();

	expires = 0;
	if ((ext_code.subcode & 0xff) == 0) {
		cqr->status = DASD_CQR_SUCCESS;
		/* Start first request on queue if possible -> fast_io. */
		if (!list_empty(&device->ccw_queue)) {
			next = list_entry(device->ccw_queue.next,
					  struct dasd_ccw_req, devlist);
			if (next->status == DASD_CQR_QUEUED) {
				rc = dasd_start_diag(next);
				if (rc == 0)
					expires = next->expires;
			}
		}
	} else {
		cqr->status = DASD_CQR_QUEUED;
		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
			      "request %p was %d (%d retries left)", cqr,
			      ext_code.subcode & 0xff, cqr->retries);
		dasd_diag_erp(device);
	}

	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);

	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/* Check whether device can be controlled by DIAG discipline. Return zero on
 * success, non-zero otherwise. */
static int
dasd_diag_check_device(struct dasd_device *device)
{
	struct dasd_diag_private *private = device->private;
	struct dasd_diag_characteristics *rdc_data;
	struct vtoc_cms_label *label;
	struct dasd_block *block;
	struct dasd_diag_bio *bio;
	unsigned int sb, bsize;
	blocknum_t end_block;
	int rc;

	if (private == NULL) {
		private = kzalloc(sizeof(*private), GFP_KERNEL);
		if (private == NULL) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Allocating memory for private DASD data "
				      "failed\n");
			return -ENOMEM;
		}
		ccw_device_get_id(device->cdev, &private->dev_id);
		device->private = private;
	}
	block = dasd_alloc_block();
	if (IS_ERR(block)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "could not allocate dasd block structure");
		device->private = NULL;
		kfree(private);
		return PTR_ERR(block);
	}
	device->block = block;
	block->base = device;

	/* Read Device Characteristics */
	rdc_data = &private->rdc_data;
	rdc_data->dev_nr = private->dev_id.devno;
	rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);

	rc = diag210((struct diag210 *) rdc_data);
	if (rc) {
		DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
			      "information (rc=%d)", rc);
		rc = -EOPNOTSUPP;
		goto out;
	}

	device->default_expires = DIAG_TIMEOUT;
	device->default_retries = DIAG_MAX_RETRIES;

	/* Figure out position of label block */
	switch (private->rdc_data.vdev_class) {
	case DEV_CLASS_FBA:
		private->pt_block = 1;
		break;
	case DEV_CLASS_ECKD:
		private->pt_block = 2;
		break;
	default:
		pr_warn("%s: Device type %d is not supported in DIAG mode\n",
			dev_name(&device->cdev->dev),
			private->rdc_data.vdev_class);
		rc = -EOPNOTSUPP;
		goto out;
	}

	DBF_DEV_EVENT(DBF_INFO, device,
		      "%04X: %04X on real %04X/%02X",
		      rdc_data->dev_nr,
		      rdc_data->vdev_type,
		      rdc_data->rdev_type, rdc_data->rdev_model);

	/* terminate all outstanding operations */
	mdsk_term_io(device);

	/* figure out blocksize of device */
	label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
	if (label == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to allocate initialization request");
		rc = -ENOMEM;
		goto out;
	}
	bio = kzalloc(sizeof(*bio), GFP_KERNEL);
	if (bio == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to allocate initialization bio");
		rc = -ENOMEM;
		goto out_label;
	}
	rc = 0;
	end_block = 0;
	/* try all sizes - needed for ECKD devices */
	for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
		mdsk_init_io(device, bsize, 0, &end_block);
		memset(bio, 0, sizeof(*bio));
		bio->type = MDSK_READ_REQ;
		bio->block_number = private->pt_block + 1;
		bio->buffer = label;
		memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
		private->iob.dev_nr = rdc_data->dev_nr;
		private->iob.key = 0;
		private->iob.flags = 0;	/* do synchronous io */
		private->iob.block_count = 1;
		private->iob.interrupt_params = 0;
		private->iob.bio_list = bio;
		private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
		rc = dia250(&private->iob, RW_BIO);
		if (rc == 3) {
			pr_warn("%s: A 64-bit DIAG call failed\n",
				dev_name(&device->cdev->dev));
			rc = -EOPNOTSUPP;
			goto out_bio;
		}
		mdsk_term_io(device);
		if (rc == 0)
			break;
	}
	if (bsize > PAGE_SIZE) {
		pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
			dev_name(&device->cdev->dev), rc);
		rc = -EIO;
		goto out_bio;
	}
	/* check for label block */
	if (memcmp(label->label_id, DASD_DIAG_CMS1,
		   sizeof(DASD_DIAG_CMS1)) == 0) {
		/* get formatted blocksize from label block */
		bsize = (unsigned int) label->block_size;
		block->blocks = (unsigned long) label->block_count;
	} else
		block->blocks = end_block;
	block->bp_block = bsize;
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < bsize; sb = sb << 1)
		block->s2b_shift++;
	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
	if (rc && (rc != 4)) {
		pr_warn("%s: DIAG initialization failed with rc=%d\n",
			dev_name(&device->cdev->dev), rc);
		rc = -EIO;
	} else {
		if (rc == 4)
			set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
		pr_info("%s: New DASD with %ld byte/block, total size %ld "
			"KB%s\n", dev_name(&device->cdev->dev),
			(unsigned long) block->bp_block,
			(unsigned long) (block->blocks <<
					 block->s2b_shift) >> 1,
			(rc == 4) ? ", read-only device" : "");
		rc = 0;
	}
out_bio:
	kfree(bio);
out_label:
	free_page((long) label);
out:
	if (rc) {
		device->block = NULL;
		dasd_free_block(block);
		device->private = NULL;
		kfree(private);
	}
	return rc;
}
/* Fill in virtual disk geometry for device. Return zero on success, non-zero
 * otherwise. */
static int
dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	if (dasd_check_blocksize(block->bp_block) != 0)
		return -EINVAL;
	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
	geo->heads = 16;
	geo->sectors = 128 >> block->s2b_shift;
	return 0;
}
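
/*
 * DIAG has no discipline specific error recovery; both hooks below simply
 * select the default ERP action and postaction of the DASD core.
 */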
static dasd_erp_fn_t
dasd_diag_erp_action(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_action;
}

static dasd_erp_fn_t
dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
/* Create DASD request from block device request. Return pointer to new
 * request on success, ERR_PTR otherwise. */
static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
					       struct dasd_block *block,
					       struct request *req)
{
	struct dasd_ccw_req *cqr;
	struct dasd_diag_req *dreq;
	struct dasd_diag_bio *dbio;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int count;
	sector_t recid, first_rec, last_rec;
	unsigned int blksize, off;
	unsigned char rw_cmd;

	if (rq_data_dir(req) == READ)
		rw_cmd = MDSK_READ_REQ;
	else if (rq_data_dir(req) == WRITE)
		rw_cmd = MDSK_WRITE_REQ;
	else
		return ERR_PTR(-EINVAL);
	blksize = block->bp_block;
	/* Calculate record id of first and last block. */
	first_rec = blk_rq_pos(req) >> block->s2b_shift;
	last_rec =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Fba can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* Build the request */
	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),
				   memdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	dreq = (struct dasd_diag_req *) cqr->data;
	dreq->block_count = count;
	dbio = dreq->bio;
	recid = first_rec;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			memset(dbio, 0, sizeof (struct dasd_diag_bio));
			dbio->type = rw_cmd;
			dbio->block_number = recid + 1;
			dbio->buffer = dst;
			dbio++;
			dst += blksize;
			recid++;
		}
	}
	cqr->retries = memdev->default_retries;
	cqr->buildclk = get_tod_clock();
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = memdev;
	cqr->memdev = memdev;
	cqr->block = block;
	cqr->expires = memdev->default_expires * HZ;
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/* Release DASD request. Return non-zero if request was successful, zero
 * otherwise. */
static int
dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	int status;

	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
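
/*
 * Requeue a terminated request for another attempt unless its retry count
 * is exhausted, in which case it is failed for good.
 */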
static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	if (cqr->retries < 0)
		cqr->status = DASD_CQR_FAILED;
	else
		cqr->status = DASD_CQR_FILLED;
}
/* Fill in IOCTL data for device. */
static int
dasd_diag_fill_info(struct dasd_device * device,
		    struct dasd_information2_t * info)
{
	struct dasd_diag_private *private = device->private;

	info->label_block = (unsigned int) private->pt_block;
	info->FBA_layout = 1;
	info->format = DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(private->rdc_data);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(private->rdc_data));
	info->confdata_size = 0;
	return 0;
}
static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
		     struct irb *stat)
{
	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
		      "dump sense not available for DIAG data");
}
static unsigned int dasd_diag_max_sectors(struct dasd_block *block)
{
	return DIAG_MAX_BLOCKS << block->s2b_shift;
}
static int dasd_diag_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	return dasd_generic_verify_path(device, tbvpm);
}
static struct dasd_discipline dasd_diag_discipline = {
	.owner = THIS_MODULE,
	.name = "DIAG",
	.ebcname = "DIAG",
	.max_sectors = dasd_diag_max_sectors,
	.check_device = dasd_diag_check_device,
	.pe_handler = dasd_diag_pe_handler,
	.fill_geometry = dasd_diag_fill_geometry,
	.start_IO = dasd_start_diag,
	.term_IO = dasd_diag_term_IO,
	.handle_terminated_request = dasd_diag_handle_terminated_request,
	.erp_action = dasd_diag_erp_action,
	.erp_postaction = dasd_diag_erp_postaction,
	.build_cp = dasd_diag_build_cp,
	.free_cp = dasd_diag_free_cp,
	.dump_sense = dasd_diag_dump_sense,
	.fill_info = dasd_diag_fill_info,
};
static int __init
dasd_diag_init(void)
{
	if (!MACHINE_IS_VM) {
		pr_info("Discipline %s cannot be used without z/VM\n",
			dasd_diag_discipline.name);
		return -ENODEV;
	}
	ASCEBC(dasd_diag_discipline.ebcname, 4);

	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
	dasd_diag_discipline_pointer = &dasd_diag_discipline;
	return 0;
}
static void __exit
dasd_diag_cleanup(void)
{
	unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
	dasd_diag_discipline_pointer = NULL;
}
module_init(dasd_diag_init);
module_exit(dasd_diag_cleanup);