// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 * Based on.......: linux/drivers/s390/block/mdisk.c
 * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2000
 *
 */
#define KMSG_COMPONENT "dasd"

#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>

#include <asm/dasd.h>
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>

#include "dasd_int.h"
#include "dasd_diag.h"

#define PRINTK_HEADER "dasd(diag):"

MODULE_LICENSE("GPL");
38 /* The maximum number of blocks per request (max_blocks) is dependent on the
39 * amount of storage that is available in the static I/O buffer for each
40 * device. Currently each device gets 2 pages. We want to fit two requests
41 * into the available memory so that we can immediately start the next if one
43 #define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
44 sizeof(struct dasd_diag_req)) / \
45 sizeof(struct dasd_diag_bio)) / 2)
46 #define DIAG_MAX_RETRIES 32
47 #define DIAG_TIMEOUT 50
49 static struct dasd_discipline dasd_diag_discipline
;
51 struct dasd_diag_private
{
52 struct dasd_diag_characteristics rdc_data
;
53 struct dasd_diag_rw_io iob
;
54 struct dasd_diag_init_io iib
;
56 struct ccw_dev_id dev_id
;
59 struct dasd_diag_req
{
60 unsigned int block_count
;
61 struct dasd_diag_bio bio
[];
64 static const u8 DASD_DIAG_CMS1
[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
66 /* Perform DIAG250 call with block I/O parameter list iob (input and output)
67 * and function code cmd.
68 * In case of an exception return 3. Otherwise return result of bitwise OR of
69 * resulting condition code and DIAG return code. */
70 static inline int __dia250(void *iob
, int cmd
)
72 register unsigned long reg2
asm ("2") = (unsigned long) iob
;
74 struct dasd_diag_init_io init_io
;
75 struct dasd_diag_rw_io rw_io
;
87 : "+d" (rc
), "=m" (*(addr_type
*) iob
)
88 : "d" (cmd
), "d" (reg2
), "m" (*(addr_type
*) iob
)
93 static inline int dia250(void *iob
, int cmd
)
95 diag_stat_inc(DIAG_STAT_X250
);
96 return __dia250(iob
, cmd
);
99 /* Initialize block I/O to DIAG device using the specified blocksize and
100 * block offset. On success, return zero and set end_block to contain the
101 * number of blocks on the device minus the specified offset. Return non-zero
104 mdsk_init_io(struct dasd_device
*device
, unsigned int blocksize
,
105 blocknum_t offset
, blocknum_t
*end_block
)
107 struct dasd_diag_private
*private = device
->private;
108 struct dasd_diag_init_io
*iib
= &private->iib
;
111 memset(iib
, 0, sizeof (struct dasd_diag_init_io
));
113 iib
->dev_nr
= private->dev_id
.devno
;
114 iib
->block_size
= blocksize
;
115 iib
->offset
= offset
;
116 iib
->flaga
= DASD_DIAG_FLAGA_DEFAULT
;
118 rc
= dia250(iib
, INIT_BIO
);
120 if ((rc
& 3) == 0 && end_block
)
121 *end_block
= iib
->end_block
;
126 /* Remove block I/O environment for device. Return zero on success, non-zero
129 mdsk_term_io(struct dasd_device
* device
)
131 struct dasd_diag_private
*private = device
->private;
132 struct dasd_diag_init_io
*iib
= &private->iib
;
135 memset(iib
, 0, sizeof (struct dasd_diag_init_io
));
136 iib
->dev_nr
= private->dev_id
.devno
;
137 rc
= dia250(iib
, TERM_BIO
);
141 /* Error recovery for failed DIAG requests - try to reestablish the DIAG
144 dasd_diag_erp(struct dasd_device
*device
)
148 mdsk_term_io(device
);
149 rc
= mdsk_init_io(device
, device
->block
->bp_block
, 0, NULL
);
151 if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO
, &device
->flags
)))
152 pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
153 dev_name(&device
->cdev
->dev
));
157 pr_warn("%s: DIAG ERP failed with rc=%d\n",
158 dev_name(&device
->cdev
->dev
), rc
);
161 /* Start a given request at the device. Return zero on success, non-zero
164 dasd_start_diag(struct dasd_ccw_req
* cqr
)
166 struct dasd_device
*device
;
167 struct dasd_diag_private
*private;
168 struct dasd_diag_req
*dreq
;
171 device
= cqr
->startdev
;
172 if (cqr
->retries
< 0) {
173 DBF_DEV_EVENT(DBF_ERR
, device
, "DIAG start_IO: request %p "
174 "- no retry left)", cqr
);
175 cqr
->status
= DASD_CQR_ERROR
;
178 private = device
->private;
181 private->iob
.dev_nr
= private->dev_id
.devno
;
182 private->iob
.key
= 0;
183 private->iob
.flags
= DASD_DIAG_RWFLAG_ASYNC
;
184 private->iob
.block_count
= dreq
->block_count
;
185 private->iob
.interrupt_params
= (addr_t
) cqr
;
186 private->iob
.bio_list
= dreq
->bio
;
187 private->iob
.flaga
= DASD_DIAG_FLAGA_DEFAULT
;
189 cqr
->startclk
= get_tod_clock();
190 cqr
->starttime
= jiffies
;
193 rc
= dia250(&private->iob
, RW_BIO
);
195 case 0: /* Synchronous I/O finished successfully */
196 cqr
->stopclk
= get_tod_clock();
197 cqr
->status
= DASD_CQR_SUCCESS
;
198 /* Indicate to calling function that only a dasd_schedule_bh()
199 and no timer is needed */
202 case 8: /* Asynchronous I/O was started */
203 cqr
->status
= DASD_CQR_IN_IO
;
206 default: /* Error condition */
207 cqr
->status
= DASD_CQR_QUEUED
;
208 DBF_DEV_EVENT(DBF_WARNING
, device
, "dia250 returned rc=%d", rc
);
209 dasd_diag_erp(device
);
217 /* Terminate given request at the device. */
219 dasd_diag_term_IO(struct dasd_ccw_req
* cqr
)
221 struct dasd_device
*device
;
223 device
= cqr
->startdev
;
224 mdsk_term_io(device
);
225 mdsk_init_io(device
, device
->block
->bp_block
, 0, NULL
);
226 cqr
->status
= DASD_CQR_CLEAR_PENDING
;
227 cqr
->stopclk
= get_tod_clock();
228 dasd_schedule_device_bh(device
);
232 /* Handle external interruption. */
233 static void dasd_ext_handler(struct ext_code ext_code
,
234 unsigned int param32
, unsigned long param64
)
236 struct dasd_ccw_req
*cqr
, *next
;
237 struct dasd_device
*device
;
238 unsigned long expires
;
243 switch (ext_code
.subcode
>> 8) {
244 case DASD_DIAG_CODE_31BIT
:
245 ip
= (addr_t
) param32
;
247 case DASD_DIAG_CODE_64BIT
:
248 ip
= (addr_t
) param64
;
253 inc_irq_stat(IRQEXT_DSD
);
254 if (!ip
) { /* no intparm: unsolicited interrupt */
255 DBF_EVENT(DBF_NOTICE
, "%s", "caught unsolicited "
259 cqr
= (struct dasd_ccw_req
*) ip
;
260 device
= (struct dasd_device
*) cqr
->startdev
;
261 if (strncmp(device
->discipline
->ebcname
, (char *) &cqr
->magic
, 4)) {
262 DBF_DEV_EVENT(DBF_WARNING
, device
,
263 " magic number of dasd_ccw_req 0x%08X doesn't"
264 " match discipline 0x%08X",
265 cqr
->magic
, *(int *) (&device
->discipline
->name
));
269 /* get irq lock to modify request queue */
270 spin_lock_irqsave(get_ccwdev_lock(device
->cdev
), flags
);
272 /* Check for a pending clear operation */
273 if (cqr
->status
== DASD_CQR_CLEAR_PENDING
) {
274 cqr
->status
= DASD_CQR_CLEARED
;
275 dasd_device_clear_timer(device
);
276 dasd_schedule_device_bh(device
);
277 spin_unlock_irqrestore(get_ccwdev_lock(device
->cdev
), flags
);
281 cqr
->stopclk
= get_tod_clock();
284 if ((ext_code
.subcode
& 0xff) == 0) {
285 cqr
->status
= DASD_CQR_SUCCESS
;
286 /* Start first request on queue if possible -> fast_io. */
287 if (!list_empty(&device
->ccw_queue
)) {
288 next
= list_entry(device
->ccw_queue
.next
,
289 struct dasd_ccw_req
, devlist
);
290 if (next
->status
== DASD_CQR_QUEUED
) {
291 rc
= dasd_start_diag(next
);
293 expires
= next
->expires
;
297 cqr
->status
= DASD_CQR_QUEUED
;
298 DBF_DEV_EVENT(DBF_DEBUG
, device
, "interrupt status for "
299 "request %p was %d (%d retries left)", cqr
,
300 ext_code
.subcode
& 0xff, cqr
->retries
);
301 dasd_diag_erp(device
);
305 dasd_device_set_timer(device
, expires
);
307 dasd_device_clear_timer(device
);
308 dasd_schedule_device_bh(device
);
310 spin_unlock_irqrestore(get_ccwdev_lock(device
->cdev
), flags
);
313 /* Check whether device can be controlled by DIAG discipline. Return zero on
314 * success, non-zero otherwise. */
316 dasd_diag_check_device(struct dasd_device
*device
)
318 struct dasd_diag_private
*private = device
->private;
319 struct dasd_diag_characteristics
*rdc_data
;
320 struct vtoc_cms_label
*label
;
321 struct dasd_block
*block
;
322 struct dasd_diag_bio
*bio
;
323 unsigned int sb
, bsize
;
324 blocknum_t end_block
;
327 if (private == NULL
) {
328 private = kzalloc(sizeof(*private), GFP_KERNEL
);
329 if (private == NULL
) {
330 DBF_DEV_EVENT(DBF_WARNING
, device
, "%s",
331 "Allocating memory for private DASD data "
335 ccw_device_get_id(device
->cdev
, &private->dev_id
);
336 device
->private = private;
338 block
= dasd_alloc_block();
340 DBF_DEV_EVENT(DBF_WARNING
, device
, "%s",
341 "could not allocate dasd block structure");
342 device
->private = NULL
;
344 return PTR_ERR(block
);
346 device
->block
= block
;
347 block
->base
= device
;
349 /* Read Device Characteristics */
350 rdc_data
= &private->rdc_data
;
351 rdc_data
->dev_nr
= private->dev_id
.devno
;
352 rdc_data
->rdc_len
= sizeof (struct dasd_diag_characteristics
);
354 rc
= diag210((struct diag210
*) rdc_data
);
356 DBF_DEV_EVENT(DBF_WARNING
, device
, "failed to retrieve device "
357 "information (rc=%d)", rc
);
362 device
->default_expires
= DIAG_TIMEOUT
;
363 device
->default_retries
= DIAG_MAX_RETRIES
;
365 /* Figure out position of label block */
366 switch (private->rdc_data
.vdev_class
) {
368 private->pt_block
= 1;
371 private->pt_block
= 2;
374 pr_warn("%s: Device type %d is not supported in DIAG mode\n",
375 dev_name(&device
->cdev
->dev
),
376 private->rdc_data
.vdev_class
);
381 DBF_DEV_EVENT(DBF_INFO
, device
,
382 "%04X: %04X on real %04X/%02X",
385 rdc_data
->rdev_type
, rdc_data
->rdev_model
);
387 /* terminate all outstanding operations */
388 mdsk_term_io(device
);
390 /* figure out blocksize of device */
391 label
= (struct vtoc_cms_label
*) get_zeroed_page(GFP_KERNEL
);
393 DBF_DEV_EVENT(DBF_WARNING
, device
, "%s",
394 "No memory to allocate initialization request");
398 bio
= kzalloc(sizeof(*bio
), GFP_KERNEL
);
400 DBF_DEV_EVENT(DBF_WARNING
, device
, "%s",
401 "No memory to allocate initialization bio");
407 /* try all sizes - needed for ECKD devices */
408 for (bsize
= 512; bsize
<= PAGE_SIZE
; bsize
<<= 1) {
409 mdsk_init_io(device
, bsize
, 0, &end_block
);
410 memset(bio
, 0, sizeof(*bio
));
411 bio
->type
= MDSK_READ_REQ
;
412 bio
->block_number
= private->pt_block
+ 1;
414 memset(&private->iob
, 0, sizeof (struct dasd_diag_rw_io
));
415 private->iob
.dev_nr
= rdc_data
->dev_nr
;
416 private->iob
.key
= 0;
417 private->iob
.flags
= 0; /* do synchronous io */
418 private->iob
.block_count
= 1;
419 private->iob
.interrupt_params
= 0;
420 private->iob
.bio_list
= bio
;
421 private->iob
.flaga
= DASD_DIAG_FLAGA_DEFAULT
;
422 rc
= dia250(&private->iob
, RW_BIO
);
424 pr_warn("%s: A 64-bit DIAG call failed\n",
425 dev_name(&device
->cdev
->dev
));
429 mdsk_term_io(device
);
433 if (bsize
> PAGE_SIZE
) {
434 pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
435 dev_name(&device
->cdev
->dev
), rc
);
439 /* check for label block */
440 if (memcmp(label
->label_id
, DASD_DIAG_CMS1
,
441 sizeof(DASD_DIAG_CMS1
)) == 0) {
442 /* get formatted blocksize from label block */
443 bsize
= (unsigned int) label
->block_size
;
444 block
->blocks
= (unsigned long) label
->block_count
;
446 block
->blocks
= end_block
;
447 block
->bp_block
= bsize
;
448 block
->s2b_shift
= 0; /* bits to shift 512 to get a block */
449 for (sb
= 512; sb
< bsize
; sb
= sb
<< 1)
451 rc
= mdsk_init_io(device
, block
->bp_block
, 0, NULL
);
452 if (rc
&& (rc
!= 4)) {
453 pr_warn("%s: DIAG initialization failed with rc=%d\n",
454 dev_name(&device
->cdev
->dev
), rc
);
458 set_bit(DASD_FLAG_DEVICE_RO
, &device
->flags
);
459 pr_info("%s: New DASD with %ld byte/block, total size %ld "
460 "KB%s\n", dev_name(&device
->cdev
->dev
),
461 (unsigned long) block
->bp_block
,
462 (unsigned long) (block
->blocks
<<
463 block
->s2b_shift
) >> 1,
464 (rc
== 4) ? ", read-only device" : "");
470 free_page((long) label
);
473 device
->block
= NULL
;
474 dasd_free_block(block
);
475 device
->private = NULL
;
481 /* Fill in virtual disk geometry for device. Return zero on success, non-zero
484 dasd_diag_fill_geometry(struct dasd_block
*block
, struct hd_geometry
*geo
)
486 if (dasd_check_blocksize(block
->bp_block
) != 0)
488 geo
->cylinders
= (block
->blocks
<< block
->s2b_shift
) >> 10;
490 geo
->sectors
= 128 >> block
->s2b_shift
;
495 dasd_diag_erp_action(struct dasd_ccw_req
* cqr
)
497 return dasd_default_erp_action
;
501 dasd_diag_erp_postaction(struct dasd_ccw_req
* cqr
)
503 return dasd_default_erp_postaction
;
506 /* Create DASD request from block device request. Return pointer to new
507 * request on success, ERR_PTR otherwise. */
508 static struct dasd_ccw_req
*dasd_diag_build_cp(struct dasd_device
*memdev
,
509 struct dasd_block
*block
,
512 struct dasd_ccw_req
*cqr
;
513 struct dasd_diag_req
*dreq
;
514 struct dasd_diag_bio
*dbio
;
515 struct req_iterator iter
;
519 sector_t recid
, first_rec
, last_rec
;
520 unsigned int blksize
, off
;
521 unsigned char rw_cmd
;
523 if (rq_data_dir(req
) == READ
)
524 rw_cmd
= MDSK_READ_REQ
;
525 else if (rq_data_dir(req
) == WRITE
)
526 rw_cmd
= MDSK_WRITE_REQ
;
528 return ERR_PTR(-EINVAL
);
529 blksize
= block
->bp_block
;
530 /* Calculate record id of first and last block. */
531 first_rec
= blk_rq_pos(req
) >> block
->s2b_shift
;
533 (blk_rq_pos(req
) + blk_rq_sectors(req
) - 1) >> block
->s2b_shift
;
534 /* Check struct bio and count the number of blocks for the request. */
536 rq_for_each_segment(bv
, req
, iter
) {
537 if (bv
.bv_len
& (blksize
- 1))
538 /* Fba can only do full blocks. */
539 return ERR_PTR(-EINVAL
);
540 count
+= bv
.bv_len
>> (block
->s2b_shift
+ 9);
543 if (count
!= last_rec
- first_rec
+ 1)
544 return ERR_PTR(-EINVAL
);
545 /* Build the request */
546 cqr
= dasd_smalloc_request(DASD_DIAG_MAGIC
, 0, struct_size(dreq
, bio
, count
),
547 memdev
, blk_mq_rq_to_pdu(req
));
551 dreq
= (struct dasd_diag_req
*) cqr
->data
;
552 dreq
->block_count
= count
;
555 rq_for_each_segment(bv
, req
, iter
) {
556 dst
= page_address(bv
.bv_page
) + bv
.bv_offset
;
557 for (off
= 0; off
< bv
.bv_len
; off
+= blksize
) {
558 memset(dbio
, 0, sizeof (struct dasd_diag_bio
));
560 dbio
->block_number
= recid
+ 1;
567 cqr
->retries
= memdev
->default_retries
;
568 cqr
->buildclk
= get_tod_clock();
569 if (blk_noretry_request(req
) ||
570 block
->base
->features
& DASD_FEATURE_FAILFAST
)
571 set_bit(DASD_CQR_FLAGS_FAILFAST
, &cqr
->flags
);
572 cqr
->startdev
= memdev
;
573 cqr
->memdev
= memdev
;
575 cqr
->expires
= memdev
->default_expires
* HZ
;
576 cqr
->status
= DASD_CQR_FILLED
;
580 /* Release DASD request. Return non-zero if request was successful, zero
583 dasd_diag_free_cp(struct dasd_ccw_req
*cqr
, struct request
*req
)
587 status
= cqr
->status
== DASD_CQR_DONE
;
588 dasd_sfree_request(cqr
, cqr
->memdev
);
592 static void dasd_diag_handle_terminated_request(struct dasd_ccw_req
*cqr
)
594 if (cqr
->retries
< 0)
595 cqr
->status
= DASD_CQR_FAILED
;
597 cqr
->status
= DASD_CQR_FILLED
;
600 /* Fill in IOCTL data for device. */
602 dasd_diag_fill_info(struct dasd_device
* device
,
603 struct dasd_information2_t
* info
)
605 struct dasd_diag_private
*private = device
->private;
607 info
->label_block
= (unsigned int) private->pt_block
;
608 info
->FBA_layout
= 1;
609 info
->format
= DASD_FORMAT_LDL
;
610 info
->characteristics_size
= sizeof(private->rdc_data
);
611 memcpy(info
->characteristics
, &private->rdc_data
,
612 sizeof(private->rdc_data
));
613 info
->confdata_size
= 0;
618 dasd_diag_dump_sense(struct dasd_device
*device
, struct dasd_ccw_req
* req
,
621 DBF_DEV_EVENT(DBF_WARNING
, device
, "%s",
622 "dump sense not available for DIAG data");
626 * Initialize block layer request queue.
628 static void dasd_diag_setup_blk_queue(struct dasd_block
*block
)
630 unsigned int logical_block_size
= block
->bp_block
;
631 struct request_queue
*q
= block
->request_queue
;
634 max
= DIAG_MAX_BLOCKS
<< block
->s2b_shift
;
635 blk_queue_flag_set(QUEUE_FLAG_NONROT
, q
);
636 q
->limits
.max_dev_sectors
= max
;
637 blk_queue_logical_block_size(q
, logical_block_size
);
638 blk_queue_max_hw_sectors(q
, max
);
639 blk_queue_max_segments(q
, USHRT_MAX
);
640 /* With page sized segments each segment can be translated into one idaw/tidaw */
641 blk_queue_max_segment_size(q
, PAGE_SIZE
);
642 blk_queue_segment_boundary(q
, PAGE_SIZE
- 1);
645 static struct dasd_discipline dasd_diag_discipline
= {
646 .owner
= THIS_MODULE
,
649 .check_device
= dasd_diag_check_device
,
650 .verify_path
= dasd_generic_verify_path
,
651 .fill_geometry
= dasd_diag_fill_geometry
,
652 .setup_blk_queue
= dasd_diag_setup_blk_queue
,
653 .start_IO
= dasd_start_diag
,
654 .term_IO
= dasd_diag_term_IO
,
655 .handle_terminated_request
= dasd_diag_handle_terminated_request
,
656 .erp_action
= dasd_diag_erp_action
,
657 .erp_postaction
= dasd_diag_erp_postaction
,
658 .build_cp
= dasd_diag_build_cp
,
659 .free_cp
= dasd_diag_free_cp
,
660 .dump_sense
= dasd_diag_dump_sense
,
661 .fill_info
= dasd_diag_fill_info
,
667 if (!MACHINE_IS_VM
) {
668 pr_info("Discipline %s cannot be used without z/VM\n",
669 dasd_diag_discipline
.name
);
672 ASCEBC(dasd_diag_discipline
.ebcname
, 4);
674 irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL
);
675 register_external_irq(EXT_IRQ_CP_SERVICE
, dasd_ext_handler
);
676 dasd_diag_discipline_pointer
= &dasd_diag_discipline
;
681 dasd_diag_cleanup(void)
683 unregister_external_irq(EXT_IRQ_CP_SERVICE
, dasd_ext_handler
);
684 irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL
);
685 dasd_diag_discipline_pointer
= NULL
;
688 module_init(dasd_diag_init
);
689 module_exit(dasd_diag_cleanup
);