/*
 * File...........: linux/drivers/s390/block/dasd_fba.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <asm/debug.h>

#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/todclk.h>
#include <asm/ccwdev.h>

#include "dasd_int.h"
#include "dasd_fba.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(fba):"

#define DASD_FBA_CCW_WRITE 0x41
#define DASD_FBA_CCW_READ 0x42
#define DASD_FBA_CCW_LOCATE 0x43
#define DASD_FBA_CCW_DEFINE_EXTENT 0x63

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_fba_discipline;

struct dasd_fba_private {
	struct dasd_fba_characteristics rdc_data;
};

static struct ccw_device_id dasd_fba_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);

static struct ccw_driver dasd_fba_driver; /* see below */
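
/*
 * Glue between the CCW bus and the generic DASD core: probe and set_online
 * simply hand the device to the generic helpers together with the FBA
 * discipline.
 */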
static int
dasd_fba_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &dasd_fba_discipline);
}

static int
dasd_fba_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_fba_discipline);
}

static struct ccw_driver dasd_fba_driver = {
	.name        = "dasd-fba",
	.owner       = THIS_MODULE,
	.ids         = dasd_fba_ids,
	.probe       = dasd_fba_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_fba_set_online,
	.notify      = dasd_generic_notify,
};
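
/*
 * Build the Define Extent CCW for an FBA request: it selects read/write
 * permission, the block size and the extent locator/end of the transfer.
 */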
static void
define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
	      int blksize, int beg, int nr)
{
	ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);
	memset(data, 0, sizeof (struct DE_fba_data));
	if (rw == WRITE)
		(data->mask).perm = 0x0;
	else if (rw == READ)
		(data->mask).perm = 0x1;
	else
		data->mask.perm = 0x2;
	data->blk_size = blksize;
	data->ext_loc = beg;
	data->ext_end = nr - 1;
}
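
/*
 * Build the Locate Record CCW: operation code 0x5 for write, 0x6 for read
 * and 0x8 otherwise, followed by the first block and the block count of
 * the area to be accessed.
 */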
static void
locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
	      int block_nr, int block_ct)
{
	ccw->cmd_code = DASD_FBA_CCW_LOCATE;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32) __pa(data);
	memset(data, 0, sizeof (struct LO_fba_data));
	if (rw == WRITE)
		data->operation.cmd = 0x5;
	else if (rw == READ)
		data->operation.cmd = 0x6;
	else
		data->operation.cmd = 0x8;
	data->blk_nr = block_nr;
	data->blk_ct = block_ct;
}
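
/*
 * Allocate the per-device private data and the dasd_block structure and
 * read the device characteristics to identify the FBA volume.
 */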
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
	struct dasd_block *block;
	struct dasd_fba_private *private;
	struct ccw_device *cdev = device->cdev;
	void *rdc_data;
	int rc;

	private = (struct dasd_fba_private *) device->private;
	if (private == NULL) {
		private = kzalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
		if (private == NULL) {
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "memory allocation failed for private "
				    "data");
			return -ENOMEM;
		}
		device->private = (void *) private;
	}
	block = dasd_alloc_block();
	if (IS_ERR(block)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "could not allocate dasd block structure");
		kfree(device->private);
		return PTR_ERR(block);
	}
	device->block = block;
	block->base = device;

	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
	if (rc) {
		DEV_MESSAGE(KERN_WARNING, device,
			    "Read device characteristics returned error %d",
			    rc);
		return rc;
	}

	DEV_MESSAGE(KERN_INFO, device,
		    "%04X/%02X(CU:%04X/%02X) %dMB at(%d B/blk)",
		    cdev->id.dev_type,
		    cdev->id.dev_model,
		    cdev->id.cu_type,
		    cdev->id.cu_model,
		    ((private->rdc_data.blk_bdsa *
		      (private->rdc_data.blk_size >> 9)) >> 11),
		    private->rdc_data.blk_size);
	return 0;
}
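
/*
 * Derive block count, block size and the 512-byte-to-block shift from the
 * device characteristics read at check_device time.
 */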
static int dasd_fba_do_analysis(struct dasd_block *block)
{
	struct dasd_fba_private *private;
	int sb, rc;

	private = (struct dasd_fba_private *) block->base->private;
	rc = dasd_check_blocksize(private->rdc_data.blk_size);
	if (rc) {
		DEV_MESSAGE(KERN_INFO, block->base, "unknown blocksize %d",
			    private->rdc_data.blk_size);
		return rc;
	}
	block->blocks = private->rdc_data.blk_bdsa;
	block->bp_block = private->rdc_data.blk_size;
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
		block->s2b_shift++;
	return 0;
}
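
/*
 * Report a synthetic disk geometry for HDIO_GETGEO based on the block
 * count and block size of the FBA device.
 */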
static int dasd_fba_fill_geometry(struct dasd_block *block,
				  struct hd_geometry *geo)
{
	if (dasd_check_blocksize(block->bp_block) != 0)
		return -EINVAL;
	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
	geo->heads = 16;
	geo->sectors = 128 >> block->s2b_shift;
	return 0;
}
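
/*
 * FBA devices only know the default error recovery: retry the request and
 * use the matching default postaction once recovery has finished.
 */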
static dasd_erp_fn_t
dasd_fba_erp_action(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_action;
}

static dasd_erp_fn_t
dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
{
	if (cqr->function == dasd_default_erp_action)
		return dasd_default_erp_postaction;

	DEV_MESSAGE(KERN_WARNING, cqr->startdev, "unknown ERP action %p",
		    cqr->function);
	return NULL;
}
static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
						  struct irb *irb)
{
	char mask;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		dasd_generic_handle_state_change(device);
		return;
	}

	/* check for unsolicited interrupts */
	DEV_MESSAGE(KERN_DEBUG, device, "%s",
		    "unsolicited interrupt received");
	device->discipline->dump_sense(device, NULL, irb);
	dasd_schedule_device_bh(device);
	return;
}
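
/*
 * Translate a block layer request into a channel program: one define extent
 * CCW, one locate record CCW per request (or per block for devices without
 * data chaining) and one read/write CCW per block, using IDALs where needed.
 */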
static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
					      struct dasd_block *block,
					      struct request *req)
{
	struct dasd_fba_private *private;
	unsigned long *idaws;
	struct LO_fba_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	int count, cidaw, cplength, datasize;
	sector_t recid, first_rec, last_rec;
	unsigned int blksize, off;
	unsigned char cmd;

	private = (struct dasd_fba_private *) block->base->private;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_FBA_CCW_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_FBA_CCW_WRITE;
	} else
		return ERR_PTR(-EINVAL);
	blksize = block->bp_block;
	/* Calculate record id of first and last block. */
	first_rec = req->sector >> block->s2b_shift;
	last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv->bv_len & (blksize - 1))
			/* Fba can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv->bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
			cidaw += bv->bv_len / blksize;
#endif
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* 1x define extent + 1x locate record + number of blocks */
	cplength = 2 + count;
	/* 1x define extent + 1x locate record */
	datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
		cidaw * sizeof(unsigned long);
	/*
	 * Find out number of additional locate record ccws if the device
	 * can't do data chaining.
	 */
	if (private->rdc_data.mode.bits.data_chain == 0) {
		cplength += count - 1;
		datasize += (count - 1)*sizeof(struct LO_fba_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(dasd_fba_discipline.name,
				   cplength, datasize, memdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent. */
	define_extent(ccw++, cqr->data, rq_data_dir(req),
		      block->bp_block, req->sector, req->nr_sectors);
	/* Build locate_record + read/write ccws. */
	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
	LO_data = (struct LO_fba_data *) (idaws + cidaw);
	/* Locate record for all blocks for smart devices. */
	if (private->rdc_data.mode.bits.data_chain != 0) {
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
	}
	recid = first_rec;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
			if (copy)
				dst = copy + bv->bv_offset;
		}
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Locate record for stupid devices. */
			if (private->rdc_data.mode.bits.data_chain == 0) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw, LO_data++,
					      rq_data_dir(req),
					      recid - first_rec, 1);
				ccw->flags = CCW_FLAG_CC;
				ccw++;
			} else {
				if (recid > first_rec)
					ccw[-1].flags |= CCW_FLAG_DC;
				else
					ccw[-1].flags |= CCW_FLAG_CC;
			}
			ccw->cmd_code = cmd;
			ccw->count = block->bp_block;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (req->cmd_flags & REQ_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = memdev;
	cqr->memdev = memdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->retries = 32;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
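
/*
 * Undo the dasd_page_cache copies made by dasd_fba_build_cp, copying read
 * data back to the request buffers, and free the ccw request.
 */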
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_fba_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *cda;
	unsigned int blksize, off;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_fba_private *) cqr->block->base->private;
	blksize = cqr->block->bp_block;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->rdc_data.mode.bits.data_chain != 0)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->rdc_data.mode.bits.data_chain == 0)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv->bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}

static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	cqr->status = DASD_CQR_FILLED;
}
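
/*
 * Fill the dasd_information2_t block used by the DASD ioctl interface with
 * the FBA layout data and the raw device characteristics.
 */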
static int
dasd_fba_fill_info(struct dasd_device * device,
		   struct dasd_information2_t * info)
{
	info->label_block = 1;
	info->FBA_layout = 1;
	info->format = DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(struct dasd_fba_characteristics);
	memcpy(info->characteristics,
	       &((struct dasd_fba_private *) device->private)->rdc_data,
	       sizeof (struct dasd_fba_characteristics));
	info->confdata_size = 0;
	return 0;
}
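
/*
 * Dump the interrupt response block and the related channel program for a
 * failed request: sense bytes first, then the first, failing and last CCWs.
 */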
static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
		    struct irb *irb)
{
	char *page;
	struct ccw1 *act, *end, *last;	/* actual CCW array */
	int len, sl, sct, count;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DEV_MESSAGE(KERN_ERR, device, " %s",
			    "No memory to dump sense data");
		return;
	}
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      device->cdev->dev.bus_id);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
		       irb->scsw.cstat, irb->scsw.dstat);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       device->cdev->dev.bus_id,
		       (void *) (addr_t) irb->scsw.cpa);
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}
	} else {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " SORRY - NO VALID SENSE AVAILABLE\n");
	}
	MESSAGE_LOG(KERN_ERR, "%s",
		    page + sizeof(KERN_ERR PRINTK_HEADER));

	/* dump the Channel Program */
	/* print first CCWs (maximum 8) */
	act = req->cpaddr;
	for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
	end = min(act + 8, last);
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " Related CP in req: %p\n", req);
	while (act <= end) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	MESSAGE_LOG(KERN_ERR, "%s",
		    page + sizeof(KERN_ERR PRINTK_HEADER));

	/* print failing CCW area */
	len = 0;
	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
		act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
	}
	end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
	while (act <= end) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}

	/* print last CCWs */
	if (act < last - 2) {
		act = last - 2;
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
	}
	while (act <= last) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	if (len > 0)
		MESSAGE_LOG(KERN_ERR, "%s",
			    page + sizeof(KERN_ERR PRINTK_HEADER));
	free_page((unsigned long) page);
}

/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and a
 * locate record ccw for each block (stupid devices!) + 16 bytes of data.
 * That makes:
 * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 100.1 blocks
 * for one request. Give a little safety and the result is 96.
 */
static struct dasd_discipline dasd_fba_discipline = {
	.owner = THIS_MODULE,
	.name = "FBA ",
	.ebcname = "FBA ",
	.max_blocks = 96,
	.check_device = dasd_fba_check_characteristics,
	.do_analysis = dasd_fba_do_analysis,
	.fill_geometry = dasd_fba_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_fba_handle_terminated_request,
	.erp_action = dasd_fba_erp_action,
	.erp_postaction = dasd_fba_erp_postaction,
	.handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt,
	.build_cp = dasd_fba_build_cp,
	.free_cp = dasd_fba_free_cp,
	.dump_sense = dasd_fba_dump_sense,
	.fill_info = dasd_fba_fill_info,
};

static int __init
dasd_fba_init(void)
{
	ASCEBC(dasd_fba_discipline.ebcname, 4);
	return ccw_driver_register(&dasd_fba_driver);
}

static void __exit
dasd_fba_cleanup(void)
{
	ccw_driver_unregister(&dasd_fba_driver);
}

module_init(dasd_fba_init);
module_exit(dasd_fba_cleanup);