/*
 * File...........: linux/drivers/s390/block/dasd_eckd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */
#define KMSG_COMPONENT "dasd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/todclk.h>
#include <asm/uaccess.h>
#include <asm/ccwdev.h>

#include "dasd_eckd.h"
#include "../cio/chsc.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)
MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;
/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
	if (ret) {
		DBF_EVENT(DBF_WARNING,
			  "dasd_eckd_probe: could not set ccw-device options "
			  "for %s\n", dev_name(&cdev->dev));
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static struct ccw_driver dasd_eckd_driver = {
	.owner       = THIS_MODULE,
	.ids         = dasd_eckd_ids,
	.probe       = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
};
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;
	return (rem ? no - rem + mult : no);
}
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
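
/*
 * Editorial note - a quick numeric check of the two helpers above:
 * round_up_multiple(4096, 512) == 4096 but round_up_multiple(4100, 512)
 * == 4608, and ceil_quot(4102, 232) == 18, i.e. both round up instead
 * of truncating like plain integer division would.
 */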
static int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
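
/*
 * Editorial note - worked example for the keyless 3390 branch above,
 * assuming kl == 0 and dl == 4096: dn = ceil_quot(4102, 232) + 1 = 19,
 * so the result is 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34))
 * = 1729 / (19 + 124) = 12 records per track, which matches the usual
 * twelve 4KB blocks on a 3390 track.
 */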
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
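
/*
 * Editorial note: on large volumes the cylinder number no longer fits
 * into the 16-bit cyl field of struct ch_t. The helper above therefore
 * keeps the low 16 bits in geo->cyl and folds the overflow bits
 * (cyl >> 16) into the upper bits of geo->head, next to the real head
 * number.
 */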
static int
check_XRC (struct ccw1         *de_ccw,
	   struct DE_eckd_data *data,
	   struct dasd_device  *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}
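
/*
 * Editorial note: check_XRC() hands the return value of
 * get_sync_clock() back to its callers (define_extent and the prefix
 * helpers). The request-build functions later in this file treat an
 * -EAGAIN from that path as "clock not in sync while XRC is enabled"
 * and give up building the channel program for now.
 */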
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	private = (struct dasd_eckd_private *) device->private;

	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {
		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device  *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */

	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;
	return rc;
}
static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
			  unsigned int rec_on_trk, int count, int cmd,
			  struct dasd_device *device, unsigned int reclen,
			  unsigned int tlf)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		break;
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned char format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->count = sizeof(*pfxdata);
	ccw->cda = (__u32) __pa(pfxdata);

	memset(pfxdata, 0, sizeof(*pfxdata));
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}

	/* define extend data (mostly)*/
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		dedata->mask.perm = 0x3;
		dedata->mask.auth = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown opcode 0x%x", cmd);
		return -EINVAL;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750)
	    && !(basepriv->uses_cdl && trk < 2))
		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
		      basedev, blksize, tlf);
	return rc;
}
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
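
/*
 * Editorial note: prefix() is the plain define-extent flavour - it
 * simply reuses prefix_LRE() with format 0 and no locate-record
 * parameters, so only the define-extent part of the PFX data is
 * filled in; the track-based code paths below call prefix_LRE()
 * directly with format 1 ("PFX with LRE").
 */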
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		  trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
		break;
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
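
/*
 * Editorial note on the numbers used above: with the compatible disk
 * layout the first three records of track 0 carry 4-byte keys plus
 * 24, 144 and 80 data bytes (sizes_trk0[] minus the key), and the
 * label records use LABEL_SIZE - 44 = 96 data bytes; the analysis and
 * format code further down checks for and (re)creates exactly these
 * sizes.
 */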
/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device,
				  struct dasd_uid *uid)
{
	struct dasd_eckd_private *private;
	int count;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->ned || !private->gneq)
		return -ENODEV;

	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
	return 0;
}
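
/*
 * Editorial note: the uid built above combines the storage server's
 * vendor and serial (taken from the NED and converted from EBCDIC),
 * the subsystem id from the GNEQ and the real unit address; when a
 * virtual-device SNEQ is present, its 16-byte token is appended as a
 * hex string, which is presumably what keeps otherwise identical
 * virtual devices (e.g. minidisks on one real volume) apart.
 */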
static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
						    void *rcd_buffer,
						    struct ciw *ciw, __u8 lpm)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = ciw->cmd;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = ciw->count;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}

	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
	if (IS_ERR(cqr)) {
		ret = PTR_ERR(cqr);
		goto out_error;
	}
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = ciw->count;
	*rcd_buffer = rcd_buf;
	return 0;

out_error:
	kfree(rcd_buf);
	*rcd_buffer_size = 0;
	return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;
}
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}
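
/*
 * Editorial note: the three bits returned above describe how the
 * storage server classifies the queried channel path;
 * dasd_eckd_read_conf() below uses this value to sort each available
 * path into the normal (npm) or preferred (ppm) path mask of the
 * device's path data.
 */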
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc;
	__u8 lpm;
	struct dasd_eckd_private *private;
	struct dasd_eckd_path *path_data;

	private = (struct dasd_eckd_private *) device->private;
	path_data = (struct dasd_eckd_path *) &private->path_data;
	path_data->opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (lpm & path_data->opm){
			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
						     &conf_len, lpm);
			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
				DBF_EVENT(DBF_WARNING,
					  "Read configuration data returned "
					  "error %d for device: %s", rc,
					  dev_name(&device->cdev->dev));
				return rc;
			}
			if (conf_data == NULL) {
				DBF_EVENT(DBF_WARNING, "No configuration "
					  "data retrieved for device: %s",
					  dev_name(&device->cdev->dev));
				continue;	/* no error */
			}
			/* save first valid configuration data */
			if (!conf_data_saved) {
				kfree(private->conf_data);
				private->conf_data = conf_data;
				private->conf_len = conf_len;
				if (dasd_eckd_identify_conf_parts(private)) {
					private->conf_data = NULL;
					private->conf_len = 0;
					continue;
				}
				conf_data_saved++;
			}
			switch (dasd_eckd_path_access(conf_data, conf_len)) {
			case 0x02:
				path_data->npm |= lpm;
				break;
			case 0x03:
				path_data->ppm |= lpm;
				break;
			}
			if (conf_data != private->conf_data)
				kfree(conf_data);
		}
	}
	return 0;
}
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   1 /* PSF */	+ 1 /* RSSD */ ,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
			  "request for device: %s",
			  dev_name(&device->cdev->dev));
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0x40;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x88;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device)
{
	int rc;
	struct dasd_eckd_private *private;
	int enable_pav;

	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav);
	/* may be requested feature is not available on server,
	 * therefore just report error and go ahead */
	private = (struct dasd_eckd_private *) device->private;
	DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
		  "returned rc=%d for device: %s",
		  private->uid.vendor, private->uid.serial,
		  private->uid.ssid, rc, dev_name(&device->cdev->dev));
	/* RE-Read Configuration Data */
	return dasd_eckd_read_conf(device);
}
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	void *rdc_data;
	int is_known, rc;

	private = (struct dasd_eckd_private *) device->private;
	if (private == NULL) {
		private = kzalloc(sizeof(struct dasd_eckd_private),
				  GFP_KERNEL | GFP_DMA);
		if (private == NULL) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* Generate device unique id and register in devmap */
	rc = dasd_eckd_generate_uid(device, &private->uid);
	if (rc)
		goto out_err1;
	dasd_set_uid(device->cdev, &private->uid);

	if (private->uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT(DBF_WARNING, "could not allocate dasd "
				  "block structure for device: %s",
				  dev_name(&device->cdev->dev));
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0) {
		rc = is_known;
		goto out_err2;
	}
	if (!is_known) {
		/* new lcu found */
		rc = dasd_eckd_validate_server(device); /* will switch pav on */
		if (rc)
			goto out_err3;
	}

	/* Read Feature Codes */
	rc = dasd_eckd_read_features(device);
	if (rc)
		goto out_err3;

	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	memset(rdc_data, 0, sizeof(private->rdc_data));
	rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
	if (rc) {
		DBF_EVENT(DBF_WARNING,
			  "Read device characteristics failed, rc=%d for "
			  "device: %s", rc, dev_name(&device->cdev->dev));
		goto out_err3;
	}
	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk);
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	kfree(private->conf_data);
	private->conf_data = NULL;
}
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
static void
dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
{
	struct dasd_eckd_private *private;
	struct dasd_device *device;

	device = init_cqr->startdev;
	private = (struct dasd_eckd_private *) device->private;
	private->init_cqr_status = init_cqr->status;
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}
static int
dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *init_cqr;

	private = (struct dasd_eckd_private *) block->base->private;
	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}
static int
dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;

	device = block->base;
	private = (struct dasd_eckd_private *) device->private;
	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status != DASD_CQR_DONE) {
		dev_warn(&device->cdev->dev,
			 "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl))
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}
	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
	block->blocks = (private->real_cyl *
			 private->rdc_data.trk_per_cyl *
			 blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 ((private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (private->init_cqr_status < 0)
		return dasd_eckd_start_analysis(block);
	else
		return dasd_eckd_end_analysis(block);
}
static int dasd_eckd_ready_to_online(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
}

static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
}
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (dasd_check_blocksize(block->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, block->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}
static struct dasd_ccw_req *
dasd_eckd_format_device(struct dasd_device * device,
			struct format_data_t * fdata)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ccw1 *ccw;
	void *data;
	int rpt;
	struct ch_t address;
	int cplength, datasize;
	int intensity;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
	set_ch_t(&address,
		 fdata->start_unit / private->rdc_data.trk_per_cyl,
		 fdata->start_unit % private->rdc_data.trk_per_cyl);

	/* Sanity checks. */
	if (fdata->start_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&device->cdev->dev, "Start track number %d used in "
			 "formatting is too big\n", fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (fdata->start_unit > fdata->stop_unit) {
		dev_warn(&device->cdev->dev, "Start track %d used in "
			 "formatting exceeds end track\n", fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		dev_warn(&device->cdev->dev,
			 "The DASD cannot be formatted with block size %d\n",
			 fdata->blksize);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
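	/*
	 * Editorial note - a couple of concrete examples of the bit string
	 * as handled by the switch below: 0x08 formats a track with the
	 * OS/390 compatible layout, 0x09 additionally (re)writes record
	 * zero, and 0x0c invalidates the track while keeping cdl; setting
	 * bit 4 (e.g. 0x18) only withholds the "modify record zero"
	 * permission and is masked off before the same cases are evaluated.
	 */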
	if (fdata->intensity & 0x10) {
		intensity = fdata->intensity & ~0x10;
	} else {
		intensity = fdata->intensity;
	}
	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 3 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&device->cdev->dev, "An I/O control call used "
			 "incorrect flags 0x%x\n", fdata->intensity);
		return ERR_PTR(-EINVAL);
	}
	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(fcp))
		return fcp;

	data = fcp->data;
	ccw = fcp->cpaddr;
	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		/* grant subsystem permission to format R0 */
		((struct DE_eckd_data *)data)->ga_extended |= 0x04;
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt,
			      DASD_ECKD_CCW_WRITE_CKD, device,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			      device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
			      device->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}
	if (intensity & 0x01) {	/* write record zero */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
		ccw->flags = CCW_FLAG_SLI;
		ccw->cda = (__u32)(addr_t) ect;
		ccw++;
	}
	if ((intensity & ~0x08) & 0x04) {	/* erase track */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
		ccw->flags = CCW_FLAG_SLI;
		ccw->cda = (__u32)(addr_t) ect;
	} else {		/* write remaining records */
		for (i = 0; i < rpt; i++) {
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = i + 1;
			ect->dl = fdata->blksize;
			/* Check for special tracks 0-1 when formatting CDL */
			if ((intensity & 0x08) &&
			    fdata->start_unit == 0) {
				if (i < 3)
					ect->dl = sizes_trk0[i] - 4;
			}
			if ((intensity & 0x08) &&
			    fdata->start_unit == 1) {
				ect->dl = LABEL_SIZE - 44;
			}
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
	}
	fcp->startdev = device;
	fcp->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
	fcp->retries = 5;	/* set retry counter to enable default ERP */
	fcp->buildclk = get_clock();
	fcp->status = DASD_CQR_FILLED;
	return fcp;
}
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	cqr->status = DASD_CQR_FILLED;
	if (cqr->block && (cqr->startdev != cqr->block->base)) {
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
	}
}
static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
	struct ccw_device *cdev = device->cdev;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		return dasd_3990_erp_action;
	default:
		return dasd_default_erp_action;
	}
}
static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
						   struct irb *irb)
{
	char mask;
	char *sense = NULL;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		dasd_generic_handle_state_change(device);
		return;
	}

	/* summary unit check */
	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    (irb->ecw[7] == 0x0D)) {
		dasd_alias_handle_summary_unit_check(device, irb);
		return;
	}

	sense = dasd_get_sense(irb);
	/* service information message SIM */
	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		dasd_schedule_device_bh(device);
		return;
	}

	if ((scsw_cc(&irb->scsw) == 1) &&
	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
		/* fake irb do nothing, they are handled elsewhere */
		dasd_schedule_device_bh(device);
		return;
	}

	if (!sense) {
		/* just report other unsolicited interrupts */
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "unsolicited interrupt received");
	} else {
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "unsolicited interrupt received "
			      "(sense available)");
		device->discipline->dump_sense_dbf(device, NULL, irb,
						   "unsolicited");
	}

	dasd_schedule_device_bh(device);
	return;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv->bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv->bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
			cidaw += bv->bv_len >> (block->s2b_shift + 9);
#endif
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
			if (copy)
				dst = copy + bv->bv_offset;
		}
		for (off = 0; off < bv->bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	/* on 31-bit we need space for two 32 bit addresses per page
	 * on 64-bit one 64 bit address
	 */
	datasize = sizeof(struct PFX_eckd_data) +
		cidaw * sizeof(unsigned long long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize, tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and its idal ends with the track boundaries
	 */
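	/*
	 * Editorial note - a rough sketch of what the loop below does,
	 * assuming 4KB pages and, say, twelve 4KB blocks per track: each
	 * bio segment is cut into idaws at two kinds of boundaries. An
	 * idaw is closed whenever the accumulated buffer reaches an
	 * IDA_BLOCK_SIZE border, and it is also closed early when
	 * len_to_track_end runs out, so that one read/write ccw (and its
	 * idal) never crosses a track boundary; the remainder of the
	 * segment then starts a new idaw for the next track's ccw.
	 */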
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = NULL;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		seg_len = bv->bv_len;
		while (seg_len) {
			if (new_track) {
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
			}
			/* If we start a new idaw, everything is fine and the
			 * start of the new idaw is the start of this segment.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst)
				idaw_dst = dst;
			else if ((idaw_dst + idaw_len) != dst) {
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = NULL;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
static int prepare_itcw(struct itcw *itcw,
            unsigned int trk, unsigned int totrk, int cmd,
            struct dasd_device *basedev,
            struct dasd_device *startdev,
            unsigned int rec_on_trk, int count,
            unsigned int blksize,
            unsigned int total_data_size,
            unsigned int blk_per_trk)
{
    struct PFX_eckd_data pfxdata;
    struct dasd_eckd_private *basepriv, *startpriv;
    struct DE_eckd_data *dedata;
    struct LRE_eckd_data *lredata;
    u16 heads, beghead, endhead;

    /* setup prefix data */
    basepriv = (struct dasd_eckd_private *) basedev->private;
    startpriv = (struct dasd_eckd_private *) startdev->private;
    dedata = &pfxdata.define_extent;
    lredata = &pfxdata.locate_record;

    memset(&pfxdata, 0, sizeof(pfxdata));
    pfxdata.format = 1; /* PFX with LRE */
    pfxdata.base_address = basepriv->ned->unit_addr;
    pfxdata.base_lss = basepriv->ned->ID;
    pfxdata.validity.define_extent = 1;

    /* private uid is kept up to date, conf_data may be outdated */
    if (startpriv->uid.type != UA_BASE_DEVICE) {
        pfxdata.validity.verify_base = 1;
        if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
            pfxdata.validity.hyper_pav = 1;
    }

    switch (cmd) {
    case DASD_ECKD_CCW_READ_TRACK_DATA:
        dedata->mask.perm = 0x1;
        dedata->attributes.operation = basepriv->attrib.operation;
        dedata->blk_size = blksize;
        dedata->ga_extended |= 0x42;
        lredata->operation.orientation = 0x0;
        lredata->operation.operation = 0x0C;
        lredata->auxiliary.check_bytes = 0x01;
        pfx_cmd = DASD_ECKD_CCW_PFX_READ;
        break;
    case DASD_ECKD_CCW_WRITE_TRACK_DATA:
        dedata->mask.perm = 0x02;
        dedata->attributes.operation = basepriv->attrib.operation;
        dedata->blk_size = blksize;
        rc = check_XRC_on_prefix(&pfxdata, basedev);
        dedata->ga_extended |= 0x42;
        lredata->operation.orientation = 0x0;
        lredata->operation.operation = 0x3F;
        lredata->extended_operation = 0x23;
        lredata->auxiliary.check_bytes = 0x2;
        pfx_cmd = DASD_ECKD_CCW_PFX;
        break;
    default:
        DBF_DEV_EVENT(DBF_ERR, basedev,
                  "prepare itcw, unknown opcode 0x%x", cmd);
        break;
    }

    dedata->attributes.mode = 0x3;    /* ECKD */

    heads = basepriv->rdc_data.trk_per_cyl;
    begcyl = trk / heads;
    beghead = trk % heads;
    endcyl = totrk / heads;
    endhead = totrk % heads;

    /* check for sequential prestage - enhance cylinder range */
    if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
        dedata->attributes.operation == DASD_SEQ_ACCESS) {

        if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
            endcyl += basepriv->attrib.nr_cyl;
        else
            endcyl = (basepriv->real_cyl - 1);
    }

    set_ch_t(&dedata->beg_ext, begcyl, beghead);
    set_ch_t(&dedata->end_ext, endcyl, endhead);

    dedata->ep_format = 0x20; /* records per track is valid */
    dedata->ep_rec_per_track = blk_per_trk;

    switch (basepriv->rdc_data.dev_type) {
    case 0x3390:
        dn = ceil_quot(blksize + 6, 232);
        d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
        sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
        break;
    case 0x3380:
        d = 7 + ceil_quot(blksize + 12, 32);
        sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
        break;
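        /*
         * Worked example for the first (3390) branch above, purely as an
         * illustration and not part of the driver logic: with blksize = 4096
         * and rec_on_trk = 1,
         *
         *    dn     = ceil_quot(4096 + 6, 232)         = 18
         *    d      = 9 + ceil_quot(4096 + 6 * 19, 34) = 9 + 124 = 133
         *    sector = (49 + (1 - 1) * (10 + 133)) / 8  = 6
         */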
    }

    lredata->auxiliary.length_valid = 1;
    lredata->auxiliary.length_scope = 1;
    lredata->auxiliary.imbedded_ccw_valid = 1;
    lredata->length = tlf;
    lredata->imbedded_ccw = cmd;
    lredata->count = count;
    lredata->sector = sector;
    set_ch_t(&lredata->seek_addr, begcyl, beghead);
    lredata->search_arg.cyl = lredata->seek_addr.cyl;
    lredata->search_arg.head = lredata->seek_addr.head;
    lredata->search_arg.record = rec_on_trk;

    dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
               &pfxdata, sizeof(pfxdata), total_data_size);

    return rc;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
                           struct dasd_device *startdev,
                           struct dasd_block *block,
                           struct request *req,
                           unsigned int first_offs,
                           unsigned int last_offs,
                           unsigned int blk_per_trk,
                           unsigned int blksize)
{
    struct dasd_eckd_private *private;
    struct dasd_ccw_req *cqr;
    struct req_iterator iter;
    unsigned int trkcount, ctidaw;
    struct dasd_device *basedev;
    struct tidaw *last_tidaw = NULL;

    basedev = block->base;
    private = (struct dasd_eckd_private *) basedev->private;
    if (rq_data_dir(req) == READ) {
        cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
        itcw_op = ITCW_OP_READ;
    } else if (rq_data_dir(req) == WRITE) {
        cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
        itcw_op = ITCW_OP_WRITE;
    } else
        return ERR_PTR(-EINVAL);

    /* Track-based I/O needs to address all memory via TIDAWs,
     * not just for 64 bit addresses. This allows us to map
     * each segment directly to one tidaw.
     */
    trkcount = last_trk - first_trk + 1;
    ctidaw = 0;
    rq_for_each_segment(bv, req, iter) {
        ++ctidaw;
    }

    /* Allocate the ccw request. */
    itcw_size = itcw_calc_size(0, ctidaw, 0);
    cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
                   0, itcw_size, startdev);
    if (IS_ERR(cqr))
        return cqr;

    cqr->startdev = startdev;
    cqr->memdev = startdev;
    cqr->expires = 100*HZ;
    cqr->buildclk = get_clock();
    cqr->status = DASD_CQR_FILLED;

    /* transfer length factor: how many bytes to read from the last track */
    if (first_trk == last_trk)
        tlf = last_offs - first_offs + 1;
    else
        tlf = last_offs + 1;

    itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
    cqr->cpaddr = itcw_get_tcw(itcw);

    if (prepare_itcw(itcw, first_trk, last_trk,
             cmd, basedev, startdev,
             (last_rec - first_rec + 1) * blksize,
             tlf, blk_per_trk) == -EAGAIN) {
        /* Clock not in sync and XRC is enabled. */
        dasd_sfree_request(cqr, startdev);
        return ERR_PTR(-EAGAIN);
    }

    /*
     * A tidaw can address 4k of memory, but must not cross page boundaries.
     * We can let the block layer handle this by setting
     * blk_queue_segment_boundary to page boundaries and
     * blk_max_segment_size to page size when setting up the request queue.
     */
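    /*
     * Minimal sketch of the queue setup referred to above; this is an
     * assumption about how the limits would be applied when the request
     * queue is created elsewhere, not something done in this function.
     * For a request queue q:
     *
     *    blk_queue_segment_boundary(q, PAGE_SIZE - 1);
     *    blk_queue_max_segment_size(q, PAGE_SIZE);
     *
     * With those limits no segment seen by the loop below crosses a page.
     */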
    rq_for_each_segment(bv, req, iter) {
        dst = page_address(bv->bv_page) + bv->bv_offset;
        last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
        if (IS_ERR(last_tidaw))
            return (struct dasd_ccw_req *)last_tidaw;
    }

    last_tidaw->flags |= 0x80;
    itcw_finalize(itcw);

    if (blk_noretry_request(req) ||
        block->base->features & DASD_FEATURE_FAILFAST)
        set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
    cqr->startdev = startdev;
    cqr->memdev = startdev;
    cqr->expires = 5 * 60 * HZ;    /* 5 minutes */
    cqr->lpm = private->path_data.ppm;
    cqr->buildclk = get_clock();
    cqr->status = DASD_CQR_FILLED;
    return cqr;
}
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
                           struct dasd_block *block,
                           struct request *req)
{
    int tpm, cmdrtd, cmdwtd;
    struct dasd_eckd_private *private;
    int fcx_in_css, fcx_in_gneq, fcx_in_features;
    struct dasd_device *basedev;
    sector_t first_rec, last_rec;
    sector_t first_trk, last_trk;
    unsigned int first_offs, last_offs;
    unsigned int blk_per_trk, blksize;
    struct dasd_ccw_req *cqr;

    basedev = block->base;
    private = (struct dasd_eckd_private *) basedev->private;

    /* Calculate number of blocks/records per track. */
    blksize = block->bp_block;
    blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
    /* Calculate record id of first and last block. */
    first_rec = first_trk = req->sector >> block->s2b_shift;
    first_offs = sector_div(first_trk, blk_per_trk);
    last_rec = last_trk =
        (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
    last_offs = sector_div(last_trk, blk_per_trk);
    cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

    /* is transport mode supported? */
    fcx_in_css = css_general_characteristics.fcx;
    fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
    fcx_in_features = private->features.feature[40] & 0x80;
    tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

    /* is read track data and write track data in command mode supported? */
    cmdrtd = private->features.feature[9] & 0x20;
    cmdwtd = private->features.feature[12] & 0x40;
    use_prefix = private->features.feature[8] & 0x01;

    if (cdlspecial || dasd_page_cache) {
        /* do nothing, just fall through to the cmd mode single case */
    } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
        cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
                           first_rec, last_rec,
                           first_trk, last_trk,
                           first_offs, last_offs,
                           blk_per_trk, blksize);
        if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
            cqr = NULL;
    } else if (use_prefix &&
           (((rq_data_dir(req) == READ) && cmdrtd) ||
            ((rq_data_dir(req) == WRITE) && cmdwtd))) {
        cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
                           first_rec, last_rec,
                           first_trk, last_trk,
                           first_offs, last_offs,
                           blk_per_trk, blksize);
        if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
            cqr = NULL;
    }
    if (!cqr)
        cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
                            first_rec, last_rec,
                            first_trk, last_trk,
                            first_offs, last_offs,
                            blk_per_trk, blksize);
    return cqr;
}
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
    struct dasd_eckd_private *private;
    struct req_iterator iter;
    unsigned int blksize, blk_per_trk, off;

    if (!dasd_page_cache)
        goto out;
    private = (struct dasd_eckd_private *) cqr->block->base->private;
    blksize = cqr->block->bp_block;
    blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
    recid = req->sector >> cqr->block->s2b_shift;

    /* Skip over define extent & locate record. */
    if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
        ccw++;
    rq_for_each_segment(bv, req, iter) {
        dst = page_address(bv->bv_page) + bv->bv_offset;
        for (off = 0; off < bv->bv_len; off += blksize) {
            /* Skip locate record. */
            if (private->uses_cdl && recid <= 2*blk_per_trk)
                ccw++;
            if (ccw->flags & CCW_FLAG_IDA)
                cda = *((char **)((addr_t) ccw->cda));
            else
                cda = (char *)((addr_t) ccw->cda);
            if (rq_data_dir(req) == READ)
                memcpy(dst, cda, bv->bv_len);
            kmem_cache_free(dasd_page_cache,
                    (void *)((addr_t)cda & PAGE_MASK));
        }
    }
out:
    status = cqr->status == DASD_CQR_DONE;
    dasd_sfree_request(cqr, cqr->memdev);
    return status;
}
/*
 * Modify ccw/tcw in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */
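/*
 * Hedged usage sketch (not taken from this file): a caller that wants to
 * restart the request on the base device would combine this helper with a
 * startdev reset before requeueing, roughly
 *
 *    dasd_eckd_reset_ccw_to_base_io(cqr);
 *    cqr->startdev = base_device;
 *    cqr->status = DASD_CQR_FILLED;
 *
 * where base_device stands for the base DASD the alias maps to.
 */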
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
    struct PFX_eckd_data *pfxdata;

    if (cqr->cpmode == 1) {
        tccb = tcw_get_tccb(tcw);
        dcw = (struct dcw *)&tccb->tca[0];
        pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
        pfxdata->validity.verify_base = 0;
        pfxdata->validity.hyper_pav = 0;
    } else {
        pfxdata = cqr->data;
        if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
            pfxdata->validity.verify_base = 0;
            pfxdata->validity.hyper_pav = 0;
        }
    }
}
#define DASD_ECKD_CHANQ_MAX_SIZE 4

static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
                              struct dasd_block *block,
                              struct request *req)
{
    struct dasd_eckd_private *private;
    struct dasd_device *startdev;
    unsigned long flags;
    struct dasd_ccw_req *cqr;

    startdev = dasd_alias_get_start_dev(base);
    private = (struct dasd_eckd_private *) startdev->private;
    if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
        return ERR_PTR(-EBUSY);

    spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
    cqr = dasd_eckd_build_cp(startdev, block, req);
    spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
    return cqr;
}
static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
                   struct request *req)
{
    struct dasd_eckd_private *private;
    unsigned long flags;

    spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
    private = (struct dasd_eckd_private *) cqr->memdev->private;
    spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
    return dasd_eckd_free_cp(cqr, req);
}
static int
dasd_eckd_fill_info(struct dasd_device * device,
            struct dasd_information2_t * info)
{
    struct dasd_eckd_private *private;

    private = (struct dasd_eckd_private *) device->private;
    info->label_block = 2;
    info->FBA_layout = private->uses_cdl ? 0 : 1;
    info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
    info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
    memcpy(info->characteristics, &private->rdc_data,
           sizeof(struct dasd_eckd_characteristics));
    info->confdata_size = min((unsigned long)private->conf_len,
                  sizeof(info->configuration_data));
    memcpy(info->configuration_data, private->conf_data,
           info->confdata_size);
    return 0;
}
/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a prior reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
    struct dasd_ccw_req *cqr;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
                   1, 32, device);
    if (IS_ERR(cqr)) {
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "Could not allocate initialization request");
        return PTR_ERR(cqr);
    }
    ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
    ccw->flags |= CCW_FLAG_SLI;
    ccw->cda = (__u32)(addr_t) cqr->data;
    cqr->startdev = device;
    cqr->memdev = device;
    clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
    set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
    cqr->retries = 2;    /* set retry counter to enable basic ERP */
    cqr->expires = 2 * HZ;
    cqr->buildclk = get_clock();
    cqr->status = DASD_CQR_FILLED;

    rc = dasd_sleep_on_immediatly(cqr);

    dasd_sfree_request(cqr, cqr->memdev);
    return rc;
}
/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
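/*
 * Hedged user-space sketch (an assumption, not part of this driver): the
 * reserve and release operations are normally driven through the block
 * device ioctl interface, for example
 *
 *    int fd = open("/dev/dasdb", O_RDONLY);
 *    ioctl(fd, BIODASDRSRV);    reserve the device
 *    ...
 *    ioctl(fd, BIODASDRLSE);    release it again
 *
 * The ioctl names are assumed here; only CAP_SYS_ADMIN callers succeed.
 */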
static int
dasd_eckd_reserve(struct dasd_device *device)
{
    struct dasd_ccw_req *cqr;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
                   1, 32, device);
    if (IS_ERR(cqr)) {
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "Could not allocate initialization request");
        return PTR_ERR(cqr);
    }
    ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
    ccw->flags |= CCW_FLAG_SLI;
    ccw->cda = (__u32)(addr_t) cqr->data;
    cqr->startdev = device;
    cqr->memdev = device;
    clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
    set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
    cqr->retries = 2;    /* set retry counter to enable basic ERP */
    cqr->expires = 2 * HZ;
    cqr->buildclk = get_clock();
    cqr->status = DASD_CQR_FILLED;

    rc = dasd_sleep_on_immediatly(cqr);

    dasd_sfree_request(cqr, cqr->memdev);
    return rc;
}
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
    struct dasd_ccw_req *cqr;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
                   1, 32, device);
    if (IS_ERR(cqr)) {
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "Could not allocate initialization request");
        return PTR_ERR(cqr);
    }
    ccw->cmd_code = DASD_ECKD_CCW_SLCK;
    ccw->flags |= CCW_FLAG_SLI;
    ccw->cda = (__u32)(addr_t) cqr->data;
    cqr->startdev = device;
    cqr->memdev = device;
    clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
    set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
    cqr->retries = 2;    /* set retry counter to enable basic ERP */
    cqr->expires = 2 * HZ;
    cqr->buildclk = get_clock();
    cqr->status = DASD_CQR_FILLED;

    rc = dasd_sleep_on_immediatly(cqr);

    dasd_sfree_request(cqr, cqr->memdev);
    return rc;
}
/*
 * Read performance statistics
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
    struct dasd_psf_prssd_data *prssdp;
    struct dasd_rssd_perf_stats_t *stats;
    struct dasd_ccw_req *cqr;

    cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
                   1 /* PSF */ + 1 /* RSSD */,
                   (sizeof(struct dasd_psf_prssd_data) +
                    sizeof(struct dasd_rssd_perf_stats_t)),
                   device);
    if (IS_ERR(cqr)) {
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "Could not allocate initialization request");
        return PTR_ERR(cqr);
    }
    cqr->startdev = device;
    cqr->memdev = device;
    cqr->expires = 10 * HZ;

    /* Prepare for Read Subsystem Data */
    prssdp = (struct dasd_psf_prssd_data *) cqr->data;
    memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
    prssdp->order = PSF_ORDER_PRSSD;
    prssdp->suborder = 0x01;    /* Performance Statistics */
    prssdp->varies[1] = 0x01;    /* Perf Statistics for the Subsystem */

    ccw->cmd_code = DASD_ECKD_CCW_PSF;
    ccw->count = sizeof(struct dasd_psf_prssd_data);
    ccw->flags |= CCW_FLAG_CC;
    ccw->cda = (__u32)(addr_t) prssdp;

    /* Read Subsystem Data - Performance Statistics */
    stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
    memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

    ccw->cmd_code = DASD_ECKD_CCW_RSSD;
    ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
    ccw->cda = (__u32)(addr_t) stats;

    cqr->buildclk = get_clock();
    cqr->status = DASD_CQR_FILLED;
    rc = dasd_sleep_on(cqr);
    if (rc == 0) {
        prssdp = (struct dasd_psf_prssd_data *) cqr->data;
        stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
        if (copy_to_user(argp, stats,
                 sizeof(struct dasd_rssd_perf_stats_t)))
            rc = -EFAULT;
    }
    dasd_sfree_request(cqr, cqr->memdev);
    return rc;
}
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
    struct dasd_eckd_private *private =
        (struct dasd_eckd_private *)device->private;
    struct attrib_data_t attrib = private->attrib;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    if (copy_to_user(argp, (long *) &attrib,
             sizeof(struct attrib_data_t)))
        return -EFAULT;
    return 0;
}
/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
    struct dasd_eckd_private *private =
        (struct dasd_eckd_private *)device->private;
    struct attrib_data_t attrib;

    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;

    if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
        return -EFAULT;
    private->attrib = attrib;

    dev_info(&device->cdev->dev,
         "The DASD cache mode was set to %x (%i cylinder prestage)\n",
         private->attrib.operation, private->attrib.nr_cyl);
    return 0;
}
/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 */
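/*
 * Hedged user-space sketch (an assumption, not part of this driver): the
 * caller fills a struct dasd_symmio_parms with the addresses and lengths of
 * the PSF payload and the RSSD result buffer and passes it down, roughly
 *
 *    struct dasd_symmio_parms p = {
 *        .psf_data        = (unsigned long) psf_buf,
 *        .psf_data_len    = psf_len,
 *        .rssd_result     = (unsigned long) res_buf,
 *        .rssd_result_len = res_len,
 *    };
 *    ioctl(fd, BIODASDSYMMIO, &p);
 *
 * BIODASDSYMMIO is assumed to be the matching ioctl number; the field names
 * are the ones used by the code below.
 */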
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
    struct dasd_symmio_parms usrparm;
    char *psf_data, *rssd_result;
    struct dasd_ccw_req *cqr;

    /* Copy parms from caller */
    rc = -EFAULT;
    if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
        goto out;
#ifndef CONFIG_64BIT
    /* Make sure pointers are sane even on 31 bit. */
    if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
        rc = -EINVAL;
        goto out;
    }
#endif
    /* alloc I/O data area */
    psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
    rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
    if (!psf_data || !rssd_result) {
        rc = -ENOMEM;
        goto out_free;
    }

    /* get syscall header from user space */
    rc = -EFAULT;
    if (copy_from_user(psf_data,
               (void __user *)(unsigned long) usrparm.psf_data,
               usrparm.psf_data_len))
        goto out_free;

    /* sanity check on syscall header */
    if (psf_data[0] != 0x17 && psf_data[1] != 0xce) {
        rc = -EINVAL;
        goto out_free;
    }

    /* setup CCWs for PSF + RSSD */
    cqr = dasd_smalloc_request("ECKD", 2, 0, device);
    if (IS_ERR(cqr)) {
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "Could not allocate initialization request");
        rc = PTR_ERR(cqr);
        goto out_free;
    }

    cqr->startdev = device;
    cqr->memdev = device;
    cqr->expires = 10 * HZ;
    cqr->buildclk = get_clock();
    cqr->status = DASD_CQR_FILLED;

    /* Build the ccws */
    ccw->cmd_code = DASD_ECKD_CCW_PSF;
    ccw->count = usrparm.psf_data_len;
    ccw->flags |= CCW_FLAG_CC;
    ccw->cda = (__u32)(addr_t) psf_data;

    ccw->cmd_code = DASD_ECKD_CCW_RSSD;
    ccw->count = usrparm.rssd_result_len;
    ccw->flags = CCW_FLAG_SLI;
    ccw->cda = (__u32)(addr_t) rssd_result;

    rc = dasd_sleep_on(cqr);
    if (rc)
        goto out_sfree;

    rc = -EFAULT;
    if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
             rssd_result, usrparm.rssd_result_len))
        goto out_sfree;
    rc = 0;

out_sfree:
    dasd_sfree_request(cqr, cqr->memdev);
out_free:
    kfree(rssd_result);
    kfree(psf_data);
out:
    DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc);
    return rc;
}
static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
    struct dasd_device *device = block->base;

    switch (cmd) {
    case BIODASDGATTR:
        return dasd_eckd_get_attrib(device, argp);
    case BIODASDSATTR:
        return dasd_eckd_set_attrib(device, argp);
    case BIODASDPSRD:
        return dasd_eckd_performance(device, argp);
    case BIODASDRLSE:
        return dasd_eckd_release(device);
    case BIODASDRSRV:
        return dasd_eckd_reserve(device);
    case BIODASDSLCK:
        return dasd_eckd_steal_lock(device);
    case BIODASDSYMMIO:
        return dasd_symm_io(device, argp);
    default:
        return -ENOIOCTLCMD;
    }
}
/*
 * Dump the range of CCWs into 'page' buffer
 * and return number of printed chars.
 */
static int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
    while (from <= to) {
        len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                   " CCW %p: %08X %08X DAT:",
                   from, ((int *) from)[0], ((int *) from)[1]);

        /* get pointer to data (consider IDALs) */
        if (from->flags & CCW_FLAG_IDA)
            datap = (char *) *((addr_t *) (addr_t) from->cda);
        else
            datap = (char *) ((addr_t) from->cda);

        /* dump data (max 32 bytes) */
        for (count = 0; count < from->count && count < 32; count++) {
            if (count % 8 == 0) len += sprintf(page + len, " ");
            if (count % 4 == 0) len += sprintf(page + len, " ");
            len += sprintf(page + len, "%02x", datap[count]);
        }
        len += sprintf(page + len, "\n");
        from++;
    }
    return len;
}
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req,
             struct irb *irb, char *reason)
{
    if (req && scsw_is_tm(&req->irb.scsw)) {
        if (irb->scsw.tm.tcw)
            tsb = tcw_get_tsb(
                (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
        if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
            switch (tsb->flags & 0x07) {
            case 1:    /* tsa_iostat */
                sense = (u64 *)tsb->tsa.iostat.sense;
                break;
            case 2:    /* ts_ddpc */
                sense = (u64 *)tsb->tsa.ddpc.sense;
                break;
            case 3:    /* tsa_intrg */
                break;
            }
        }
    } else {
        if (irb->esw.esw0.erw.cons)
            sense = (u64 *)irb->ecw;
    }
    if (sense) {
        for (sl = 0; sl < 4; sl++) {
            DBF_DEV_EVENT(DBF_EMERG, device,
                      "%s: %016llx %016llx %016llx %016llx",
                      reason, sense[0], sense[1], sense[2],
                      sense[3]);
        }
    } else {
        DBF_DEV_EVENT(DBF_EMERG, device, "%s",
                  "SORRY - NO VALID SENSE AVAILABLE\n");
    }
}
/*
 * Print sense data and related channel program.
 * Only parts are printed because the printk buffer is only 1024 bytes.
 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
                     struct dasd_ccw_req *req, struct irb *irb)
{
    struct ccw1 *first, *last, *fail, *from, *to;

    page = (char *) get_zeroed_page(GFP_ATOMIC);
    if (page == NULL) {
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "No memory to dump sense data\n");
        return;
    }
    /* dump the sense data */
    len = sprintf(page, KERN_ERR PRINTK_HEADER
              " I/O status report for device %s:\n",
              dev_name(&device->cdev->dev));
    len += sprintf(page + len, KERN_ERR PRINTK_HEADER
               " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
               scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw));
    len += sprintf(page + len, KERN_ERR PRINTK_HEADER
               " device %s: Failing CCW: %p\n",
               dev_name(&device->cdev->dev),
               (void *) (addr_t) irb->scsw.cmd.cpa);
    if (irb->esw.esw0.erw.cons) {
        for (sl = 0; sl < 4; sl++) {
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " Sense(hex) %2d-%2d:",
                       (8 * sl), ((8 * sl) + 7));
            for (sct = 0; sct < 8; sct++) {
                len += sprintf(page + len, " %02x",
                           irb->ecw[8 * sl + sct]);
            }
            len += sprintf(page + len, "\n");
        }

        if (irb->ecw[27] & DASD_SENSE_BIT_0) {
            /* 24 Byte Sense Data */
            sprintf(page + len, KERN_ERR PRINTK_HEADER
                " 24 Byte: %x MSG %x, "
                "%s MSGb to SYSOP\n",
                irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
                irb->ecw[1] & 0x10 ? "" : "no");
        } else {
            /* 32 Byte Sense Data */
            sprintf(page + len, KERN_ERR PRINTK_HEADER
                " 32 Byte: Format: %x "
                "Exception class %x\n",
                irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
        }
    } else {
        sprintf(page + len, KERN_ERR PRINTK_HEADER
            " SORRY - NO VALID SENSE AVAILABLE\n");
    }

    if (req) {
        /* req == NULL for unsolicited interrupts */
        /* dump the Channel Program (max 140 Bytes per line) */
        /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
        first = req->cpaddr;
        for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
        to = min(first + 6, last);
        len = sprintf(page, KERN_ERR PRINTK_HEADER
                  " Related CP in req: %p\n", req);
        dasd_eckd_dump_ccw_range(first, to, page + len);

        /* print failing CCW area (maximum 4) */
        /* scsw->cda is either valid or zero */
        fail = (struct ccw1 *)(addr_t)
            irb->scsw.cmd.cpa;    /* failing CCW */
        if (from < fail - 2) {
            from = fail - 2;    /* there is a gap - print header */
            len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
        }
        to = min(fail + 1, last);
        len += dasd_eckd_dump_ccw_range(from, to, page + len);

        /* print last CCWs (maximum 2) */
        from = max(from, ++to);
        if (from < last - 1) {
            from = last - 1;    /* there is a gap - print header */
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
        }
        len += dasd_eckd_dump_ccw_range(from, last, page + len);
    }
    free_page((unsigned long) page);
}
/*
 * Print sense data from a tcw.
 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
                     struct dasd_ccw_req *req, struct irb *irb)
{
    int len, sl, sct, residual;

    page = (char *) get_zeroed_page(GFP_ATOMIC);
    if (page == NULL) {
        DBF_DEV_EVENT(DBF_WARNING, device, " %s",
                  "No memory to dump sense data");
        return;
    }
    /* dump the sense data */
    len = sprintf(page, KERN_ERR PRINTK_HEADER
              " I/O status report for device %s:\n",
              dev_name(&device->cdev->dev));
    len += sprintf(page + len, KERN_ERR PRINTK_HEADER
               " in req: %p CS: 0x%02X DS: 0x%02X "
               "fcxs: 0x%02X schxs: 0x%02X\n", req,
               scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
               irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
    len += sprintf(page + len, KERN_ERR PRINTK_HEADER
               " device %s: Failing TCW: %p\n",
               dev_name(&device->cdev->dev),
               (void *) (addr_t) irb->scsw.tm.tcw);

    if (irb->scsw.tm.tcw)
        tsb = tcw_get_tsb(
            (struct tcw *)(unsigned long)irb->scsw.tm.tcw);

    if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
        len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                   " tsb->length %d\n", tsb->length);
        len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                   " tsb->flags %x\n", tsb->flags);
        len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                   " tsb->dcw_offset %d\n", tsb->dcw_offset);
        len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                   " tsb->count %d\n", tsb->count);
        residual = tsb->count - 28;
        len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                   " residual %d\n", residual);

        switch (tsb->flags & 0x07) {
        case 1:    /* tsa_iostat */
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " tsb->tsa.iostat.dev_time %d\n",
                       tsb->tsa.iostat.dev_time);
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " tsb->tsa.iostat.def_time %d\n",
                       tsb->tsa.iostat.def_time);
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " tsb->tsa.iostat.queue_time %d\n",
                       tsb->tsa.iostat.queue_time);
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " tsb->tsa.iostat.dev_busy_time %d\n",
                       tsb->tsa.iostat.dev_busy_time);
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " tsb->tsa.iostat.dev_act_time %d\n",
                       tsb->tsa.iostat.dev_act_time);
            sense = tsb->tsa.iostat.sense;
            break;
        case 2:    /* ts_ddpc */
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " tsb->tsa.ddpc.rcq: ");
            for (sl = 0; sl < 16; sl++) {
                for (sct = 0; sct < 8; sct++) {
                    len += sprintf(page + len, " %02x",
                               tsb->tsa.ddpc.rcq[sl]);
                }
                len += sprintf(page + len, "\n");
            }
            sense = tsb->tsa.ddpc.sense;
            break;
        case 3:    /* tsa_intrg */
            len += sprintf(page + len, KERN_ERR PRINTK_HEADER
                       " tsb->tsa.intrg.: not supported yet\n");
            break;
        }

        if (sense) {
            for (sl = 0; sl < 4; sl++) {
                len += sprintf(page + len,
                           KERN_ERR PRINTK_HEADER
                           " Sense(hex) %2d-%2d:",
                           (8 * sl), ((8 * sl) + 7));
                for (sct = 0; sct < 8; sct++) {
                    len += sprintf(page + len, " %02x",
                               sense[8 * sl + sct]);
                }
                len += sprintf(page + len, "\n");
            }

            if (sense[27] & DASD_SENSE_BIT_0) {
                /* 24 Byte Sense Data */
                sprintf(page + len, KERN_ERR PRINTK_HEADER
                    " 24 Byte: %x MSG %x, "
                    "%s MSGb to SYSOP\n",
                    sense[7] >> 4, sense[7] & 0x0f,
                    sense[1] & 0x10 ? "" : "no");
            } else {
                /* 32 Byte Sense Data */
                sprintf(page + len, KERN_ERR PRINTK_HEADER
                    " 32 Byte: Format: %x "
                    "Exception class %x\n",
                    sense[6] & 0x0f, sense[22] >> 4);
            }
        } else {
            sprintf(page + len, KERN_ERR PRINTK_HEADER
                " SORRY - NO VALID SENSE AVAILABLE\n");
        }
    } else {
        sprintf(page + len, KERN_ERR PRINTK_HEADER
            " SORRY - NO TSB DATA AVAILABLE\n");
    }
    free_page((unsigned long) page);
}
static void dasd_eckd_dump_sense(struct dasd_device *device,
                 struct dasd_ccw_req *req, struct irb *irb)
{
    if (req && scsw_is_tm(&req->irb.scsw))
        dasd_eckd_dump_sense_tcw(device, req, irb);
    else
        dasd_eckd_dump_sense_ccw(device, req, irb);
}
/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and one
 * locate record ccw + 16 bytes of data. That makes:
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 249.5 blocks
 * for one request. Give a little safety and the result is 240.
 */
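/*
 * Worked check of the arithmetic above (illustration only):
 *    8192 - 24 - 136 - 8 - 16 - 8 - 16 = 7984 bytes
 *    7984 / 16 = 499 blocks at maximum
 *    499 / 2 = 249.5 blocks per request, rounded down with margin to 240
 */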
static struct dasd_discipline dasd_eckd_discipline = {
    .owner = THIS_MODULE,
    .name = "ECKD",
    .ebcname = "ECKD",
    .max_blocks = 240,
    .check_device = dasd_eckd_check_characteristics,
    .uncheck_device = dasd_eckd_uncheck_device,
    .do_analysis = dasd_eckd_do_analysis,
    .ready_to_online = dasd_eckd_ready_to_online,
    .online_to_ready = dasd_eckd_online_to_ready,
    .fill_geometry = dasd_eckd_fill_geometry,
    .start_IO = dasd_start_IO,
    .term_IO = dasd_term_IO,
    .handle_terminated_request = dasd_eckd_handle_terminated_request,
    .format_device = dasd_eckd_format_device,
    .erp_action = dasd_eckd_erp_action,
    .erp_postaction = dasd_eckd_erp_postaction,
    .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
    .build_cp = dasd_eckd_build_alias_cp,
    .free_cp = dasd_eckd_free_alias_cp,
    .dump_sense = dasd_eckd_dump_sense,
    .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
    .fill_info = dasd_eckd_fill_info,
    .ioctl = dasd_eckd_ioctl,
};
static int __init
dasd_eckd_init(void)
{
    ASCEBC(dasd_eckd_discipline.ebcname, 4);
    return ccw_driver_register(&dasd_eckd_driver);
}

static void __exit
dasd_eckd_cleanup(void)
{
    ccw_driver_unregister(&dasd_eckd_driver);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);