/*
 * File...........: linux/drivers/s390/block/dasd_eckd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */
#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/compat.h>

#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/todclk.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>

#include "dasd_int.h"
#include "dasd_eckd.h"
#include "../cio/chsc.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)
MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */
/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;

	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
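/*
 * Worked example (added for illustration, not part of the original
 * source): ceil_quot() is a plain ceiling division and
 * round_up_multiple() rounds up to the next multiple, e.g.
 *	ceil_quot(10, 4)         == 3
 *	round_up_multiple(10, 4) == 12
 *	round_up_multiple(12, 4) == 12
 */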
static unsigned int
recs_per_track(struct dasd_eckd_characteristics *rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
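/*
 * Illustrative calculation (added, not from the original source): for a
 * 3390 track with 4096-byte data blocks and no key field,
 *	dn = ceil_quot(4096 + 6, 232) + 1 = 19
 *	1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12
 * i.e. twelve 4KB records fit on one 3390 track.
 */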
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
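/*
 * Note (added; based on the reconstruction above): set_ch_t() packs
 * large cylinder numbers into the cylinder/head pair - the low 16 bits
 * of the cylinder go into geo->cyl, any higher-order cylinder bits are
 * shifted into the upper bits of geo->head, and the 4-bit head number
 * occupies the low bits of geo->head.
 */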
static int
check_XRC (struct ccw1         *de_ccw,
	   struct DE_eckd_data *data,
	   struct dasd_device  *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	private = (struct dasd_eckd_private *) device->private;

	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = check_XRC(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {
		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
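/*
 * Note (added): define_extent() together with locate_record() below
 * forms the classic Define Extent / Locate Record prologue of an ECKD
 * channel program; prefix_LRE()/prefix() carry the same information in
 * a single Prefix CCW for control units that support it, and
 * prepare_itcw() reuses that layout for transport-mode (itcw) I/O.
 */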
static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */

	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;
	return rc;
}
static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
			  unsigned int rec_on_trk, int count, int cmd,
			  struct dasd_device *device, unsigned int reclen,
			  unsigned int tlf)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned char format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	ccw->count = sizeof(*pfxdata);
	ccw->cda = (__u32) __pa(pfxdata);

	memset(pfxdata, 0, sizeof(*pfxdata));
	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}

	/* define extend data (mostly) */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		dedata->mask.perm = 0x3;
		dedata->mask.auth = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown opcode 0x%x", cmd);
		BUG();
		return -EINVAL;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750)
	    && !(basepriv->uses_cdl && trk < 2))
		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
		      basedev, blksize, tlf);
	return rc;
}
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
static int
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device *device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "command %d", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
	return 0;
}
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}
/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
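/*
 * Example (added for illustration): with sizes_trk0[] = { 28, 148, 84 }
 * and LABEL_SIZE 140, dasd_eckd_cdl_reclen(1) returns 148. A
 * CDL-formatted track 0 is therefore expected to show a 4-byte key and
 * a 144-byte data field for that record, which is exactly the check
 * dasd_eckd_end_analysis() makes via "dl != dasd_eckd_cdl_reclen(i) - 4".
 */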
/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device,
				  struct dasd_uid *uid)
{
	struct dasd_eckd_private *private;
	int count;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->ned || !private->gneq)
		return -ENODEV;

	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
	return 0;
}
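/*
 * Note (added): the resulting dasd_uid is assembled from the
 * configuration data read from the device - vendor and serial from the
 * NED, the SSID from the GNEQ, the unit address from the NED, and, when
 * a virtual-device SNEQ is present, a 32-character hex token in
 * uid->vduit built from the 16 uit bytes.
 */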
static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
						    void *rcd_buffer,
						    struct ciw *ciw, __u8 lpm)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = ciw->cmd;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = ciw->count;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}

	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buf[0] = 0xE5;
	rcd_buf[1] = 0xF1;
	rcd_buf[2] = 0x4B;
	rcd_buf[3] = 0xF0;
	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
	if (IS_ERR(cqr)) {
		ret = PTR_ERR(cqr);
		goto out_error;
	}
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = ciw->count;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;
}
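/*
 * Note (added): the configuration data is scanned as an array of
 * dasd_sneq-sized records; the identifier/format fields select the
 * specific NEQ (sneq), the virtual device NEQ (vdsneq), the general
 * NEQ (gneq) and the node element descriptor (ned). A device without
 * both an NED and a GNEQ is rejected, because dasd_eckd_generate_uid()
 * and the path handling below depend on those two records.
 */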
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc;
	__u8 lpm;
	struct dasd_eckd_private *private;
	struct dasd_eckd_path *path_data;

	private = (struct dasd_eckd_private *) device->private;
	path_data = (struct dasd_eckd_path *) &private->path_data;
	path_data->opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (lpm & path_data->opm){
			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
						     &conf_len, lpm);
			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
						"Read configuration data returned "
						"error %d", rc);
				return rc;
			}
			if (conf_data == NULL) {
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
						"No configuration data "
						"retrieved");
				continue;	/* no error */
			}
			/* save first valid configuration data */
			if (!conf_data_saved) {
				kfree(private->conf_data);
				private->conf_data = conf_data;
				private->conf_len = conf_len;
				if (dasd_eckd_identify_conf_parts(private)) {
					private->conf_data = NULL;
					private->conf_len = 0;
					kfree(conf_data);
					continue;
				}
				conf_data_saved++;
			}
			switch (dasd_eckd_path_access(conf_data, conf_len)) {
			case 0x02:
				path_data->npm |= lpm;
				break;
			case 0x03:
				path_data->ppm |= lpm;
				break;
			}
			if (conf_data != private->conf_data)
				kfree(conf_data);
		}
	}
	return 0;
}
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0x40;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x88;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
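/*
 * Usage note (added): dasd_eckd_psf_ssc() below sends this PSF-SSC
 * request and, on success, asks CIO to reprobe the devices, since the
 * SSC order (with the additional bits set when enable_pav != 0) can
 * change how the storage server presents base and alias devices.
 */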
/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device)
{
	int rc;
	struct dasd_eckd_private *private;
	int enable_pav;

	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav);

	/* may be the requested feature is not available on the server,
	 * therefore just report the error and go ahead */
	private = (struct dasd_eckd_private *) device->private;
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	/* RE-Read Configuration Data */
	return dasd_eckd_read_conf(device);
}
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	int is_known, rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* Generate device unique id and register in devmap */
	rc = dasd_eckd_generate_uid(device, &private->uid);
	if (rc)
		goto out_err1;
	dasd_set_uid(device->cdev, &private->uid);

	if (private->uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0) {
		rc = is_known;
		goto out_err2;
	}
	if (!is_known) {
		rc = dasd_eckd_validate_server(device); /* will switch pav on */
		if (rc)
			goto out_err3;
	}

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}
	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk);
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	kfree(private->conf_data);
	private->conf_data = NULL;
}
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
static void
dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
{
	struct dasd_eckd_private *private;
	struct dasd_device *device;

	device = init_cqr->startdev;
	private = (struct dasd_eckd_private *) device->private;
	private->init_cqr_status = init_cqr->status;
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}
static int
dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *init_cqr;

	private = (struct dasd_eckd_private *) block->base->private;
	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}
static int
dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;

	device = block->base;
	private = (struct dasd_eckd_private *) device->private;
	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status != DASD_CQR_DONE) {
		dev_warn(&device->cdev->dev,
			 "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl))
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}
	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
	block->blocks = (private->real_cyl *
			 private->rdc_data.trk_per_cyl *
			 blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 ((private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (private->init_cqr_status < 0)
		return dasd_eckd_start_analysis(block);
	else
		return dasd_eckd_end_analysis(block);
}

static int dasd_eckd_ready_to_online(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
}

static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
}
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (dasd_check_blocksize(block->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, block->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}
*
1413 dasd_eckd_format_device(struct dasd_device
* device
,
1414 struct format_data_t
* fdata
)
1416 struct dasd_eckd_private
*private;
1417 struct dasd_ccw_req
*fcp
;
1418 struct eckd_count
*ect
;
1422 struct ch_t address
;
1423 int cplength
, datasize
;
1428 private = (struct dasd_eckd_private
*) device
->private;
1429 rpt
= recs_per_track(&private->rdc_data
, 0, fdata
->blksize
);
1431 fdata
->start_unit
/ private->rdc_data
.trk_per_cyl
,
1432 fdata
->start_unit
% private->rdc_data
.trk_per_cyl
);
1434 /* Sanity checks. */
1435 if (fdata
->start_unit
>=
1436 (private->real_cyl
* private->rdc_data
.trk_per_cyl
)) {
1437 dev_warn(&device
->cdev
->dev
, "Start track number %d used in "
1438 "formatting is too big\n", fdata
->start_unit
);
1439 return ERR_PTR(-EINVAL
);
1441 if (fdata
->start_unit
> fdata
->stop_unit
) {
1442 dev_warn(&device
->cdev
->dev
, "Start track %d used in "
1443 "formatting exceeds end track\n", fdata
->start_unit
);
1444 return ERR_PTR(-EINVAL
);
1446 if (dasd_check_blocksize(fdata
->blksize
) != 0) {
1447 dev_warn(&device
->cdev
->dev
,
1448 "The DASD cannot be formatted with block size %d\n",
1450 return ERR_PTR(-EINVAL
);
1454 * fdata->intensity is a bit string that tells us what to do:
1455 * Bit 0: write record zero
1456 * Bit 1: write home address, currently not supported
1457 * Bit 2: invalidate tracks
1458 * Bit 3: use OS/390 compatible disk layout (cdl)
1459 * Bit 4: do not allow storage subsystem to modify record zero
1460 * Only some bit combinations do make sense.
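	/*
	 * Illustrative values (added, not part of the original comment):
	 * plain "normal" formatting is intensity 0x00, a cdl format is
	 * 0x08, 0x09 additionally rewrites record zero, and 0x04/0x0c
	 * invalidate the track; setting bit 4 (0x10) on top of any of
	 * these keeps the storage subsystem from modifying record zero.
	 */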
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}
	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 3 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&device->cdev->dev, "An I/O control call used "
			 "incorrect flags 0x%x\n", fdata->intensity);
		return ERR_PTR(-EINVAL);
	}
	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
	if (IS_ERR(fcp))
		return fcp;

	data = fcp->data;
	ccw = fcp->cpaddr;
	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		/* grant subsystem permission to format R0 */
		if (r0_perm)
			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt,
			      DASD_ECKD_CCW_WRITE_CKD, device,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			      device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
			      device->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}
	if (intensity & 0x01) {	/* write record zero */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ect->record = 0;
		ect->kl = 0;
		ect->dl = 8;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
		ccw++;
	}
	if ((intensity & ~0x08) & 0x04) {	/* erase track */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ect->record = 1;
		ect->kl = 0;
		ect->dl = 0;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
	} else {		/* write remaining records */
		for (i = 0; i < rpt; i++) {
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = i + 1;
			ect->kl = 0;
			ect->dl = fdata->blksize;
			/* Check for special tracks 0-1 when formatting CDL */
			if ((intensity & 0x08) &&
			    fdata->start_unit == 0) {
				if (i < 3) {
					ect->kl = 4;
					ect->dl = sizes_trk0[i] - 4;
				}
			}
			if ((intensity & 0x08) &&
			    fdata->start_unit == 1) {
				ect->kl = 44;
				ect->dl = LABEL_SIZE - 44;
			}
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
	}
	fcp->startdev = device;
	fcp->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
	fcp->retries = 5;	/* set retry counter to enable default ERP */
	fcp->buildclk = get_clock();
	fcp->status = DASD_CQR_FILLED;
	return fcp;
}
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	cqr->status = DASD_CQR_FILLED;
	if (cqr->block && (cqr->startdev != cqr->block->base)) {
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
	}
}
static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
	struct ccw_device *cdev = device->cdev;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		return dasd_3990_erp_action;
	case 0x9343:
	case 0x3880:
	default:
		return dasd_default_erp_action;
	}
}

static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req *cqr)
{
	return dasd_default_erp_postaction;
}
static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
						   struct irb *irb)
{
	char mask;
	char *sense = NULL;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		dasd_generic_handle_state_change(device);
		return;
	}

	/* summary unit check */
	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    (irb->ecw[7] == 0x0D)) {
		dasd_alias_handle_summary_unit_check(device, irb);
		return;
	}

	sense = dasd_get_sense(irb);
	/* service information message SIM */
	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		dasd_schedule_device_bh(device);
		return;
	}

	if ((scsw_cc(&irb->scsw) == 1) &&
	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
		/* fake irb do nothing, they are handled elsewhere */
		dasd_schedule_device_bh(device);
		return;
	}

	if (!sense) {
		/* just report other unsolicited interrupts */
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "unsolicited interrupt received");
	} else {
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "unsolicited interrupt received "
			      "(sense available)");
		device->discipline->dump_sense_dbf(device, irb, "unsolicited");
	}

	dasd_schedule_device_bh(device);
	return;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv->bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv->bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
			cidaw += bv->bv_len >> (block->s2b_shift + 9);
#endif
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
			if (copy)
				dst = copy + bv->bv_offset;
		}
		for (off = 0; off < bv->bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	/* on 31-bit we need space for two 32 bit addresses per page
	 * on 64-bit one 64 bit address
	 */
	datasize = sizeof(struct PFX_eckd_data) +
		cidaw * sizeof(unsigned long long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and its idal ends with the track boundaries
	 */
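	/*
	 * Example (added for illustration): with 4KB blocks on a 3390,
	 * one read/write ccw below covers at most 12 blocks (one full
	 * track), so ccw->count is at most 49152 bytes and the
	 * idal_create_words() calls cut the collected buffer into idaws
	 * at every IDA_BLOCK_SIZE boundary and additionally at each
	 * track end.
	 */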
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = 0;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		seg_len = bv->bv_len;
		while (seg_len) {
			if (new_track) {
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger than IDA_BLOCK_SIZE
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = 0;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;
	int sector = 0;
	int dn, d;
	u8 pfx_cmd;

	/* setup prefix data */
	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata.validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata.validity.hyper_pav = 1;
	}

	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(&pfxdata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	lredata->auxiliary.length_valid = 1;
	lredata->auxiliary.length_scope = 1;
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	lredata->sector = sector;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
		     &pfxdata, sizeof(pfxdata), total_data_size);

	return rc;
}
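/*
 * Worked example of the 3390 sector estimate used in prepare_itcw above
 * (illustrative helper, not called by the driver; helper name is ours).
 * For blksize = 4096 and rec_on_trk = 1: dn = 18, d = 133, sector = 6.
 */
static inline int dasd_eckd_example_sector_3390(unsigned int blksize,
						unsigned int rec_on_trk)
{
	unsigned int dn, d;

	dn = ceil_quot(blksize + 6, 232);
	d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
	return (49 + (rec_on_trk - 1) * (10 + d)) / 8;
}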
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* track based I/O needs to address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
	if (IS_ERR(cqr))
		return cqr;

	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 100*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	cqr->cpaddr = itcw_get_tcw(itcw);

	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries
	 * We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
	 */
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
		if (IS_ERR(last_tidaw))
			return (struct dasd_ccw_req *)last_tidaw;
	}

	last_tidaw->flags |= 0x80;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
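/*
 * Sketch of the request queue limits the tidaw comment above relies on
 * (illustrative only; the real limits are applied where the block device
 * queue is set up, and the helper name is ours). Keeping every segment
 * inside one page guarantees that a segment fits a single 4k TIDAW
 * without crossing a page boundary.
 */
static inline void dasd_eckd_example_tidaw_queue_limits(struct request_queue *q)
{
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_max_segment_size(q, PAGE_SIZE);
}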
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int tpm, cmdrtd, cmdwtd;
	int use_prefix;
#if defined(CONFIG_64BIT)
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
#endif
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	/* is transport mode supported? */
#if defined(CONFIG_64BIT)
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
#else
	tpm = 0;
#endif

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}
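#if defined(CONFIG_64BIT)
/*
 * Illustrative condensation of the transport-mode test in
 * dasd_eckd_build_cp (helper name is ours, not used by the driver):
 * the channel subsystem, the GNEQ configuration record and the device
 * feature codes must all indicate FCX support before transport mode
 * is chosen.
 */
static inline int dasd_eckd_example_tpm_supported(struct dasd_eckd_private *private)
{
	return css_general_characteristics.fcx &&
		(private->gneq->reserved2[7] & 0x04) &&
		(private->features.feature[40] & 0x80);
}
#endif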
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_eckd_private *) cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv->bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
/*
 * Modify ccw/tcw in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
	struct ccw1 *ccw;
	struct PFX_eckd_data *pfxdata;
	struct tcw *tcw;
	struct tccb *tccb;
	struct dcw *dcw;

	if (cqr->cpmode == 1) {
		tcw = cqr->cpaddr;
		tccb = tcw_get_tccb(tcw);
		dcw = (struct dcw *)&tccb->tca[0];
		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
		pfxdata->validity.verify_base = 0;
		pfxdata->validity.hyper_pav = 0;
	} else {
		ccw = cqr->cpaddr;
		pfxdata = cqr->data;
		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
			pfxdata->validity.verify_base = 0;
			pfxdata->validity.hyper_pav = 0;
		}
	}
}
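/*
 * Usage sketch for the note above (hypothetical helper, not part of the
 * driver): a caller that wants to restart the very same cqr on the base
 * device must retarget it after the channel program has been rewritten.
 */
static inline void dasd_eckd_example_restart_on_base(struct dasd_ccw_req *cqr,
						     struct dasd_device *base)
{
	dasd_eckd_reset_ccw_to_base_io(cqr);
	cqr->startdev = base;
	cqr->status = DASD_CQR_FILLED;
}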
#define DASD_ECKD_CHANQ_MAX_SIZE 4

static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = (struct dasd_eckd_private *) startdev->private;
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}
static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
				   struct request *req)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
	private = (struct dasd_eckd_private *) cqr->memdev->private;
	private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
	return dasd_eckd_free_cp(cqr, req);
}
static int
dasd_eckd_fill_info(struct dasd_device * device,
		    struct dasd_information2_t * info)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	info->label_block = 2;
	info->FBA_layout = private->uses_cdl ? 0 : 1;
	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(struct dasd_eckd_characteristics));
	info->confdata_size = min((unsigned long) private->conf_len,
				  sizeof(info->configuration_data));
	memcpy(info->configuration_data, private->conf_data,
	       info->confdata_size);
	return 0;
}
/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a prior reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
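/*
 * Userspace view of the three ioctls above (illustrative sketch only; it
 * assumes the usual BIODASDRSRV/BIODASDRLSE/BIODASDSLCK definitions from
 * <asm/dasd.h> and a hypothetical device node name):
 *
 *	int fd = open("/dev/dasdb", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, BIODASDRSRV) == 0) {
 *		... exclusive access to the volume ...
 *		ioctl(fd, BIODASDRLSE);
 *	}
 */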
/*
 * Read performance statistics
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
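/*
 * Minimal sketch of the PSF/RSSD pairing used above (helper name is ours,
 * not used by the driver): the PSF CCW that describes the request is
 * command-chained (CCW_FLAG_CC) into the RSSD CCW that reads the result
 * buffer back from the storage server.
 */
static inline void dasd_eckd_example_chain_psf_rssd(struct ccw1 *ccw,
						    void *psf, __u16 psf_len,
						    void *rssd, __u16 rssd_len)
{
	ccw[0].cmd_code = DASD_ECKD_CCW_PSF;
	ccw[0].count = psf_len;
	ccw[0].flags = CCW_FLAG_CC;
	ccw[0].cda = (__u32)(addr_t) psf;

	ccw[1].cmd_code = DASD_ECKD_CCW_RSSD;
	ccw[1].count = rssd_len;
	ccw[1].flags = CCW_FLAG_SLI;
	ccw[1].cda = (__u32)(addr_t) rssd;
}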
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extend (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private =
		(struct dasd_eckd_private *)device->private;
	struct attrib_data_t attrib = private->attrib;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	rc = 0;
	if (copy_to_user(argp, (long *) &attrib,
			 sizeof(struct attrib_data_t)))
		rc = -EFAULT;

	return rc;
}
/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extend (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private =
		(struct dasd_eckd_private *)device->private;
	struct attrib_data_t attrib;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
		return -EFAULT;
	private->attrib = attrib;

	dev_info(&device->cdev->dev,
		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
		 private->attrib.operation, private->attrib.nr_cyl);
	return 0;
}
/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
#ifndef CONFIG_64BIT
	/* Make sure pointers are sane even on 31 bit. */
	if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
		rc = -EINVAL;
		goto out;
	}
#endif
	/* alloc I/O data area */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;

	/* sanity check on syscall header */
	if (psf_data[0] != 0x17 && psf_data[1] != 0xce) {
		rc = -EINVAL;
		goto out_free;
	}

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			 rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc);
	return rc;
}
static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
	struct dasd_device *device = block->base;

	switch (cmd) {
	case BIODASDGATTR:
		return dasd_eckd_get_attrib(device, argp);
	case BIODASDSATTR:
		return dasd_eckd_set_attrib(device, argp);
	case BIODASDPSRD:
		return dasd_eckd_performance(device, argp);
	case BIODASDRLSE:
		return dasd_eckd_release(device);
	case BIODASDRSRV:
		return dasd_eckd_reserve(device);
	case BIODASDSLCK:
		return dasd_eckd_steal_lock(device);
	case BIODASDSYMMIO:
		return dasd_symm_io(device, argp);
	default:
		return -ENOIOCTLCMD;
	}
}
/*
 * Dump the range of CCWs into 'page' buffer
 * and return number of printed chars.
 */
static int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);

		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			datap = (char *) *((addr_t *) (addr_t) from->cda);
		else
			datap = (char *) ((addr_t) from->cda);

		/* dump data (max 32 bytes) */
		for (count = 0; count < from->count && count < 32; count++) {
			if (count % 8 == 0) len += sprintf(page + len, " ");
			if (count % 4 == 0) len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	return len;
}
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			 char *reason)
{
	u64 *sense;

	sense = (u64 *) dasd_get_sense(irb);
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device,
			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
			      "%016llx", reason,
			      scsw_is_tm(&irb->scsw) ? "t" : "c",
			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
			      scsw_dstat(&irb->scsw), sense[0], sense[1],
			      sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
			      "SORRY - NO VALID SENSE AVAILABLE\n");
	}
}
/*
 * Print sense data and related channel program.
 * Parts are printed because printk buffer is only 1024 bytes.
 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
		       req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
		       scsw_cc(&irb->scsw), req ? req->intrc : 0);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, KERN_ERR PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk("%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, KERN_ERR PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk("%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa; /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2;     /* there is a gap - print header */
			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1;    /* there is a gap - print header */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk("%s", page);
	}
	free_page((unsigned long) page);
}
/*
 * Print sense data from a tcw.
 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			      "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
		       "fcxs: 0x%02X schxs: 0x%02X\n", req,
		       scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
		       scsw_cc(&irb->scsw), req->intrc,
		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	tsb = NULL;
	sense = NULL;
	if (irb->scsw.tm.tcw)
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " residual %d\n", residual);

		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.ddpc.rcq: ");
			for (sl = 0; sl < 16; sl++) {
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       tsb->tsa.ddpc.rcq[sl]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len,
					       KERN_ERR PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, KERN_ERR PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, KERN_ERR PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, KERN_ERR PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk("%s", page);
	free_page((unsigned long) page);
}
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	if (req && scsw_is_tm(&req->irb.scsw))
		dasd_eckd_dump_sense_tcw(device, req, irb);
	else
		dasd_eckd_dump_sense_ccw(device, req, irb);
}
static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
	/*
	 * the device should be disconnected from our LCU structure
	 * on restore we will reconnect it and reread LCU specific
	 * information like PAV support that might have changed
	 */
	dasd_alias_remove_device(device);
	dasd_alias_disconnect_device_from_lcu(device);

	return 0;
}
static int dasd_eckd_restore_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_eckd_characteristics temp_rdc_data;
	int is_known, rc;
	struct dasd_uid temp_uid;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	/* Generate device unique id and register in devmap */
	rc = dasd_eckd_generate_uid(device, &private->uid);
	dasd_get_uid(device->cdev, &temp_uid);
	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
		dev_err(&device->cdev->dev, "The UID of the DASD has "
			"changed\n");
	if (rc)
		goto out_err;
	dasd_set_uid(device->cdev, &private->uid);

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0) {
		rc = is_known;
		goto out_err;
	}
	if (!is_known) {
		rc = dasd_eckd_validate_server(device); /* will switch pav on */
		if (rc)
			goto out_err;
	}

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &temp_rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err;
	}
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* add device to alias management */
	dasd_alias_add_device(device);

	return 0;

out_err:
	return -1;
}
static struct ccw_driver dasd_eckd_driver = {
	.name	     = "dasd-eckd",
	.owner	     = THIS_MODULE,
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.freeze      = dasd_generic_pm_freeze,
	.thaw	     = dasd_generic_restore_device,
	.restore     = dasd_generic_restore_device,
};
/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and one
 * locate record ccw + 16 bytes of data. That makes:
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 249.5 blocks
 * for one request. Give a little safety and the result is 240.
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.max_blocks = 240,
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.ready_to_online = dasd_eckd_ready_to_online,
	.online_to_ready = dasd_eckd_online_to_ready,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.freeze = dasd_eckd_pm_freeze,
	.restore = dasd_eckd_restore_device,
};
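/*
 * Illustrative check of the max_blocks arithmetic from the comment above
 * the discipline structure (helper name is ours; values are taken from
 * that comment, not re-derived from the structures):
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks, split across two
 * in-flight requests = 249 each, minus a small safety margin = 240.
 */
static inline unsigned int dasd_eckd_example_max_blocks(void)
{
	unsigned int usable = 8192 - 24 - 136 - 8 - 16 - 8 - 16;

	return (usable / 16) / 2 - 9;	/* 499 / 2 = 249; 249 - 9 = 240 */
}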
static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();

	return ret;
}

static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);