/*
 * File...........: linux/drivers/s390/block/dasd_eckd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */
#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/compat.h>
#include <asm/uaccess.h>
#include <asm/ccwdev.h>

#include "dasd_int.h"
#include "dasd_eckd.h"
#include "../cio/chsc.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)

MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;
	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
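/* Quick check on the helpers above (added for clarity, not part of the
 * original source): ceil_quot(7, 3) == 3 and round_up_multiple(7, 3) == 9;
 * ceil_quot rounds the quotient up, round_up_multiple rounds the value itself
 * up to the next multiple of the divisor. */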
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
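/* Worked example (illustrative; assumes the 1729-based branch above is the
 * 3390 case): with kl == 0 and dl == 4096, dn = ceil_quot(4102, 232) + 1 = 19
 * and the result is 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34))
 * = 1729 / 143 = 12, i.e. twelve 4 KB records per track. */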
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
static int
check_XRC (struct ccw1         *de_ccw,
	   struct DE_eckd_data *data,
	   struct dasd_device  *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}
196 define_extent(struct ccw1
*ccw
, struct DE_eckd_data
*data
, unsigned int trk
,
197 unsigned int totrk
, int cmd
, struct dasd_device
*device
)
199 struct dasd_eckd_private
*private;
201 u16 heads
, beghead
, endhead
;
204 private = (struct dasd_eckd_private
*) device
->private;
206 ccw
->cmd_code
= DASD_ECKD_CCW_DEFINE_EXTENT
;
209 ccw
->cda
= (__u32
) __pa(data
);
211 memset(data
, 0, sizeof(struct DE_eckd_data
));
213 case DASD_ECKD_CCW_READ_HOME_ADDRESS
:
214 case DASD_ECKD_CCW_READ_RECORD_ZERO
:
215 case DASD_ECKD_CCW_READ
:
216 case DASD_ECKD_CCW_READ_MT
:
217 case DASD_ECKD_CCW_READ_CKD
:
218 case DASD_ECKD_CCW_READ_CKD_MT
:
219 case DASD_ECKD_CCW_READ_KD
:
220 case DASD_ECKD_CCW_READ_KD_MT
:
221 case DASD_ECKD_CCW_READ_COUNT
:
222 data
->mask
.perm
= 0x1;
223 data
->attributes
.operation
= private->attrib
.operation
;
225 case DASD_ECKD_CCW_WRITE
:
226 case DASD_ECKD_CCW_WRITE_MT
:
227 case DASD_ECKD_CCW_WRITE_KD
:
228 case DASD_ECKD_CCW_WRITE_KD_MT
:
229 data
->mask
.perm
= 0x02;
230 data
->attributes
.operation
= private->attrib
.operation
;
231 rc
= check_XRC (ccw
, data
, device
);
233 case DASD_ECKD_CCW_WRITE_CKD
:
234 case DASD_ECKD_CCW_WRITE_CKD_MT
:
235 data
->attributes
.operation
= DASD_BYPASS_CACHE
;
236 rc
= check_XRC (ccw
, data
, device
);
238 case DASD_ECKD_CCW_ERASE
:
239 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS
:
240 case DASD_ECKD_CCW_WRITE_RECORD_ZERO
:
241 data
->mask
.perm
= 0x3;
242 data
->mask
.auth
= 0x1;
243 data
->attributes
.operation
= DASD_BYPASS_CACHE
;
244 rc
= check_XRC (ccw
, data
, device
);
247 dev_err(&device
->cdev
->dev
,
248 "0x%x is not a known command\n", cmd
);
252 data
->attributes
.mode
= 0x3; /* ECKD */
254 if ((private->rdc_data
.cu_type
== 0x2105 ||
255 private->rdc_data
.cu_type
== 0x2107 ||
256 private->rdc_data
.cu_type
== 0x1750)
257 && !(private->uses_cdl
&& trk
< 2))
258 data
->ga_extended
|= 0x40; /* Regular Data Format Mode */
260 heads
= private->rdc_data
.trk_per_cyl
;
261 begcyl
= trk
/ heads
;
262 beghead
= trk
% heads
;
263 endcyl
= totrk
/ heads
;
264 endhead
= totrk
% heads
;
266 /* check for sequential prestage - enhance cylinder range */
267 if (data
->attributes
.operation
== DASD_SEQ_PRESTAGE
||
268 data
->attributes
.operation
== DASD_SEQ_ACCESS
) {
270 if (endcyl
+ private->attrib
.nr_cyl
< private->real_cyl
)
271 endcyl
+= private->attrib
.nr_cyl
;
273 endcyl
= (private->real_cyl
- 1);
276 set_ch_t(&data
->beg_ext
, begcyl
, beghead
);
277 set_ch_t(&data
->end_ext
, endcyl
, endhead
);
281 static int check_XRC_on_prefix(struct PFX_eckd_data
*pfxdata
,
282 struct dasd_device
*device
)
284 struct dasd_eckd_private
*private;
287 private = (struct dasd_eckd_private
*) device
->private;
288 if (!private->rdc_data
.facilities
.XRC_supported
)
291 /* switch on System Time Stamp - needed for XRC Support */
292 pfxdata
->define_extent
.ga_extended
|= 0x08; /* 'Time Stamp Valid' */
293 pfxdata
->define_extent
.ga_extended
|= 0x02; /* 'Extended Parameter' */
294 pfxdata
->validity
.time_stamp
= 1; /* 'Time Stamp Valid' */
296 rc
= get_sync_clock(&pfxdata
->define_extent
.ep_sys_time
);
297 /* Ignore return code if sync clock is switched off. */
298 if (rc
== -ENOSYS
|| rc
== -EACCES
)
303 static void fill_LRE_data(struct LRE_eckd_data
*data
, unsigned int trk
,
304 unsigned int rec_on_trk
, int count
, int cmd
,
305 struct dasd_device
*device
, unsigned int reclen
,
308 struct dasd_eckd_private
*private;
312 private = (struct dasd_eckd_private
*) device
->private;
314 memset(data
, 0, sizeof(*data
));
317 switch (private->rdc_data
.dev_type
) {
319 dn
= ceil_quot(reclen
+ 6, 232);
320 d
= 9 + ceil_quot(reclen
+ 6 * (dn
+ 1), 34);
321 sector
= (49 + (rec_on_trk
- 1) * (10 + d
)) / 8;
324 d
= 7 + ceil_quot(reclen
+ 12, 32);
325 sector
= (39 + (rec_on_trk
- 1) * (8 + d
)) / 7;
329 data
->sector
= sector
;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
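	/* Illustration (added, not from the original source): reading eight
	 * blocks of one track record-wise (e.g. DASD_ECKD_CCW_READ_MT) uses
	 * count = 8, while reading the same data track-wise with
	 * DASD_ECKD_CCW_READ_TRACK_DATA uses count = 1. */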
336 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS
:
337 data
->operation
.orientation
= 0x3;
338 data
->operation
.operation
= 0x03;
340 case DASD_ECKD_CCW_READ_HOME_ADDRESS
:
341 data
->operation
.orientation
= 0x3;
342 data
->operation
.operation
= 0x16;
344 case DASD_ECKD_CCW_WRITE_RECORD_ZERO
:
345 data
->operation
.orientation
= 0x1;
346 data
->operation
.operation
= 0x03;
349 case DASD_ECKD_CCW_READ_RECORD_ZERO
:
350 data
->operation
.orientation
= 0x3;
351 data
->operation
.operation
= 0x16;
354 case DASD_ECKD_CCW_WRITE
:
355 case DASD_ECKD_CCW_WRITE_MT
:
356 case DASD_ECKD_CCW_WRITE_KD
:
357 case DASD_ECKD_CCW_WRITE_KD_MT
:
358 data
->auxiliary
.length_valid
= 0x1;
359 data
->length
= reclen
;
360 data
->operation
.operation
= 0x01;
362 case DASD_ECKD_CCW_WRITE_CKD
:
363 case DASD_ECKD_CCW_WRITE_CKD_MT
:
364 data
->auxiliary
.length_valid
= 0x1;
365 data
->length
= reclen
;
366 data
->operation
.operation
= 0x03;
368 case DASD_ECKD_CCW_WRITE_TRACK_DATA
:
369 data
->auxiliary
.length_valid
= 0x1;
370 data
->length
= reclen
; /* not tlf, as one might think */
371 data
->operation
.operation
= 0x3F;
372 data
->extended_operation
= 0x23;
374 case DASD_ECKD_CCW_READ
:
375 case DASD_ECKD_CCW_READ_MT
:
376 case DASD_ECKD_CCW_READ_KD
:
377 case DASD_ECKD_CCW_READ_KD_MT
:
378 data
->auxiliary
.length_valid
= 0x1;
379 data
->length
= reclen
;
380 data
->operation
.operation
= 0x06;
382 case DASD_ECKD_CCW_READ_CKD
:
383 case DASD_ECKD_CCW_READ_CKD_MT
:
384 data
->auxiliary
.length_valid
= 0x1;
385 data
->length
= reclen
;
386 data
->operation
.operation
= 0x16;
388 case DASD_ECKD_CCW_READ_COUNT
:
389 data
->operation
.operation
= 0x06;
391 case DASD_ECKD_CCW_READ_TRACK_DATA
:
392 data
->auxiliary
.length_valid
= 0x1;
394 data
->operation
.operation
= 0x0C;
396 case DASD_ECKD_CCW_ERASE
:
397 data
->length
= reclen
;
398 data
->auxiliary
.length_valid
= 0x1;
399 data
->operation
.operation
= 0x0b;
402 DBF_DEV_EVENT(DBF_ERR
, device
,
403 "fill LRE unknown opcode 0x%x", cmd
);
406 set_ch_t(&data
->seek_addr
,
407 trk
/ private->rdc_data
.trk_per_cyl
,
408 trk
% private->rdc_data
.trk_per_cyl
);
409 data
->search_arg
.cyl
= data
->seek_addr
.cyl
;
410 data
->search_arg
.head
= data
->seek_addr
.head
;
411 data
->search_arg
.record
= rec_on_trk
;
414 static int prefix_LRE(struct ccw1
*ccw
, struct PFX_eckd_data
*pfxdata
,
415 unsigned int trk
, unsigned int totrk
, int cmd
,
416 struct dasd_device
*basedev
, struct dasd_device
*startdev
,
417 unsigned char format
, unsigned int rec_on_trk
, int count
,
418 unsigned int blksize
, unsigned int tlf
)
420 struct dasd_eckd_private
*basepriv
, *startpriv
;
421 struct DE_eckd_data
*dedata
;
422 struct LRE_eckd_data
*lredata
;
424 u16 heads
, beghead
, endhead
;
427 basepriv
= (struct dasd_eckd_private
*) basedev
->private;
428 startpriv
= (struct dasd_eckd_private
*) startdev
->private;
429 dedata
= &pfxdata
->define_extent
;
430 lredata
= &pfxdata
->locate_record
;
432 ccw
->cmd_code
= DASD_ECKD_CCW_PFX
;
434 ccw
->count
= sizeof(*pfxdata
);
435 ccw
->cda
= (__u32
) __pa(pfxdata
);
437 memset(pfxdata
, 0, sizeof(*pfxdata
));
440 DBF_DEV_EVENT(DBF_ERR
, basedev
,
441 "PFX LRE unknown format 0x%x", format
);
445 pfxdata
->format
= format
;
446 pfxdata
->base_address
= basepriv
->ned
->unit_addr
;
447 pfxdata
->base_lss
= basepriv
->ned
->ID
;
448 pfxdata
->validity
.define_extent
= 1;
450 /* private uid is kept up to date, conf_data may be outdated */
451 if (startpriv
->uid
.type
!= UA_BASE_DEVICE
) {
452 pfxdata
->validity
.verify_base
= 1;
453 if (startpriv
->uid
.type
== UA_HYPER_PAV_ALIAS
)
454 pfxdata
->validity
.hyper_pav
= 1;
	/* define extent data (mostly) */
459 case DASD_ECKD_CCW_READ_HOME_ADDRESS
:
460 case DASD_ECKD_CCW_READ_RECORD_ZERO
:
461 case DASD_ECKD_CCW_READ
:
462 case DASD_ECKD_CCW_READ_MT
:
463 case DASD_ECKD_CCW_READ_CKD
:
464 case DASD_ECKD_CCW_READ_CKD_MT
:
465 case DASD_ECKD_CCW_READ_KD
:
466 case DASD_ECKD_CCW_READ_KD_MT
:
467 case DASD_ECKD_CCW_READ_COUNT
:
468 dedata
->mask
.perm
= 0x1;
469 dedata
->attributes
.operation
= basepriv
->attrib
.operation
;
471 case DASD_ECKD_CCW_READ_TRACK_DATA
:
472 dedata
->mask
.perm
= 0x1;
473 dedata
->attributes
.operation
= basepriv
->attrib
.operation
;
474 dedata
->blk_size
= 0;
476 case DASD_ECKD_CCW_WRITE
:
477 case DASD_ECKD_CCW_WRITE_MT
:
478 case DASD_ECKD_CCW_WRITE_KD
:
479 case DASD_ECKD_CCW_WRITE_KD_MT
:
480 dedata
->mask
.perm
= 0x02;
481 dedata
->attributes
.operation
= basepriv
->attrib
.operation
;
482 rc
= check_XRC_on_prefix(pfxdata
, basedev
);
484 case DASD_ECKD_CCW_WRITE_CKD
:
485 case DASD_ECKD_CCW_WRITE_CKD_MT
:
486 dedata
->attributes
.operation
= DASD_BYPASS_CACHE
;
487 rc
= check_XRC_on_prefix(pfxdata
, basedev
);
489 case DASD_ECKD_CCW_ERASE
:
490 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS
:
491 case DASD_ECKD_CCW_WRITE_RECORD_ZERO
:
492 dedata
->mask
.perm
= 0x3;
493 dedata
->mask
.auth
= 0x1;
494 dedata
->attributes
.operation
= DASD_BYPASS_CACHE
;
495 rc
= check_XRC_on_prefix(pfxdata
, basedev
);
497 case DASD_ECKD_CCW_WRITE_TRACK_DATA
:
498 dedata
->mask
.perm
= 0x02;
499 dedata
->attributes
.operation
= basepriv
->attrib
.operation
;
500 dedata
->blk_size
= blksize
;
501 rc
= check_XRC_on_prefix(pfxdata
, basedev
);
504 DBF_DEV_EVENT(DBF_ERR
, basedev
,
505 "PFX LRE unknown opcode 0x%x", cmd
);
510 dedata
->attributes
.mode
= 0x3; /* ECKD */
512 if ((basepriv
->rdc_data
.cu_type
== 0x2105 ||
513 basepriv
->rdc_data
.cu_type
== 0x2107 ||
514 basepriv
->rdc_data
.cu_type
== 0x1750)
515 && !(basepriv
->uses_cdl
&& trk
< 2))
516 dedata
->ga_extended
|= 0x40; /* Regular Data Format Mode */
518 heads
= basepriv
->rdc_data
.trk_per_cyl
;
519 begcyl
= trk
/ heads
;
520 beghead
= trk
% heads
;
521 endcyl
= totrk
/ heads
;
522 endhead
= totrk
% heads
;
524 /* check for sequential prestage - enhance cylinder range */
525 if (dedata
->attributes
.operation
== DASD_SEQ_PRESTAGE
||
526 dedata
->attributes
.operation
== DASD_SEQ_ACCESS
) {
528 if (endcyl
+ basepriv
->attrib
.nr_cyl
< basepriv
->real_cyl
)
529 endcyl
+= basepriv
->attrib
.nr_cyl
;
531 endcyl
= (basepriv
->real_cyl
- 1);
534 set_ch_t(&dedata
->beg_ext
, begcyl
, beghead
);
535 set_ch_t(&dedata
->end_ext
, endcyl
, endhead
);
538 fill_LRE_data(lredata
, trk
, rec_on_trk
, count
, cmd
,
539 basedev
, blksize
, tlf
);
545 static int prefix(struct ccw1
*ccw
, struct PFX_eckd_data
*pfxdata
,
546 unsigned int trk
, unsigned int totrk
, int cmd
,
547 struct dasd_device
*basedev
, struct dasd_device
*startdev
)
549 return prefix_LRE(ccw
, pfxdata
, trk
, totrk
, cmd
, basedev
, startdev
,
554 locate_record(struct ccw1
*ccw
, struct LO_eckd_data
*data
, unsigned int trk
,
555 unsigned int rec_on_trk
, int no_rec
, int cmd
,
556 struct dasd_device
* device
, int reclen
)
558 struct dasd_eckd_private
*private;
562 private = (struct dasd_eckd_private
*) device
->private;
564 DBF_DEV_EVENT(DBF_INFO
, device
,
565 "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
566 trk
, rec_on_trk
, no_rec
, cmd
, reclen
);
568 ccw
->cmd_code
= DASD_ECKD_CCW_LOCATE_RECORD
;
571 ccw
->cda
= (__u32
) __pa(data
);
573 memset(data
, 0, sizeof(struct LO_eckd_data
));
576 switch (private->rdc_data
.dev_type
) {
578 dn
= ceil_quot(reclen
+ 6, 232);
579 d
= 9 + ceil_quot(reclen
+ 6 * (dn
+ 1), 34);
580 sector
= (49 + (rec_on_trk
- 1) * (10 + d
)) / 8;
583 d
= 7 + ceil_quot(reclen
+ 12, 32);
584 sector
= (39 + (rec_on_trk
- 1) * (8 + d
)) / 7;
588 data
->sector
= sector
;
589 data
->count
= no_rec
;
591 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS
:
592 data
->operation
.orientation
= 0x3;
593 data
->operation
.operation
= 0x03;
595 case DASD_ECKD_CCW_READ_HOME_ADDRESS
:
596 data
->operation
.orientation
= 0x3;
597 data
->operation
.operation
= 0x16;
599 case DASD_ECKD_CCW_WRITE_RECORD_ZERO
:
600 data
->operation
.orientation
= 0x1;
601 data
->operation
.operation
= 0x03;
604 case DASD_ECKD_CCW_READ_RECORD_ZERO
:
605 data
->operation
.orientation
= 0x3;
606 data
->operation
.operation
= 0x16;
609 case DASD_ECKD_CCW_WRITE
:
610 case DASD_ECKD_CCW_WRITE_MT
:
611 case DASD_ECKD_CCW_WRITE_KD
:
612 case DASD_ECKD_CCW_WRITE_KD_MT
:
613 data
->auxiliary
.last_bytes_used
= 0x1;
614 data
->length
= reclen
;
615 data
->operation
.operation
= 0x01;
617 case DASD_ECKD_CCW_WRITE_CKD
:
618 case DASD_ECKD_CCW_WRITE_CKD_MT
:
619 data
->auxiliary
.last_bytes_used
= 0x1;
620 data
->length
= reclen
;
621 data
->operation
.operation
= 0x03;
623 case DASD_ECKD_CCW_READ
:
624 case DASD_ECKD_CCW_READ_MT
:
625 case DASD_ECKD_CCW_READ_KD
:
626 case DASD_ECKD_CCW_READ_KD_MT
:
627 data
->auxiliary
.last_bytes_used
= 0x1;
628 data
->length
= reclen
;
629 data
->operation
.operation
= 0x06;
631 case DASD_ECKD_CCW_READ_CKD
:
632 case DASD_ECKD_CCW_READ_CKD_MT
:
633 data
->auxiliary
.last_bytes_used
= 0x1;
634 data
->length
= reclen
;
635 data
->operation
.operation
= 0x16;
637 case DASD_ECKD_CCW_READ_COUNT
:
638 data
->operation
.operation
= 0x06;
640 case DASD_ECKD_CCW_ERASE
:
641 data
->length
= reclen
;
642 data
->auxiliary
.last_bytes_used
= 0x1;
643 data
->operation
.operation
= 0x0b;
646 DBF_DEV_EVENT(DBF_ERR
, device
, "unknown locate record "
649 set_ch_t(&data
->seek_addr
,
650 trk
/ private->rdc_data
.trk_per_cyl
,
651 trk
% private->rdc_data
.trk_per_cyl
);
652 data
->search_arg
.cyl
= data
->seek_addr
.cyl
;
653 data
->search_arg
.head
= data
->seek_addr
.head
;
654 data
->search_arg
.record
= rec_on_trk
;
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
668 dasd_eckd_cdl_special(int blk_per_trk
, int recid
)
672 if (recid
< blk_per_trk
)
674 if (recid
< 2 * blk_per_trk
)
/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true.
 */
685 dasd_eckd_cdl_reclen(int recid
)
688 return sizes_trk0
[recid
];
/*
 * Generate device unique id that specifies the physical device.
 */
695 static int dasd_eckd_generate_uid(struct dasd_device
*device
,
696 struct dasd_uid
*uid
)
698 struct dasd_eckd_private
*private;
701 private = (struct dasd_eckd_private
*) device
->private;
704 if (!private->ned
|| !private->gneq
)
707 memset(uid
, 0, sizeof(struct dasd_uid
));
708 memcpy(uid
->vendor
, private->ned
->HDA_manufacturer
,
709 sizeof(uid
->vendor
) - 1);
710 EBCASC(uid
->vendor
, sizeof(uid
->vendor
) - 1);
711 memcpy(uid
->serial
, private->ned
->HDA_location
,
712 sizeof(uid
->serial
) - 1);
713 EBCASC(uid
->serial
, sizeof(uid
->serial
) - 1);
714 uid
->ssid
= private->gneq
->subsystemID
;
715 uid
->real_unit_addr
= private->ned
->unit_addr
;
717 uid
->type
= private->sneq
->sua_flags
;
718 if (uid
->type
== UA_BASE_PAV_ALIAS
)
719 uid
->base_unit_addr
= private->sneq
->base_unit_addr
;
721 uid
->type
= UA_BASE_DEVICE
;
723 if (private->vdsneq
) {
724 for (count
= 0; count
< 16; count
++) {
725 sprintf(uid
->vduit
+2*count
, "%02x",
726 private->vdsneq
->uit
[count
]);
732 static struct dasd_ccw_req
*dasd_eckd_build_rcd_lpm(struct dasd_device
*device
,
734 struct ciw
*ciw
, __u8 lpm
)
736 struct dasd_ccw_req
*cqr
;
739 cqr
= dasd_smalloc_request(DASD_ECKD_MAGIC
, 1 /* RCD */, ciw
->count
,
743 DBF_DEV_EVENT(DBF_WARNING
, device
, "%s",
744 "Could not allocate RCD request");
749 ccw
->cmd_code
= ciw
->cmd
;
750 ccw
->cda
= (__u32
)(addr_t
)rcd_buffer
;
751 ccw
->count
= ciw
->count
;
753 cqr
->startdev
= device
;
754 cqr
->memdev
= device
;
756 cqr
->expires
= 10*HZ
;
759 cqr
->buildclk
= get_clock();
760 cqr
->status
= DASD_CQR_FILLED
;
764 static int dasd_eckd_read_conf_lpm(struct dasd_device
*device
,
766 int *rcd_buffer_size
, __u8 lpm
)
769 char *rcd_buf
= NULL
;
771 struct dasd_ccw_req
*cqr
;
774 * scan for RCD command in extended SenseID data
776 ciw
= ccw_device_get_ciw(device
->cdev
, CIW_TYPE_RCD
);
777 if (!ciw
|| ciw
->cmd
== 0) {
781 rcd_buf
= kzalloc(ciw
->count
, GFP_KERNEL
| GFP_DMA
);
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
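	/* Minimal sketch of the initialization the comment above describes,
	 * assuming the usual EBCDIC code points for 'V', '1', '.' and '0'
	 * (0xE5, 0xF1, 0x4B, 0xF0): */
	rcd_buf[0] = 0xE5;
	rcd_buf[1] = 0xF1;
	rcd_buf[2] = 0x4B;
	rcd_buf[3] = 0xF0;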
795 cqr
= dasd_eckd_build_rcd_lpm(device
, rcd_buf
, ciw
, lpm
);
800 ret
= dasd_sleep_on(cqr
);
802 * on success we update the user input parms
804 dasd_sfree_request(cqr
, cqr
->memdev
);
808 *rcd_buffer_size
= ciw
->count
;
809 *rcd_buffer
= rcd_buf
;
814 *rcd_buffer_size
= 0;
818 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private
*private)
821 struct dasd_sneq
*sneq
;
825 private->sneq
= NULL
;
826 private->vdsneq
= NULL
;
827 private->gneq
= NULL
;
828 count
= private->conf_len
/ sizeof(struct dasd_sneq
);
829 sneq
= (struct dasd_sneq
*)private->conf_data
;
830 for (i
= 0; i
< count
; ++i
) {
831 if (sneq
->flags
.identifier
== 1 && sneq
->format
== 1)
832 private->sneq
= sneq
;
833 else if (sneq
->flags
.identifier
== 1 && sneq
->format
== 4)
834 private->vdsneq
= (struct vd_sneq
*)sneq
;
835 else if (sneq
->flags
.identifier
== 2)
836 private->gneq
= (struct dasd_gneq
*)sneq
;
837 else if (sneq
->flags
.identifier
== 3 && sneq
->res1
== 1)
838 private->ned
= (struct dasd_ned
*)sneq
;
841 if (!private->ned
|| !private->gneq
) {
843 private->sneq
= NULL
;
844 private->vdsneq
= NULL
;
845 private->gneq
= NULL
;
852 static unsigned char dasd_eckd_path_access(void *conf_data
, int conf_len
)
854 struct dasd_gneq
*gneq
;
857 count
= conf_len
/ sizeof(*gneq
);
858 gneq
= (struct dasd_gneq
*)conf_data
;
860 for (i
= 0; i
< count
; ++i
) {
861 if (gneq
->flags
.identifier
== 2) {
868 return ((char *)gneq
)[18] & 0x07;
873 static int dasd_eckd_read_conf(struct dasd_device
*device
)
876 int conf_len
, conf_data_saved
;
879 struct dasd_eckd_private
*private;
880 struct dasd_eckd_path
*path_data
;
882 private = (struct dasd_eckd_private
*) device
->private;
883 path_data
= (struct dasd_eckd_path
*) &private->path_data
;
884 path_data
->opm
= ccw_device_get_path_mask(device
->cdev
);
887 /* get configuration data per operational path */
888 for (lpm
= 0x80; lpm
; lpm
>>= 1) {
889 if (lpm
& path_data
->opm
){
890 rc
= dasd_eckd_read_conf_lpm(device
, &conf_data
,
892 if (rc
&& rc
!= -EOPNOTSUPP
) { /* -EOPNOTSUPP is ok */
893 DBF_EVENT_DEVID(DBF_WARNING
, device
->cdev
,
894 "Read configuration data returned "
898 if (conf_data
== NULL
) {
899 DBF_EVENT_DEVID(DBF_WARNING
, device
->cdev
, "%s",
900 "No configuration data "
902 continue; /* no error */
904 /* save first valid configuration data */
905 if (!conf_data_saved
) {
906 kfree(private->conf_data
);
907 private->conf_data
= conf_data
;
908 private->conf_len
= conf_len
;
909 if (dasd_eckd_identify_conf_parts(private)) {
910 private->conf_data
= NULL
;
911 private->conf_len
= 0;
917 switch (dasd_eckd_path_access(conf_data
, conf_len
)) {
919 path_data
->npm
|= lpm
;
922 path_data
->ppm
|= lpm
;
925 if (conf_data
!= private->conf_data
)
932 static int dasd_eckd_read_features(struct dasd_device
*device
)
934 struct dasd_psf_prssd_data
*prssdp
;
935 struct dasd_rssd_features
*features
;
936 struct dasd_ccw_req
*cqr
;
939 struct dasd_eckd_private
*private;
941 private = (struct dasd_eckd_private
*) device
->private;
942 memset(&private->features
, 0, sizeof(struct dasd_rssd_features
));
943 cqr
= dasd_smalloc_request(DASD_ECKD_MAGIC
, 1 /* PSF */ + 1 /* RSSD */,
944 (sizeof(struct dasd_psf_prssd_data
) +
945 sizeof(struct dasd_rssd_features
)),
948 DBF_EVENT_DEVID(DBF_WARNING
, device
->cdev
, "%s", "Could not "
949 "allocate initialization request");
952 cqr
->startdev
= device
;
953 cqr
->memdev
= device
;
956 cqr
->expires
= 10 * HZ
;
958 /* Prepare for Read Subsystem Data */
959 prssdp
= (struct dasd_psf_prssd_data
*) cqr
->data
;
960 memset(prssdp
, 0, sizeof(struct dasd_psf_prssd_data
));
961 prssdp
->order
= PSF_ORDER_PRSSD
;
962 prssdp
->suborder
= 0x41; /* Read Feature Codes */
963 /* all other bytes of prssdp must be zero */
966 ccw
->cmd_code
= DASD_ECKD_CCW_PSF
;
967 ccw
->count
= sizeof(struct dasd_psf_prssd_data
);
968 ccw
->flags
|= CCW_FLAG_CC
;
969 ccw
->cda
= (__u32
)(addr_t
) prssdp
;
971 /* Read Subsystem Data - feature codes */
972 features
= (struct dasd_rssd_features
*) (prssdp
+ 1);
973 memset(features
, 0, sizeof(struct dasd_rssd_features
));
976 ccw
->cmd_code
= DASD_ECKD_CCW_RSSD
;
977 ccw
->count
= sizeof(struct dasd_rssd_features
);
978 ccw
->cda
= (__u32
)(addr_t
) features
;
980 cqr
->buildclk
= get_clock();
981 cqr
->status
= DASD_CQR_FILLED
;
982 rc
= dasd_sleep_on(cqr
);
984 prssdp
= (struct dasd_psf_prssd_data
*) cqr
->data
;
985 features
= (struct dasd_rssd_features
*) (prssdp
+ 1);
986 memcpy(&private->features
, features
,
987 sizeof(struct dasd_rssd_features
));
989 dev_warn(&device
->cdev
->dev
, "Reading device feature codes"
990 " failed with rc=%d\n", rc
);
991 dasd_sfree_request(cqr
, cqr
->memdev
);
/*
 * Build CP for Perform Subsystem Function - SSC.
 */
999 static struct dasd_ccw_req
*dasd_eckd_build_psf_ssc(struct dasd_device
*device
,
1002 struct dasd_ccw_req
*cqr
;
1003 struct dasd_psf_ssc_data
*psf_ssc_data
;
1006 cqr
= dasd_smalloc_request(DASD_ECKD_MAGIC
, 1 /* PSF */ ,
1007 sizeof(struct dasd_psf_ssc_data
),
1011 DBF_DEV_EVENT(DBF_WARNING
, device
, "%s",
1012 "Could not allocate PSF-SSC request");
1015 psf_ssc_data
= (struct dasd_psf_ssc_data
*)cqr
->data
;
1016 psf_ssc_data
->order
= PSF_ORDER_SSC
;
1017 psf_ssc_data
->suborder
= 0xc0;
1019 psf_ssc_data
->suborder
|= 0x08;
1020 psf_ssc_data
->reserved
[0] = 0x88;
1023 ccw
->cmd_code
= DASD_ECKD_CCW_PSF
;
1024 ccw
->cda
= (__u32
)(addr_t
)psf_ssc_data
;
1027 cqr
->startdev
= device
;
1028 cqr
->memdev
= device
;
1031 cqr
->expires
= 10*HZ
;
1032 cqr
->buildclk
= get_clock();
1033 cqr
->status
= DASD_CQR_FILLED
;
/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
1043 dasd_eckd_psf_ssc(struct dasd_device
*device
, int enable_pav
)
1045 struct dasd_ccw_req
*cqr
;
1048 cqr
= dasd_eckd_build_psf_ssc(device
, enable_pav
);
1050 return PTR_ERR(cqr
);
1052 rc
= dasd_sleep_on(cqr
);
1054 /* trigger CIO to reprobe devices */
1055 css_schedule_reprobe();
1056 dasd_sfree_request(cqr
, cqr
->memdev
);
/*
 * Validate storage server of current device.
 */
1063 static void dasd_eckd_validate_server(struct dasd_device
*device
)
1066 struct dasd_eckd_private
*private;
1069 if (dasd_nopav
|| MACHINE_IS_VM
)
1073 rc
= dasd_eckd_psf_ssc(device
, enable_pav
);
	/* The requested feature may not be available on the server,
	 * therefore just report the error and go ahead */
1077 private = (struct dasd_eckd_private
*) device
->private;
1078 DBF_EVENT_DEVID(DBF_WARNING
, device
->cdev
, "PSF-SSC for SSID %04x "
1079 "returned rc=%d", private->uid
.ssid
, rc
);
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
1087 dasd_eckd_check_characteristics(struct dasd_device
*device
)
1089 struct dasd_eckd_private
*private;
1090 struct dasd_block
*block
;
1094 if (!ccw_device_is_pathgroup(device
->cdev
)) {
1095 dev_warn(&device
->cdev
->dev
,
1096 "A channel path group could not be established\n");
1099 if (!ccw_device_is_multipath(device
->cdev
)) {
1100 dev_info(&device
->cdev
->dev
,
1101 "The DASD is not operating in multipath mode\n");
1103 private = (struct dasd_eckd_private
*) device
->private;
1105 private = kzalloc(sizeof(*private), GFP_KERNEL
| GFP_DMA
);
1107 dev_warn(&device
->cdev
->dev
,
1108 "Allocating memory for private DASD data "
1112 device
->private = (void *) private;
1114 memset(private, 0, sizeof(*private));
1116 /* Invalidate status of initial analysis. */
1117 private->init_cqr_status
= -1;
1118 /* Set default cache operations. */
1119 private->attrib
.operation
= DASD_NORMAL_CACHE
;
1120 private->attrib
.nr_cyl
= 0;
1122 /* Read Configuration Data */
1123 rc
= dasd_eckd_read_conf(device
);
1127 /* Generate device unique id and register in devmap */
1128 rc
= dasd_eckd_generate_uid(device
, &private->uid
);
1131 dasd_set_uid(device
->cdev
, &private->uid
);
1133 if (private->uid
.type
== UA_BASE_DEVICE
) {
1134 block
= dasd_alloc_block();
1135 if (IS_ERR(block
)) {
1136 DBF_EVENT_DEVID(DBF_WARNING
, device
->cdev
, "%s",
1137 "could not allocate dasd "
1139 rc
= PTR_ERR(block
);
1142 device
->block
= block
;
1143 block
->base
= device
;
1146 /* register lcu with alias handling, enable PAV if this is a new lcu */
1147 is_known
= dasd_alias_make_device_known_to_lcu(device
);
	/*
	 * dasd_eckd_validate_server is done on the first device that
	 * is found for an LCU. All other devices have to wait
	 * for it, so they will read the correct feature codes.
	 */
1158 dasd_eckd_validate_server(device
);
1159 dasd_alias_lcu_setup_complete(device
);
1161 dasd_alias_wait_for_lcu_setup(device
);
1163 /* device may report different configuration data after LCU setup */
1164 rc
= dasd_eckd_read_conf(device
);
1168 /* Read Feature Codes */
1169 dasd_eckd_read_features(device
);
1171 /* Read Device Characteristics */
1172 rc
= dasd_generic_read_dev_chars(device
, DASD_ECKD_MAGIC
,
1173 &private->rdc_data
, 64);
1175 DBF_EVENT_DEVID(DBF_WARNING
, device
->cdev
,
1176 "Read device characteristic failed, rc=%d", rc
);
	/* find the valid cylinder size */
1180 if (private->rdc_data
.no_cyl
== LV_COMPAT_CYL
&&
1181 private->rdc_data
.long_no_cyl
)
1182 private->real_cyl
= private->rdc_data
.long_no_cyl
;
1184 private->real_cyl
= private->rdc_data
.no_cyl
;
1186 readonly
= dasd_device_is_ro(device
);
1188 set_bit(DASD_FLAG_DEVICE_RO
, &device
->flags
);
1190 dev_info(&device
->cdev
->dev
, "New DASD %04X/%02X (CU %04X/%02X) "
1191 "with %d cylinders, %d heads, %d sectors%s\n",
1192 private->rdc_data
.dev_type
,
1193 private->rdc_data
.dev_model
,
1194 private->rdc_data
.cu_type
,
1195 private->rdc_data
.cu_model
.model
,
1197 private->rdc_data
.trk_per_cyl
,
1198 private->rdc_data
.sec_per_trk
,
1199 readonly
? ", read-only device" : "");
1203 dasd_alias_disconnect_device_from_lcu(device
);
1205 dasd_free_block(device
->block
);
1206 device
->block
= NULL
;
1208 kfree(private->conf_data
);
1209 kfree(device
->private);
1210 device
->private = NULL
;
1214 static void dasd_eckd_uncheck_device(struct dasd_device
*device
)
1216 struct dasd_eckd_private
*private;
1218 private = (struct dasd_eckd_private
*) device
->private;
1219 dasd_alias_disconnect_device_from_lcu(device
);
1220 private->ned
= NULL
;
1221 private->sneq
= NULL
;
1222 private->vdsneq
= NULL
;
1223 private->gneq
= NULL
;
1224 private->conf_len
= 0;
1225 kfree(private->conf_data
);
1226 private->conf_data
= NULL
;
1229 static struct dasd_ccw_req
*
1230 dasd_eckd_analysis_ccw(struct dasd_device
*device
)
1232 struct dasd_eckd_private
*private;
1233 struct eckd_count
*count_data
;
1234 struct LO_eckd_data
*LO_data
;
1235 struct dasd_ccw_req
*cqr
;
1237 int cplength
, datasize
;
1240 private = (struct dasd_eckd_private
*) device
->private;
1243 datasize
= sizeof(struct DE_eckd_data
) + 2*sizeof(struct LO_eckd_data
);
1244 cqr
= dasd_smalloc_request(DASD_ECKD_MAGIC
, cplength
, datasize
, device
);
1248 /* Define extent for the first 3 tracks. */
1249 define_extent(ccw
++, cqr
->data
, 0, 2,
1250 DASD_ECKD_CCW_READ_COUNT
, device
);
1251 LO_data
= cqr
->data
+ sizeof(struct DE_eckd_data
);
1252 /* Locate record for the first 4 records on track 0. */
1253 ccw
[-1].flags
|= CCW_FLAG_CC
;
1254 locate_record(ccw
++, LO_data
++, 0, 0, 4,
1255 DASD_ECKD_CCW_READ_COUNT
, device
, 0);
1257 count_data
= private->count_area
;
1258 for (i
= 0; i
< 4; i
++) {
1259 ccw
[-1].flags
|= CCW_FLAG_CC
;
1260 ccw
->cmd_code
= DASD_ECKD_CCW_READ_COUNT
;
1263 ccw
->cda
= (__u32
)(addr_t
) count_data
;
1268 /* Locate record for the first record on track 2. */
1269 ccw
[-1].flags
|= CCW_FLAG_CC
;
1270 locate_record(ccw
++, LO_data
++, 2, 0, 1,
1271 DASD_ECKD_CCW_READ_COUNT
, device
, 0);
1272 /* Read count ccw. */
1273 ccw
[-1].flags
|= CCW_FLAG_CC
;
1274 ccw
->cmd_code
= DASD_ECKD_CCW_READ_COUNT
;
1277 ccw
->cda
= (__u32
)(addr_t
) count_data
;
1280 cqr
->startdev
= device
;
1281 cqr
->memdev
= device
;
1283 cqr
->buildclk
= get_clock();
1284 cqr
->status
= DASD_CQR_FILLED
;
1288 /* differentiate between 'no record found' and any other error */
1289 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req
*init_cqr
)
1292 if (init_cqr
->status
== DASD_CQR_DONE
)
1294 else if (init_cqr
->status
== DASD_CQR_NEED_ERP
||
1295 init_cqr
->status
== DASD_CQR_FAILED
) {
1296 sense
= dasd_get_sense(&init_cqr
->irb
);
1297 if (sense
&& (sense
[1] & SNS1_NO_REC_FOUND
))
1298 return INIT_CQR_UNFORMATTED
;
1300 return INIT_CQR_ERROR
;
1302 return INIT_CQR_ERROR
;
/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
1312 static void dasd_eckd_analysis_callback(struct dasd_ccw_req
*init_cqr
,
1315 struct dasd_eckd_private
*private;
1316 struct dasd_device
*device
;
1318 device
= init_cqr
->startdev
;
1319 private = (struct dasd_eckd_private
*) device
->private;
1320 private->init_cqr_status
= dasd_eckd_analysis_evaluation(init_cqr
);
1321 dasd_sfree_request(init_cqr
, device
);
1322 dasd_kick_device(device
);
1325 static int dasd_eckd_start_analysis(struct dasd_block
*block
)
1327 struct dasd_eckd_private
*private;
1328 struct dasd_ccw_req
*init_cqr
;
1330 private = (struct dasd_eckd_private
*) block
->base
->private;
1331 init_cqr
= dasd_eckd_analysis_ccw(block
->base
);
1332 if (IS_ERR(init_cqr
))
1333 return PTR_ERR(init_cqr
);
1334 init_cqr
->callback
= dasd_eckd_analysis_callback
;
1335 init_cqr
->callback_data
= NULL
;
1336 init_cqr
->expires
= 5*HZ
;
1337 /* first try without ERP, so we can later handle unformatted
1338 * devices as special case
1340 clear_bit(DASD_CQR_FLAGS_USE_ERP
, &init_cqr
->flags
);
1341 init_cqr
->retries
= 0;
1342 dasd_add_request_head(init_cqr
);
1346 static int dasd_eckd_end_analysis(struct dasd_block
*block
)
1348 struct dasd_device
*device
;
1349 struct dasd_eckd_private
*private;
1350 struct eckd_count
*count_area
;
1351 unsigned int sb
, blk_per_trk
;
1353 struct dasd_ccw_req
*init_cqr
;
1355 device
= block
->base
;
1356 private = (struct dasd_eckd_private
*) device
->private;
1357 status
= private->init_cqr_status
;
1358 private->init_cqr_status
= -1;
1359 if (status
== INIT_CQR_ERROR
) {
1360 /* try again, this time with full ERP */
1361 init_cqr
= dasd_eckd_analysis_ccw(device
);
1362 dasd_sleep_on(init_cqr
);
1363 status
= dasd_eckd_analysis_evaluation(init_cqr
);
1364 dasd_sfree_request(init_cqr
, device
);
1367 if (status
== INIT_CQR_UNFORMATTED
) {
1368 dev_warn(&device
->cdev
->dev
, "The DASD is not formatted\n");
1369 return -EMEDIUMTYPE
;
1370 } else if (status
== INIT_CQR_ERROR
) {
1371 dev_err(&device
->cdev
->dev
,
1372 "Detecting the DASD disk layout failed because "
1373 "of an I/O error\n");
1377 private->uses_cdl
= 1;
1378 /* Check Track 0 for Compatible Disk Layout */
1380 for (i
= 0; i
< 3; i
++) {
1381 if (private->count_area
[i
].kl
!= 4 ||
1382 private->count_area
[i
].dl
!= dasd_eckd_cdl_reclen(i
) - 4) {
1383 private->uses_cdl
= 0;
1388 count_area
= &private->count_area
[4];
1390 if (private->uses_cdl
== 0) {
1391 for (i
= 0; i
< 5; i
++) {
1392 if ((private->count_area
[i
].kl
!= 0) ||
1393 (private->count_area
[i
].dl
!=
1394 private->count_area
[0].dl
))
1398 count_area
= &private->count_area
[0];
1400 if (private->count_area
[3].record
== 1)
1401 dev_warn(&device
->cdev
->dev
,
1402 "Track 0 has no records following the VTOC\n");
1404 if (count_area
!= NULL
&& count_area
->kl
== 0) {
		/* we found nothing violating our disk layout */
1406 if (dasd_check_blocksize(count_area
->dl
) == 0)
1407 block
->bp_block
= count_area
->dl
;
1409 if (block
->bp_block
== 0) {
1410 dev_warn(&device
->cdev
->dev
,
1411 "The disk layout of the DASD is not supported\n");
1412 return -EMEDIUMTYPE
;
1414 block
->s2b_shift
= 0; /* bits to shift 512 to get a block */
1415 for (sb
= 512; sb
< block
->bp_block
; sb
= sb
<< 1)
1418 blk_per_trk
= recs_per_track(&private->rdc_data
, 0, block
->bp_block
);
1419 block
->blocks
= (private->real_cyl
*
1420 private->rdc_data
.trk_per_cyl
*
1423 dev_info(&device
->cdev
->dev
,
1424 "DASD with %d KB/block, %d KB total size, %d KB/track, "
1425 "%s\n", (block
->bp_block
>> 10),
1426 ((private->real_cyl
*
1427 private->rdc_data
.trk_per_cyl
*
1428 blk_per_trk
* (block
->bp_block
>> 9)) >> 1),
1429 ((blk_per_trk
* block
->bp_block
) >> 10),
1431 "compatible disk layout" : "linux disk layout");
1436 static int dasd_eckd_do_analysis(struct dasd_block
*block
)
1438 struct dasd_eckd_private
*private;
1440 private = (struct dasd_eckd_private
*) block
->base
->private;
1441 if (private->init_cqr_status
< 0)
1442 return dasd_eckd_start_analysis(block
);
1444 return dasd_eckd_end_analysis(block
);
1447 static int dasd_eckd_ready_to_online(struct dasd_device
*device
)
1449 return dasd_alias_add_device(device
);
1452 static int dasd_eckd_online_to_ready(struct dasd_device
*device
)
1454 return dasd_alias_remove_device(device
);
1458 dasd_eckd_fill_geometry(struct dasd_block
*block
, struct hd_geometry
*geo
)
1460 struct dasd_eckd_private
*private;
1462 private = (struct dasd_eckd_private
*) block
->base
->private;
1463 if (dasd_check_blocksize(block
->bp_block
) == 0) {
1464 geo
->sectors
= recs_per_track(&private->rdc_data
,
1465 0, block
->bp_block
);
1467 geo
->cylinders
= private->rdc_data
.no_cyl
;
1468 geo
->heads
= private->rdc_data
.trk_per_cyl
;
1472 static struct dasd_ccw_req
*
1473 dasd_eckd_format_device(struct dasd_device
* device
,
1474 struct format_data_t
* fdata
)
1476 struct dasd_eckd_private
*private;
1477 struct dasd_ccw_req
*fcp
;
1478 struct eckd_count
*ect
;
1482 struct ch_t address
;
1483 int cplength
, datasize
;
1488 private = (struct dasd_eckd_private
*) device
->private;
1489 rpt
= recs_per_track(&private->rdc_data
, 0, fdata
->blksize
);
1491 fdata
->start_unit
/ private->rdc_data
.trk_per_cyl
,
1492 fdata
->start_unit
% private->rdc_data
.trk_per_cyl
);
1494 /* Sanity checks. */
1495 if (fdata
->start_unit
>=
1496 (private->real_cyl
* private->rdc_data
.trk_per_cyl
)) {
1497 dev_warn(&device
->cdev
->dev
, "Start track number %d used in "
1498 "formatting is too big\n", fdata
->start_unit
);
1499 return ERR_PTR(-EINVAL
);
1501 if (fdata
->start_unit
> fdata
->stop_unit
) {
1502 dev_warn(&device
->cdev
->dev
, "Start track %d used in "
1503 "formatting exceeds end track\n", fdata
->start_unit
);
1504 return ERR_PTR(-EINVAL
);
1506 if (dasd_check_blocksize(fdata
->blksize
) != 0) {
1507 dev_warn(&device
->cdev
->dev
,
1508 "The DASD cannot be formatted with block size %d\n",
1510 return ERR_PTR(-EINVAL
);
	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
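	/* For example, the combinations handled below are 0x00/0x08 (format
	 * only, without/with cdl), 0x01/0x09 (additionally write record zero)
	 * and 0x04/0x0c (invalidate the track); bit 4 (0x10) is masked off
	 * before the switch. */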
1522 if (fdata
->intensity
& 0x10) {
1524 intensity
= fdata
->intensity
& ~0x10;
1527 intensity
= fdata
->intensity
;
1529 switch (intensity
) {
1530 case 0x00: /* Normal format */
1531 case 0x08: /* Normal format, use cdl. */
1533 datasize
= sizeof(struct DE_eckd_data
) +
1534 sizeof(struct LO_eckd_data
) +
1535 rpt
* sizeof(struct eckd_count
);
1537 case 0x01: /* Write record zero and format track. */
1538 case 0x09: /* Write record zero and format track, use cdl. */
1540 datasize
= sizeof(struct DE_eckd_data
) +
1541 sizeof(struct LO_eckd_data
) +
1542 sizeof(struct eckd_count
) +
1543 rpt
* sizeof(struct eckd_count
);
1545 case 0x04: /* Invalidate track. */
1546 case 0x0c: /* Invalidate track, use cdl. */
1548 datasize
= sizeof(struct DE_eckd_data
) +
1549 sizeof(struct LO_eckd_data
) +
1550 sizeof(struct eckd_count
);
1553 dev_warn(&device
->cdev
->dev
, "An I/O control call used "
1554 "incorrect flags 0x%x\n", fdata
->intensity
);
1555 return ERR_PTR(-EINVAL
);
1557 /* Allocate the format ccw request. */
1558 fcp
= dasd_smalloc_request(DASD_ECKD_MAGIC
, cplength
, datasize
, device
);
1565 switch (intensity
& ~0x08) {
1566 case 0x00: /* Normal format. */
1567 define_extent(ccw
++, (struct DE_eckd_data
*) data
,
1568 fdata
->start_unit
, fdata
->start_unit
,
1569 DASD_ECKD_CCW_WRITE_CKD
, device
);
1570 /* grant subsystem permission to format R0 */
1572 ((struct DE_eckd_data
*)data
)->ga_extended
|= 0x04;
1573 data
+= sizeof(struct DE_eckd_data
);
1574 ccw
[-1].flags
|= CCW_FLAG_CC
;
1575 locate_record(ccw
++, (struct LO_eckd_data
*) data
,
1576 fdata
->start_unit
, 0, rpt
,
1577 DASD_ECKD_CCW_WRITE_CKD
, device
,
1579 data
+= sizeof(struct LO_eckd_data
);
1581 case 0x01: /* Write record zero + format track. */
1582 define_extent(ccw
++, (struct DE_eckd_data
*) data
,
1583 fdata
->start_unit
, fdata
->start_unit
,
1584 DASD_ECKD_CCW_WRITE_RECORD_ZERO
,
1586 data
+= sizeof(struct DE_eckd_data
);
1587 ccw
[-1].flags
|= CCW_FLAG_CC
;
1588 locate_record(ccw
++, (struct LO_eckd_data
*) data
,
1589 fdata
->start_unit
, 0, rpt
+ 1,
1590 DASD_ECKD_CCW_WRITE_RECORD_ZERO
, device
,
1591 device
->block
->bp_block
);
1592 data
+= sizeof(struct LO_eckd_data
);
1594 case 0x04: /* Invalidate track. */
1595 define_extent(ccw
++, (struct DE_eckd_data
*) data
,
1596 fdata
->start_unit
, fdata
->start_unit
,
1597 DASD_ECKD_CCW_WRITE_CKD
, device
);
1598 data
+= sizeof(struct DE_eckd_data
);
1599 ccw
[-1].flags
|= CCW_FLAG_CC
;
1600 locate_record(ccw
++, (struct LO_eckd_data
*) data
,
1601 fdata
->start_unit
, 0, 1,
1602 DASD_ECKD_CCW_WRITE_CKD
, device
, 8);
1603 data
+= sizeof(struct LO_eckd_data
);
1606 if (intensity
& 0x01) { /* write record zero */
1607 ect
= (struct eckd_count
*) data
;
1608 data
+= sizeof(struct eckd_count
);
1609 ect
->cyl
= address
.cyl
;
1610 ect
->head
= address
.head
;
1614 ccw
[-1].flags
|= CCW_FLAG_CC
;
1615 ccw
->cmd_code
= DASD_ECKD_CCW_WRITE_RECORD_ZERO
;
1616 ccw
->flags
= CCW_FLAG_SLI
;
1618 ccw
->cda
= (__u32
)(addr_t
) ect
;
1621 if ((intensity
& ~0x08) & 0x04) { /* erase track */
1622 ect
= (struct eckd_count
*) data
;
1623 data
+= sizeof(struct eckd_count
);
1624 ect
->cyl
= address
.cyl
;
1625 ect
->head
= address
.head
;
1629 ccw
[-1].flags
|= CCW_FLAG_CC
;
1630 ccw
->cmd_code
= DASD_ECKD_CCW_WRITE_CKD
;
1631 ccw
->flags
= CCW_FLAG_SLI
;
1633 ccw
->cda
= (__u32
)(addr_t
) ect
;
1634 } else { /* write remaining records */
1635 for (i
= 0; i
< rpt
; i
++) {
1636 ect
= (struct eckd_count
*) data
;
1637 data
+= sizeof(struct eckd_count
);
1638 ect
->cyl
= address
.cyl
;
1639 ect
->head
= address
.head
;
1640 ect
->record
= i
+ 1;
1642 ect
->dl
= fdata
->blksize
;
1643 /* Check for special tracks 0-1 when formatting CDL */
1644 if ((intensity
& 0x08) &&
1645 fdata
->start_unit
== 0) {
1648 ect
->dl
= sizes_trk0
[i
] - 4;
1651 if ((intensity
& 0x08) &&
1652 fdata
->start_unit
== 1) {
1654 ect
->dl
= LABEL_SIZE
- 44;
1656 ccw
[-1].flags
|= CCW_FLAG_CC
;
1657 ccw
->cmd_code
= DASD_ECKD_CCW_WRITE_CKD
;
1658 ccw
->flags
= CCW_FLAG_SLI
;
1660 ccw
->cda
= (__u32
)(addr_t
) ect
;
1664 fcp
->startdev
= device
;
1665 fcp
->memdev
= device
;
1667 fcp
->buildclk
= get_clock();
1668 fcp
->status
= DASD_CQR_FILLED
;
1672 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req
*cqr
)
1674 cqr
->status
= DASD_CQR_FILLED
;
1675 if (cqr
->block
&& (cqr
->startdev
!= cqr
->block
->base
)) {
1676 dasd_eckd_reset_ccw_to_base_io(cqr
);
1677 cqr
->startdev
= cqr
->block
->base
;
1681 static dasd_erp_fn_t
1682 dasd_eckd_erp_action(struct dasd_ccw_req
* cqr
)
1684 struct dasd_device
*device
= (struct dasd_device
*) cqr
->startdev
;
1685 struct ccw_device
*cdev
= device
->cdev
;
1687 switch (cdev
->id
.cu_type
) {
1692 return dasd_3990_erp_action
;
1696 return dasd_default_erp_action
;
1700 static dasd_erp_fn_t
1701 dasd_eckd_erp_postaction(struct dasd_ccw_req
* cqr
)
1703 return dasd_default_erp_postaction
;
1707 static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device
*device
,
1713 /* first of all check for state change pending interrupt */
1714 mask
= DEV_STAT_ATTENTION
| DEV_STAT_DEV_END
| DEV_STAT_UNIT_EXCEP
;
1715 if ((scsw_dstat(&irb
->scsw
) & mask
) == mask
) {
1716 dasd_generic_handle_state_change(device
);
1720 /* summary unit check */
1721 if ((scsw_dstat(&irb
->scsw
) & DEV_STAT_UNIT_CHECK
) &&
1722 (irb
->ecw
[7] == 0x0D)) {
1723 dasd_alias_handle_summary_unit_check(device
, irb
);
1727 sense
= dasd_get_sense(irb
);
1728 /* service information message SIM */
1729 if (sense
&& !(sense
[27] & DASD_SENSE_BIT_0
) &&
1730 ((sense
[6] & DASD_SIM_SENSE
) == DASD_SIM_SENSE
)) {
1731 dasd_3990_erp_handle_sim(device
, sense
);
1732 dasd_schedule_device_bh(device
);
1736 if ((scsw_cc(&irb
->scsw
) == 1) &&
1737 (scsw_fctl(&irb
->scsw
) & SCSW_FCTL_START_FUNC
) &&
1738 (scsw_actl(&irb
->scsw
) & SCSW_ACTL_START_PEND
) &&
1739 (scsw_stctl(&irb
->scsw
) & SCSW_STCTL_STATUS_PEND
)) {
1740 /* fake irb do nothing, they are handled elsewhere */
1741 dasd_schedule_device_bh(device
);
1746 /* just report other unsolicited interrupts */
1747 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
1748 "unsolicited interrupt received");
1750 DBF_DEV_EVENT(DBF_ERR
, device
, "%s",
1751 "unsolicited interrupt received "
1752 "(sense available)");
1753 device
->discipline
->dump_sense_dbf(device
, irb
, "unsolicited");
1756 dasd_schedule_device_bh(device
);
1761 static struct dasd_ccw_req
*dasd_eckd_build_cp_cmd_single(
1762 struct dasd_device
*startdev
,
1763 struct dasd_block
*block
,
1764 struct request
*req
,
1769 unsigned int first_offs
,
1770 unsigned int last_offs
,
1771 unsigned int blk_per_trk
,
1772 unsigned int blksize
)
1774 struct dasd_eckd_private
*private;
1775 unsigned long *idaws
;
1776 struct LO_eckd_data
*LO_data
;
1777 struct dasd_ccw_req
*cqr
;
1779 struct req_iterator iter
;
1783 int count
, cidaw
, cplength
, datasize
;
1785 unsigned char cmd
, rcmd
;
1787 struct dasd_device
*basedev
;
1789 basedev
= block
->base
;
1790 private = (struct dasd_eckd_private
*) basedev
->private;
1791 if (rq_data_dir(req
) == READ
)
1792 cmd
= DASD_ECKD_CCW_READ_MT
;
1793 else if (rq_data_dir(req
) == WRITE
)
1794 cmd
= DASD_ECKD_CCW_WRITE_MT
;
1796 return ERR_PTR(-EINVAL
);
1798 /* Check struct bio and count the number of blocks for the request. */
1801 rq_for_each_segment(bv
, req
, iter
) {
1802 if (bv
->bv_len
& (blksize
- 1))
1803 /* Eckd can only do full blocks. */
1804 return ERR_PTR(-EINVAL
);
1805 count
+= bv
->bv_len
>> (block
->s2b_shift
+ 9);
1806 #if defined(CONFIG_64BIT)
1807 if (idal_is_needed (page_address(bv
->bv_page
), bv
->bv_len
))
1808 cidaw
+= bv
->bv_len
>> (block
->s2b_shift
+ 9);
1812 if (count
!= last_rec
- first_rec
+ 1)
1813 return ERR_PTR(-EINVAL
);
1815 /* use the prefix command if available */
1816 use_prefix
= private->features
.feature
[8] & 0x01;
1818 /* 1x prefix + number of blocks */
1819 cplength
= 2 + count
;
1820 /* 1x prefix + cidaws*sizeof(long) */
1821 datasize
= sizeof(struct PFX_eckd_data
) +
1822 sizeof(struct LO_eckd_data
) +
1823 cidaw
* sizeof(unsigned long);
1825 /* 1x define extent + 1x locate record + number of blocks */
1826 cplength
= 2 + count
;
1827 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1828 datasize
= sizeof(struct DE_eckd_data
) +
1829 sizeof(struct LO_eckd_data
) +
1830 cidaw
* sizeof(unsigned long);
1832 /* Find out the number of additional locate record ccws for cdl. */
1833 if (private->uses_cdl
&& first_rec
< 2*blk_per_trk
) {
1834 if (last_rec
>= 2*blk_per_trk
)
1835 count
= 2*blk_per_trk
- first_rec
;
1837 datasize
+= count
*sizeof(struct LO_eckd_data
);
1839 /* Allocate the ccw request. */
1840 cqr
= dasd_smalloc_request(DASD_ECKD_MAGIC
, cplength
, datasize
,
1845 /* First ccw is define extent or prefix. */
1847 if (prefix(ccw
++, cqr
->data
, first_trk
,
1848 last_trk
, cmd
, basedev
, startdev
) == -EAGAIN
) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later. */
1852 dasd_sfree_request(cqr
, startdev
);
1853 return ERR_PTR(-EAGAIN
);
1855 idaws
= (unsigned long *) (cqr
->data
+
1856 sizeof(struct PFX_eckd_data
));
1858 if (define_extent(ccw
++, cqr
->data
, first_trk
,
1859 last_trk
, cmd
, startdev
) == -EAGAIN
) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later. */
1863 dasd_sfree_request(cqr
, startdev
);
1864 return ERR_PTR(-EAGAIN
);
1866 idaws
= (unsigned long *) (cqr
->data
+
1867 sizeof(struct DE_eckd_data
));
1869 /* Build locate_record+read/write/ccws. */
1870 LO_data
= (struct LO_eckd_data
*) (idaws
+ cidaw
);
1872 if (private->uses_cdl
== 0 || recid
> 2*blk_per_trk
) {
1873 /* Only standard blocks so there is just one locate record. */
1874 ccw
[-1].flags
|= CCW_FLAG_CC
;
1875 locate_record(ccw
++, LO_data
++, first_trk
, first_offs
+ 1,
1876 last_rec
- recid
+ 1, cmd
, basedev
, blksize
);
1878 rq_for_each_segment(bv
, req
, iter
) {
1879 dst
= page_address(bv
->bv_page
) + bv
->bv_offset
;
1880 if (dasd_page_cache
) {
1881 char *copy
= kmem_cache_alloc(dasd_page_cache
,
1882 GFP_DMA
| __GFP_NOWARN
);
1883 if (copy
&& rq_data_dir(req
) == WRITE
)
1884 memcpy(copy
+ bv
->bv_offset
, dst
, bv
->bv_len
);
1886 dst
= copy
+ bv
->bv_offset
;
1888 for (off
= 0; off
< bv
->bv_len
; off
+= blksize
) {
1889 sector_t trkid
= recid
;
1890 unsigned int recoffs
= sector_div(trkid
, blk_per_trk
);
1893 /* Locate record for cdl special block ? */
1894 if (private->uses_cdl
&& recid
< 2*blk_per_trk
) {
1895 if (dasd_eckd_cdl_special(blk_per_trk
, recid
)){
1897 count
= dasd_eckd_cdl_reclen(recid
);
1898 if (count
< blksize
&&
1899 rq_data_dir(req
) == READ
)
1900 memset(dst
+ count
, 0xe5,
1903 ccw
[-1].flags
|= CCW_FLAG_CC
;
1904 locate_record(ccw
++, LO_data
++,
1906 1, rcmd
, basedev
, count
);
1908 /* Locate record for standard blocks ? */
1909 if (private->uses_cdl
&& recid
== 2*blk_per_trk
) {
1910 ccw
[-1].flags
|= CCW_FLAG_CC
;
1911 locate_record(ccw
++, LO_data
++,
1913 last_rec
- recid
+ 1,
1914 cmd
, basedev
, count
);
1916 /* Read/write ccw. */
1917 ccw
[-1].flags
|= CCW_FLAG_CC
;
1918 ccw
->cmd_code
= rcmd
;
1920 if (idal_is_needed(dst
, blksize
)) {
1921 ccw
->cda
= (__u32
)(addr_t
) idaws
;
1922 ccw
->flags
= CCW_FLAG_IDA
;
1923 idaws
= idal_create_words(idaws
, dst
, blksize
);
1925 ccw
->cda
= (__u32
)(addr_t
) dst
;
1933 if (blk_noretry_request(req
) ||
1934 block
->base
->features
& DASD_FEATURE_FAILFAST
)
1935 set_bit(DASD_CQR_FLAGS_FAILFAST
, &cqr
->flags
);
1936 cqr
->startdev
= startdev
;
1937 cqr
->memdev
= startdev
;
1939 cqr
->expires
= 5 * 60 * HZ
; /* 5 minutes */
1940 cqr
->lpm
= private->path_data
.ppm
;
1942 cqr
->buildclk
= get_clock();
1943 cqr
->status
= DASD_CQR_FILLED
;
1947 static struct dasd_ccw_req
*dasd_eckd_build_cp_cmd_track(
1948 struct dasd_device
*startdev
,
1949 struct dasd_block
*block
,
1950 struct request
*req
,
1955 unsigned int first_offs
,
1956 unsigned int last_offs
,
1957 unsigned int blk_per_trk
,
1958 unsigned int blksize
)
1960 struct dasd_eckd_private
*private;
1961 unsigned long *idaws
;
1962 struct dasd_ccw_req
*cqr
;
1964 struct req_iterator iter
;
1966 char *dst
, *idaw_dst
;
1967 unsigned int cidaw
, cplength
, datasize
;
1971 struct dasd_device
*basedev
;
1972 unsigned int trkcount
, count
, count_to_trk_end
;
1973 unsigned int idaw_len
, seg_len
, part_len
, len_to_track_end
;
1974 unsigned char new_track
, end_idaw
;
1976 unsigned int recoffs
;
1978 basedev
= block
->base
;
1979 private = (struct dasd_eckd_private
*) basedev
->private;
1980 if (rq_data_dir(req
) == READ
)
1981 cmd
= DASD_ECKD_CCW_READ_TRACK_DATA
;
1982 else if (rq_data_dir(req
) == WRITE
)
1983 cmd
= DASD_ECKD_CCW_WRITE_TRACK_DATA
;
1985 return ERR_PTR(-EINVAL
);
	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
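	/* Rough illustration (added, not from the original source): with 4 KB
	 * blocks and 4 KB pages, a page whose blocks straddle a track boundary
	 * is described by two IDAWs, one per track-sized ccw, so the record
	 * count is a safe upper bound for the number of IDAWs needed. */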
1992 cidaw
= last_rec
- first_rec
+ 1;
1993 trkcount
= last_trk
- first_trk
+ 1;
1995 /* 1x prefix + one read/write ccw per track */
1996 cplength
= 1 + trkcount
;
1998 /* on 31-bit we need space for two 32 bit addresses per page
1999 * on 64-bit one 64 bit address
2001 datasize
= sizeof(struct PFX_eckd_data
) +
2002 cidaw
* sizeof(unsigned long long);
2004 /* Allocate the ccw request. */
2005 cqr
= dasd_smalloc_request(DASD_ECKD_MAGIC
, cplength
, datasize
,
2010 /* transfer length factor: how many bytes to read from the last track */
2011 if (first_trk
== last_trk
)
2012 tlf
= last_offs
- first_offs
+ 1;
2014 tlf
= last_offs
+ 1;
2017 if (prefix_LRE(ccw
++, cqr
->data
, first_trk
,
2018 last_trk
, cmd
, basedev
, startdev
,
2019 1 /* format */, first_offs
+ 1,
		/* Clock not in sync and XRC is enabled.
		 * Try again later. */
2025 dasd_sfree_request(cqr
, startdev
);
2026 return ERR_PTR(-EAGAIN
);
	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and its idal ends with the track boundaries
	 */
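	/* Practical consequence (illustrative): the checks further down return
	 * -ERANGE when __pa(dst) of a new idaw is not IDA_BLOCK_SIZE aligned,
	 * or when a segment does not continue exactly where the data collected
	 * for the current idaw ended. */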
2036 idaws
= (unsigned long *) (cqr
->data
+ sizeof(struct PFX_eckd_data
));
2040 len_to_track_end
= 0;
2043 rq_for_each_segment(bv
, req
, iter
) {
2044 dst
= page_address(bv
->bv_page
) + bv
->bv_offset
;
2045 seg_len
= bv
->bv_len
;
2049 recoffs
= sector_div(trkid
, blk_per_trk
);
2050 count_to_trk_end
= blk_per_trk
- recoffs
;
2051 count
= min((last_rec
- recid
+ 1),
2052 (sector_t
)count_to_trk_end
);
2053 len_to_track_end
= count
* blksize
;
2054 ccw
[-1].flags
|= CCW_FLAG_CC
;
2055 ccw
->cmd_code
= cmd
;
2056 ccw
->count
= len_to_track_end
;
2057 ccw
->cda
= (__u32
)(addr_t
)idaws
;
2058 ccw
->flags
= CCW_FLAG_IDA
;
2062 /* first idaw for a ccw may start anywhere */
2066 /* If we start a new idaw, we must make sure that it
2067 * starts on an IDA_BLOCK_SIZE boundary.
2068 * If we continue an idaw, we must make sure that the
2069 * current segment begins where the so far accumulated
2073 if (__pa(dst
) & (IDA_BLOCK_SIZE
-1)) {
2074 dasd_sfree_request(cqr
, startdev
);
2075 return ERR_PTR(-ERANGE
);
2079 if ((idaw_dst
+ idaw_len
) != dst
) {
2080 dasd_sfree_request(cqr
, startdev
);
2081 return ERR_PTR(-ERANGE
);
2083 part_len
= min(seg_len
, len_to_track_end
);
2084 seg_len
-= part_len
;
2086 idaw_len
+= part_len
;
2087 len_to_track_end
-= part_len
;
2088 /* collected memory area ends on an IDA_BLOCK border,
2090 * idal_create_words will handle cases where idaw_len
2091 * is larger then IDA_BLOCK_SIZE
2093 if (!(__pa(idaw_dst
+ idaw_len
) & (IDA_BLOCK_SIZE
-1)))
2095 /* We also need to end the idaw at track end */
2096 if (!len_to_track_end
) {
2101 idaws
= idal_create_words(idaws
, idaw_dst
,
2110 if (blk_noretry_request(req
) ||
2111 block
->base
->features
& DASD_FEATURE_FAILFAST
)
2112 set_bit(DASD_CQR_FLAGS_FAILFAST
, &cqr
->flags
);
2113 cqr
->startdev
= startdev
;
2114 cqr
->memdev
= startdev
;
2116 cqr
->expires
= 5 * 60 * HZ
; /* 5 minutes */
2117 cqr
->lpm
= private->path_data
.ppm
;
2119 cqr
->buildclk
= get_clock();
2120 cqr
->status
= DASD_CQR_FILLED
;
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	u8 pfx_cmd;
	int rc = 0;
	int sector = 0;
	int dn, d;

	/* setup prefix data */
	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata.validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata.validity.hyper_pav = 1;
	}

	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(&pfxdata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	lredata->auxiliary.length_valid = 1;
	lredata->auxiliary.length_scope = 1;
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	lredata->sector = sector;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
			   &pfxdata, sizeof(pfxdata), total_data_size);

	return rc;
}
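
/*
 * Worked example of the 3390 sector formula used above (illustrative
 * comment, not from the original source): for blksize = 4096 and
 * rec_on_trk = 2,
 *   dn = ceil_quot(4096 + 6, 232)             = 18,
 *   d  = 9 + ceil_quot(4096 + 6 * 19, 34)     = 9 + 124 = 133,
 *   sector = (49 + (2 - 1) * (10 + 133)) / 8  = 192 / 8 = 24,
 * so the locate record data points at sector 24 of the track.  The 3380
 * branch follows the same pattern with its own geometry constants.
 */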
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* Track-based I/O needs to address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
	if (IS_ERR(cqr))
		return cqr;

	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 100*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->retries = 10;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	cqr->cpaddr = itcw_get_tcw(itcw);

	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries.
	 * We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
	 */
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
		if (IS_ERR(last_tidaw))
			return (struct dasd_ccw_req *)last_tidaw;
	}

	last_tidaw->flags |= 0x80;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
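
/*
 * Illustrative note (added comment, not from the original source): because
 * the request queue is set up with page-sized segment limits, every bio
 * segment of the request maps to exactly one TIDAW.  A 64 KiB transfer
 * split into 16 page-sized segments therefore needs ctidaw = 16,
 * itcw_calc_size(0, 16, 0) bytes of cqr data, and the 16th TIDAW gets the
 * 0x80 "last" flag before itcw_finalize() is called.
 */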
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int tpm, cmdrtd, cmdwtd;
	int use_prefix;
#if defined(CONFIG_64BIT)
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
#endif
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	/* is transport mode supported? */
#if defined(CONFIG_64BIT)
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
#else
	tpm = 0;
#endif

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}
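
/*
 * Summary of the channel program selection above (illustrative comment,
 * added for clarity):
 * - CDL special tracks or an active dasd_page_cache: fall through to the
 *   command-mode single-block program (dasd_eckd_build_cp_cmd_single),
 * - transport mode available (fcx in css, gneq and features), not
 *   disabled via dasd_nofcx, and the request stays on one track:
 *   TCW/TIDAW program (dasd_eckd_build_cp_tpm_track),
 * - otherwise, if prefix and read/write track data are supported:
 *   command mode with one CCW per track (dasd_eckd_build_cp_cmd_track),
 * - any failure other than -EAGAIN falls back to the single-block case.
 */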
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_eckd_private *) cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv->bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
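
/*
 * Illustrative note (added comment): with dasd_page_cache active the
 * single-block channel program transfers data through bounce buffers that
 * were allocated from the cache instead of the bio pages.  The loop above
 * therefore walks the CCWs in step with the request segments, copies the
 * data back into the bio page for READ requests and returns each bounce
 * buffer to the cache via kmem_cache_free() on its page-aligned address.
 */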
/*
 * Modify ccw/tcw in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
	struct ccw1 *ccw;
	struct PFX_eckd_data *pfxdata;
	struct tcw *tcw;
	struct tccb *tccb;
	struct dcw *dcw;

	if (cqr->cpmode == 1) {
		tcw = cqr->cpaddr;
		tccb = tcw_get_tccb(tcw);
		dcw = (struct dcw *)&tccb->tca[0];
		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
		pfxdata->validity.verify_base = 0;
		pfxdata->validity.hyper_pav = 0;
	} else {
		ccw = cqr->cpaddr;
		pfxdata = cqr->data;
		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
			pfxdata->validity.verify_base = 0;
			pfxdata->validity.hyper_pav = 0;
		}
	}
}
#define DASD_ECKD_CHANQ_MAX_SIZE 4

static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = (struct dasd_eckd_private *) startdev->private;
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}

static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
				   struct request *req)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
	private = (struct dasd_eckd_private *) cqr->memdev->private;
	private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
	return dasd_eckd_free_cp(cqr, req);
}
static int
dasd_eckd_fill_info(struct dasd_device * device,
		    struct dasd_information2_t * info)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	info->label_block = 2;
	info->FBA_layout = private->uses_cdl ? 0 : 1;
	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(struct dasd_eckd_characteristics));
	info->confdata_size = min((unsigned long)private->conf_len,
				  sizeof(info->configuration_data));
	memcpy(info->configuration_data, private->conf_data,
	       info->confdata_size);
	return 0;
}
/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a prior reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
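
/*
 * Illustrative note (added for clarity): dasd_eckd_release above and
 * dasd_eckd_reserve/dasd_eckd_steal_lock below build the same minimal
 * one-CCW channel program and differ only in the command code (RELEASE,
 * RESERVE or SLCK).  With cqr->retries = 2 and cqr->expires = 2 * HZ the
 * request is terminated and retried after two seconds, so an outstanding
 * reservation interrupt cannot block the ioctl indefinitely.
 */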
/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation
 * (unconditional reserve).
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Read performance statistics
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
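
/*
 * Illustrative layout of the request built above (added comment): the cqr
 * data area holds the PSF parameter block immediately followed by the RSSD
 * result buffer, and the two CCWs are chained with CCW_FLAG_CC:
 *   CCW 1: PSF,  count = sizeof(struct dasd_psf_prssd_data),    cda -> prssdp
 *   CCW 2: RSSD, count = sizeof(struct dasd_rssd_perf_stats_t), cda -> stats
 * Only the RSSD output, the performance statistics, is copied back to the
 * user buffer after dasd_sleep_on() completes.
 */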
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private =
		(struct dasd_eckd_private *)device->private;
	struct attrib_data_t attrib = private->attrib;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	rc = 0;
	if (copy_to_user(argp, (long *) &attrib,
			 sizeof(struct attrib_data_t)))
		rc = -EFAULT;

	return rc;
}
/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private =
		(struct dasd_eckd_private *)device->private;
	struct attrib_data_t attrib;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
		return -EFAULT;
	private->attrib = attrib;

	dev_info(&device->cdev->dev,
		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
		 private->attrib.operation, private->attrib.nr_cyl);
	return 0;
}
/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task() || sizeof(long) == 4) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* alloc I/O data area */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			 rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
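
/*
 * Illustrative note (added comment): the user-supplied psf_data and
 * rssd_result fields are 64-bit values even when the caller is a 31-bit
 * program.  A value like 0x100000000 cannot be a valid 31-bit address and
 * is rejected by the ">> 32" checks, while the "& 0x7fffffffULL" mask
 * clears bit 31, which 31-bit user space may carry in a pointer although
 * only the low 31 bits form the address.
 */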
static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
	struct dasd_device *device = block->base;

	switch (cmd) {
	case BIODASDGATTR:
		return dasd_eckd_get_attrib(device, argp);
	case BIODASDSATTR:
		return dasd_eckd_set_attrib(device, argp);
	case BIODASDPSRD:
		return dasd_eckd_performance(device, argp);
	case BIODASDRLSE:
		return dasd_eckd_release(device);
	case BIODASDRSRV:
		return dasd_eckd_reserve(device);
	case BIODASDSLCK:
		return dasd_eckd_steal_lock(device);
	case BIODASDSYMMIO:
		return dasd_symm_io(device, argp);
	default:
		return -ENOIOCTLCMD;
	}
}
/*
 * Dump the range of CCWs into 'page' buffer
 * and return number of printed chars.
 */
static int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);

		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			datap = (char *) *((addr_t *) (addr_t) from->cda);
		else
			datap = (char *) ((addr_t) from->cda);

		/* dump data (max 32 bytes) */
		for (count = 0; count < from->count && count < 32; count++) {
			if (count % 8 == 0) len += sprintf(page + len, " ");
			if (count % 4 == 0) len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	return len;
}
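
/*
 * Example of the output produced by the helper above (illustrative only,
 * addresses and data made up):
 *   dasd(eckd): CCW 000000001f2d3e40: 86400000 1f2d4000 DAT:  47000000 ...
 * i.e. the CCW address, the two CCW words (command code, flags, count and
 * data address) and up to 32 bytes of the addressed data, grouped four
 * bytes at a time with a wider gap every eight bytes.
 */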
static void dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
				     char *reason)
{
	u64 *sense;

	sense = (u64 *) dasd_get_sense(irb);
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device,
			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
			      "%016llx", reason,
			      scsw_is_tm(&irb->scsw) ? "t" : "c",
			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
			      scsw_dstat(&irb->scsw), sense[0], sense[1],
			      sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
			      "SORRY - NO VALID SENSE AVAILABLE\n");
	}
}
/*
 * Print sense data and related channel program.
 * Parts are printed because printk buffer is only 1024 bytes.
 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
		       req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
		       scsw_cc(&irb->scsw), req ? req->intrc : 0);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, KERN_ERR PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk("%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, KERN_ERR PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk("%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa; /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2;     /* there is a gap - print header */
			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1;	/* there is a gap - print header */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk("%s", page);
	}
	free_page((unsigned long) page);
}
/*
 * Print sense data from a tcw.
 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			      "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
		       "fcxs: 0x%02X schxs: 0x%02X\n", req,
		       scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
		       scsw_cc(&irb->scsw), req->intrc,
		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	tsb = NULL;
	sense = NULL;
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01))
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " residual %d\n", residual);

		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.ddpc.rcq: ");
			for (sl = 0; sl < 16; sl++) {
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       tsb->tsa.ddpc.rcq[sl]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len,
					       KERN_ERR PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, KERN_ERR PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, KERN_ERR PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, KERN_ERR PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk("%s", page);
	free_page((unsigned long) page);
}
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	if (req && scsw_is_tm(&req->irb.scsw))
		dasd_eckd_dump_sense_tcw(device, req, irb);
	else
		dasd_eckd_dump_sense_ccw(device, req, irb);
}
static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
	/*
	 * the device should be disconnected from our LCU structure
	 * on restore we will reconnect it and reread LCU specific
	 * information like PAV support that might have changed
	 */
	dasd_alias_remove_device(device);
	dasd_alias_disconnect_device_from_lcu(device);

	return 0;
}
static int dasd_eckd_restore_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_eckd_characteristics temp_rdc_data;
	int is_known, rc;
	struct dasd_uid temp_uid;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	/* Generate device unique id and register in devmap */
	rc = dasd_eckd_generate_uid(device, &private->uid);
	dasd_get_uid(device->cdev, &temp_uid);
	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
		dev_err(&device->cdev->dev, "The UID of the DASD has "
			"changed\n");
	if (rc)
		goto out_err;
	dasd_set_uid(device->cdev, &private->uid);

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0)
		return is_known;
	if (!is_known) {
		dasd_eckd_validate_server(device);
		dasd_alias_lcu_setup_complete(device);
	} else
		dasd_alias_wait_for_lcu_setup(device);

	/* RE-Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &temp_rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err;
	}
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* add device to alias management */
	dasd_alias_add_device(device);

	return 0;

out_err:
	return -1;
}
static struct ccw_driver dasd_eckd_driver = {
	.name        = "dasd-eckd",
	.owner       = THIS_MODULE,
	.ids         = dasd_eckd_ids,
	.probe       = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.freeze      = dasd_generic_pm_freeze,
	.thaw        = dasd_generic_restore_device,
	.restore     = dasd_generic_restore_device,
};
/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and one
 * locate record ccw + 16 bytes of data. That makes:
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 249.5 blocks
 * for one request. Give a little safety and the result is 240.
 */
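
/*
 * Worked check of the calculation above (added for clarity):
 * 8192 - 24 - 136 - 8 - 16 - 8 - 16 = 7984 bytes remain for the blocks,
 * 7984 / 16 = 499 blocks for a single request and 499 / 2 = 249.5 when two
 * requests should fit, rounded down with a safety margin to the 240 used
 * for .max_blocks below.
 */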
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.max_blocks = 240,
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.ready_to_online = dasd_eckd_ready_to_online,
	.online_to_ready = dasd_eckd_online_to_ready,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.freeze = dasd_eckd_pm_freeze,
	.restore = dasd_eckd_restore_device,
};
static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();

	return ret;
}

static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);