// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */
#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
#define ECKD_C0(i)	(i->home_bytes)
#define ECKD_F(i)	(i->formula)
#define ECKD_F1(i)	(ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
			(i->factors.f_0x02.f1))
#define ECKD_F2(i)	(ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
			(i->factors.f_0x02.f2))
#define ECKD_F3(i)	(ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
			(i->factors.f_0x02.f3))
#define ECKD_F4(i)	(ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i)	(ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i)	(i->factor6)
#define ECKD_F7(i)	(i->factor7)
#define ECKD_F8(i)	(i->factor8)
/*
 * raw track access always maps to 64k in memory,
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
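/* consistency: 16 blocks * 4096 bytes = 64 KiB = 128 * 512 byte sectors */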
MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
/* definitions for the path verification worker */
struct path_verification_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
};
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);
/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 2 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;

	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
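/* Examples: ceil_quot(10, 4) == 3 and round_up_multiple(10, 4) == 12. */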
static unsigned int
recs_per_track(struct dasd_eckd_characteristics *rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
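/*
 * Note on set_ch_t(): cylinder values above 0xffff (large volumes) are
 * encoded in the upper 12 bits of the head field, i.e. the 32 bit
 * cylinder number is packed into the cyl/head pair as 16 bit cylinder,
 * 12 bit cylinder overflow and 4 bit head.
 */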
static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
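/*
 * A classic ECKD channel program starts with a Define Extent CCW as
 * built above, followed by a Locate Record CCW and the data transfer
 * CCWs; prefix()/prefix_LRE() below fold Define Extent and Locate
 * Record into a single Prefix CCW for control units that support it.
 */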
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		data->sector = 0xff;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
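		/*
		 * The two byte extended parameter is a bit map of the
		 * records to be written: 0xFF shifted left by (8 - count),
		 * or by (16 - count) across both bytes, leaves exactly
		 * 'count' one bits.
		 */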
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		break;
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device *device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		data->sector = 0xff;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}
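/*
 * The UID built above is conventionally printed as
 * vendor.serial.ssid.unit_addr[.vduit], matching the "%s.%s.%04x.%02x.%s"
 * format strings used below; the vduit part only exists for virtual
 * devices that supply a virtual device SNEQ.
 */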
/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
/*
 * Wakeup helper for read_conf
 * if the cqr is not done and needs some error recovery
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
*private)
919 struct dasd_sneq
*sneq
;
923 private->sneq
= NULL
;
924 private->vdsneq
= NULL
;
925 private->gneq
= NULL
;
926 count
= private->conf_len
/ sizeof(struct dasd_sneq
);
927 sneq
= (struct dasd_sneq
*)private->conf_data
;
928 for (i
= 0; i
< count
; ++i
) {
929 if (sneq
->flags
.identifier
== 1 && sneq
->format
== 1)
930 private->sneq
= sneq
;
931 else if (sneq
->flags
.identifier
== 1 && sneq
->format
== 4)
932 private->vdsneq
= (struct vd_sneq
*)sneq
;
933 else if (sneq
->flags
.identifier
== 2)
934 private->gneq
= (struct dasd_gneq
*)sneq
;
935 else if (sneq
->flags
.identifier
== 3 && sneq
->res1
== 1)
936 private->ned
= (struct dasd_ned
*)sneq
;
939 if (!private->ned
|| !private->gneq
) {
941 private->sneq
= NULL
;
942 private->vdsneq
= NULL
;
943 private->gneq
= NULL
;
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}
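/*
 * The path-access byte returned above is interpreted by the callers:
 * dasd_eckd_read_conf() and do_path_verification_work() treat 0x02 as
 * a non-preferred and 0x03 as a preferred path.
 */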
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	ccw_device_get_schid(device->cdev, &sch_id);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			/*
			 * build device UID that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}
		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	int tpm, mdc;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc < 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
}
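/*
 * ccw_device_get_mdc() reports the maximum data count for the device
 * (or a single path); scaling it by FCX_MAX_DATA_FACTOR yields the byte
 * limit for one transport mode (zHPF) request.
 */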
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc < 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
static int rebuild_device_uid(struct dasd_device *device,
			      struct path_verification_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
static void do_path_verification_work(struct work_struct *work)
{
	struct path_verification_work_data *data;
	struct dasd_device *device;
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	unsigned long flags;
	char print_uid[60];
	int rc;

	data = container_of(work, struct path_verification_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}
	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}
	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_path_verification_mutex);
	else
		kfree(data);
}
static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
{
	struct path_verification_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_path_verification_mutex)) {
			data = path_verification_worker;
			data->isglobal = 1;
		} else
			return -ENOMEM;
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_path_verification_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = lpm;
	schedule_work(&data->worker);
	return 0;
}
static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */
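	/*
	 * The channel program chains two CCWs: the PSF CCW below passes
	 * the PRSSD order to the storage server, the chained RSSD CCW
	 * then reads the prepared feature code data back into 'features'.
	 */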
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags e.g. turn on failfast, to prevent blocking
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;
	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device,
				     unsigned long flags)
{
	struct dasd_eckd_private *private = device->private;
	int enable_pav, rc;

	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;
	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);

	/* maybe the requested feature is not available on the server,
	 * therefore just report the error and go ahead */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	return rc;
}
/*
 * worker to do a validate server in case of a lost pathgroup
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* schedule worker again if failed */
		schedule_work(&device->kick_validate);
		return;
	}

	dasd_put_device(device);
}

static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon. */
	if (!schedule_work(&device->kick_validate))
		dasd_put_device(device);
}
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;

	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	dasd_eckd_clear_conf_data(device);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	if (!private)
		return;

	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	dasd_eckd_clear_conf_data(device);
}
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
	char *sense;

	if (init_cqr->status == DASD_CQR_DONE)
		return INIT_CQR_OK;
	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
		 init_cqr->status == DASD_CQR_FAILED) {
		sense = dasd_get_sense(&init_cqr->irb);
		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
			return INIT_CQR_UNFORMATTED;
		else
			return INIT_CQR_ERROR;
	} else
		return INIT_CQR_ERROR;
}
/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_device *device = init_cqr->startdev;
	struct dasd_eckd_private *private = device->private;

	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}

static int dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_ccw_req *init_cqr;

	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	/* first try without ERP, so we can later handle unformatted
	 * devices as special case
	 */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
	init_cqr->retries = 0;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device = block->base;
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}

	if (device->features & DASD_FEATURE_USERAW) {
		block->bp_block = DASD_RAW_BLOCKSIZE;
		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
		block->s2b_shift = 3;
		goto raw;
	}

	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
		    private->count_area[i].cyl != 0 ||
		    private->count_area[i].head != count_area_head[i] ||
		    private->count_area[i].record != count_area_rec[i]) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl) ||
			    private->count_area[i].cyl != 0 ||
			    private->count_area[i].head != count_area_head[i] ||
			    private->count_area[i].record != count_area_rec[i])
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}

	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);

raw:
	block->blocks = ((unsigned long) private->real_cyl *
			  private->rdc_data.trk_per_cyl *
			  blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 (((unsigned long) private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
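/*
 * Capacity example (illustrative values): a 3390 volume with 10017
 * cylinders, 15 tracks per cylinder and 12 blocks of 4 KiB per track
 * yields 10017 * 15 * 12 = 1803060 blocks, i.e. roughly 6.9 GiB.
 */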
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private = block->base->private;

	if (private->init_cqr_status < 0)
		return dasd_eckd_start_analysis(block);
	else
		return dasd_eckd_end_analysis(block);
}

static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
}

static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	if (cancel_work_sync(&device->reload_device))
		dasd_put_device(device);
	if (cancel_work_sync(&device->kick_validate))
		dasd_put_device(device);

	return 0;
}

static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
}
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private = block->base->private;

	if (dasd_check_blocksize(block->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, block->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}
/*
 * Build the TCW request for the format check
 */
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
			  int enable_pav, struct eckd_count *fmt_buffer,
			  int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_device *startdev = NULL;
	struct tidaw *last_tidaw = NULL;
	struct dasd_ccw_req *cqr;
	struct itcw *itcw;
	int itcw_size;
	int count;
	int rc;
	int i;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	/*
	 * we're adding 'count' amount of tidaw to the itcw.
	 * calculate the corresponding itcw_size
	 */
	itcw_size = itcw_calc_size(0, count, 0);

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;

	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
	if (IS_ERR(itcw)) {
		rc = -EINVAL;
		goto out_err;
	}

	cqr->cpaddr = itcw_get_tcw(itcw);
	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
			  sizeof(struct eckd_count),
			  count * sizeof(struct eckd_count), 0, rpt);
	if (rc)
		goto out_err;

	for (i = 0; i < count; i++) {
		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
					    sizeof(struct eckd_count));
		if (IS_ERR(last_tidaw)) {
			rc = -EINVAL;
			goto out_err;
		}
	}

	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	itcw_finalize(itcw);

	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = startdev->default_retries;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);

	return cqr;

out_err:
	dasd_sfree_request(cqr, startdev);

	return ERR_PTR(rc);
}
/*
 * Build the CCW request for the format check
 */
static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *startdev = NULL;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	void *data;
	int cplength, datasize;
	int use_prefix;
	int count;
	int i;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	use_prefix = base_priv->features.feature[8] & 0x01;

	if (use_prefix) {
		cplength = 1;
		datasize = sizeof(struct PFX_eckd_data);
	} else {
		cplength = 2;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data);
	}
	cplength += count;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, NULL);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;
	data = cqr->data;
	ccw = cqr->cpaddr;

	if (use_prefix) {
		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
			   count, 0, 0);
	} else {
		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
			      DASD_ECKD_CCW_READ_COUNT, startdev, 0);

		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;

		locate_record(ccw++, data, fdata->start_unit, 0, count,
			      DASD_ECKD_CCW_READ_COUNT, base, 0);
	}

	for (i = 0; i < count; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) fmt_buffer;
		ccw++;
		fmt_buffer++;
	}

	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = DASD_RETRIES;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base,
		       struct format_data_t *fdata,
		       int enable_pav)
{
	struct dasd_eckd_private *base_priv;
	struct dasd_eckd_private *start_priv;
	struct dasd_device *startdev = NULL;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ch_t address;
	struct ccw1 *ccw;
	void *data;
	int rpt;
	int cplength, datasize;
	int i, j;
	int intensity = 0;
	int r0_perm;
	int nr_tracks;
	int use_prefix;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);

	nr_tracks = fdata->stop_unit - fdata->start_unit + 1;

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
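	/*
	 * Example combinations (illustrative): 0x00 formats tracks with the
	 * Linux disk layout, 0x08 does the same with the OS/390 compatible
	 * layout (cdl), 0x09 additionally writes record zero, and 0x04
	 * invalidates the tracks. Bit 4 (0x10) is stripped off into r0_perm
	 * below and only controls whether the storage subsystem may modify
	 * record zero.
	 */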
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}

	use_prefix = base_priv->features.feature[8] & 0x01;

	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + (rpt*nr_tracks);
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 2 + rpt * nr_tracks;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&startdev->cdev->dev,
			 "An I/O control call used incorrect flags 0x%x\n",
			 fdata->intensity);
		return ERR_PTR(-EINVAL);
	}
	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, NULL);
	if (IS_ERR(fcp))
		return fcp;

	start_priv->count++;
	data = fcp->data;
	ccw = fcp->cpaddr;

	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct PFX_eckd_data *)data)
					->define_extent.ga_extended |= 0x04;
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct DE_eckd_data *) data)
					->ga_extended |= 0x04;
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt*nr_tracks,
			      DASD_ECKD_CCW_WRITE_CKD, base,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			       base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt * nr_tracks + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
			      base->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, base, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}

	for (j = 0; j < nr_tracks; j++) {
		/* calculate cylinder and head for the current track */
		set_ch_t(&address,
			 (fdata->start_unit + j) /
			 base_priv->rdc_data.trk_per_cyl,
			 (fdata->start_unit + j) %
			 base_priv->rdc_data.trk_per_cyl);
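		/*
		 * Illustrative mapping (assumed trk_per_cyl = 15, as on a
		 * 3390): track 31 becomes cylinder 31 / 15 = 2 and head
		 * 31 % 15 = 1.
		 */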
		if (intensity & 0x01) {	/* write record zero */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 0;
			ect->kl = 0;
			ect->dl = 8;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
		if ((intensity & ~0x08) & 0x04) {	/* erase track */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 1;
			ect->kl = 0;
			ect->dl = 0;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
		} else {		/* write remaining records */
			for (i = 0; i < rpt; i++) {
				ect = (struct eckd_count *) data;
				data += sizeof(struct eckd_count);
				ect->cyl = address.cyl;
				ect->head = address.head;
				ect->record = i + 1;
				ect->kl = 0;
				ect->dl = fdata->blksize;
				/*
				 * Check for special tracks 0-1
				 * when formatting CDL
				 */
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 0) {
					if (i < 3) {
						ect->kl = 4;
						ect->dl = sizes_trk0[i] - 4;
					}
				}
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 1) {
					ect->kl = 44;
					ect->dl = LABEL_SIZE - 44;
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				if (i != 0 || j == 0)
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD;
				else
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD_MT;
				ccw->flags = CCW_FLAG_SLI;
				ccw->count = 8;
				ccw->cda = (__u32)(addr_t) ect;
				ccw++;
			}
		}
	}

	fcp->startdev = startdev;
	fcp->memdev = startdev;
	fcp->basedev = base;
	fcp->retries = 256;
	fcp->expires = startdev->default_expires * HZ;
	fcp->buildclk = get_tod_clock();
	fcp->status = DASD_CQR_FILLED;

	return fcp;
}
/*
 * Wrapper function to build a CCW request depending on input data
 */
static struct dasd_ccw_req *
dasd_eckd_format_build_ccw_req(struct dasd_device *base,
			       struct format_data_t *fdata, int enable_pav,
			       int tpm, struct eckd_count *fmt_buffer, int rpt)
{
	struct dasd_ccw_req *ccw_req;

	if (!fmt_buffer) {
		ccw_req = dasd_eckd_build_format(base, fdata, enable_pav);
	} else {
		if (tpm)
			ccw_req = dasd_eckd_build_check_tcw(base, fdata,
							    enable_pav,
							    fmt_buffer, rpt);
		else
			ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
							fmt_buffer, rpt);
	}

	return ccw_req;
}
/*
 * Sanity checks on format_data
 */
static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
					  struct format_data_t *fdata)
{
	struct dasd_eckd_private *private = base->private;

	if (fdata->start_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&base->cdev->dev,
			 "Start track number %u used in formatting is too big\n",
			 fdata->start_unit);
		return -EINVAL;
	}
	if (fdata->stop_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&base->cdev->dev,
			 "Stop track number %u used in formatting is too big\n",
			 fdata->stop_unit);
		return -EINVAL;
	}
	if (fdata->start_unit > fdata->stop_unit) {
		dev_warn(&base->cdev->dev,
			 "Start track %u used in formatting exceeds end track\n",
			 fdata->start_unit);
		return -EINVAL;
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		dev_warn(&base->cdev->dev,
			 "The DASD cannot be formatted with block size %u\n",
			 fdata->blksize);
		return -EINVAL;
	}
	return 0;
}
/*
 * This function will process format_data originally coming from an IOCTL
 */
static int dasd_eckd_format_process_data(struct dasd_device *base,
					 struct format_data_t *fdata,
					 int enable_pav, int tpm,
					 struct eckd_count *fmt_buffer, int rpt,
					 struct irb *irb)
{
	struct dasd_eckd_private *private = base->private;
	struct dasd_ccw_req *cqr, *n;
	struct list_head format_queue;
	struct dasd_device *device;
	char *sense = NULL;
	int old_start, old_stop, format_step;
	int step, retry;
	int rc;

	rc = dasd_eckd_format_sanity_checks(base, fdata);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&format_queue);

	old_start = fdata->start_unit;
	old_stop = fdata->stop_unit;

	if (!tpm && fmt_buffer != NULL) {
		/* Command Mode / Format Check */
		format_step = 1;
	} else if (tpm && fmt_buffer != NULL) {
		/* Transport Mode / Format Check */
		format_step = DASD_CQR_MAX_CCW / rpt;
	} else {
		/* Normal Formatting */
		format_step = DASD_CQR_MAX_CCW /
			recs_per_track(&private->rdc_data, 0, fdata->blksize);
	}
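	/*
	 * Worked example (illustrative, with assumed values): if
	 * DASD_CQR_MAX_CCW were 255 and a 4096-byte blocksize yielded 12
	 * records per track, normal formatting would proceed in chunks of
	 * 255 / 12 = 21 tracks per channel program; the loop below then
	 * advances start_unit by at most format_step tracks per request.
	 */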
	do {
		retry = 0;
		while (fdata->start_unit <= old_stop) {
			step = fdata->stop_unit - fdata->start_unit + 1;
			if (step > format_step) {
				fdata->stop_unit =
					fdata->start_unit + format_step - 1;
			}

			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
							     enable_pav, tpm,
							     fmt_buffer, rpt);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&format_queue))
						goto out;
					/*
					 * not enough memory available, start
					 * requests retry after first requests
					 * were finished
					 */
					retry = 1;
					break;
				}
				goto out_err;
			}
			list_add_tail(&cqr->blocklist, &format_queue);

			if (fmt_buffer) {
				step = fdata->stop_unit - fdata->start_unit + 1;
				fmt_buffer += rpt * step;
			}
			fdata->start_unit = fdata->stop_unit + 1;
			fdata->stop_unit = old_stop;
		}

		rc = dasd_sleep_on_queue(&format_queue);

out_err:
		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			if (cqr->status == DASD_CQR_FAILED) {
				/*
				 * Only get sense data if called by format
				 * check
				 */
				if (fmt_buffer && irb) {
					sense = dasd_get_sense(&cqr->irb);
					memcpy(irb, &cqr->irb, sizeof(*irb));
				}
				rc = -EIO;
			}
			list_del_init(&cqr->blocklist);
			dasd_sfree_request(cqr, device);
			private->count--;
		}

		if (rc && rc != -EIO)
			goto out;
		if (rc == -EIO) {
			/*
			 * In case fewer than the expected records are on the
			 * track, we will most likely get a 'No Record Found'
			 * error (in command mode) or a 'File Protected' error
			 * (in transport mode). Those particular cases shouldn't
			 * pass the -EIO to the IOCTL, therefore reset the rc
			 * and continue.
			 */
			if (sense &&
			    (sense[1] & SNS1_NO_REC_FOUND ||
			     sense[1] & SNS1_FILE_PROTECTED))
				retry = 1;
			else
				goto out;
		}
	} while (retry);

out:
	fdata->start_unit = old_start;
	fdata->stop_unit = old_stop;

	return rc;
}
static int dasd_eckd_format_device(struct dasd_device *base,
				   struct format_data_t *fdata, int enable_pav)
{
	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
					     0, NULL);
}
/*
 * Helper function to count consecutive records of a single track.
 */
static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
				   int max)
{
	int head;
	int i;

	head = fmt_buffer[start].head;

	/*
	 * There are 3 conditions where we stop counting:
	 * - if data reoccurs (same head and record may reoccur), which may
	 *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
	 * - when the head changes, because we're iterating over several tracks
	 *   then (DASD_ECKD_CCW_READ_COUNT_MT)
	 * - when we've reached the end of sensible data in the buffer (the
	 *   record will be 0 then)
	 */
	for (i = start; i < max; i++) {
		if (i > start) {
			if ((fmt_buffer[i].head == head &&
			    fmt_buffer[i].record == 1) ||
			    fmt_buffer[i].head != head ||
			    fmt_buffer[i].record == 0)
				break;
		}
	}

	return i - start;
}
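/*
 * Illustrative trace (assumed data): for a buffer holding count entries
 * (head 0: records 1,2,3)(head 1: record 1, ...) a call with start = 0
 * stops at the entry where the head changes and returns 3; a trailing
 * entry with record == 0 marks unused buffer space and ends the count
 * the same way.
 */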
/*
 * Evaluate a given range of tracks. Data like number of records, blocksize,
 * record ids, and key length are compared with expected data.
 *
 * If a mismatch occurs, the corresponding error bit is set, as well as
 * additional information, depending on the error.
 */
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
					     struct format_check_t *cdata,
					     int rpt_max, int rpt_exp,
					     int trk_per_cyl, int tpm)
{
	struct ch_t geo;
	int max_entries;
	int count = 0;
	int trkcount;
	int blksize;
	int pos = 0;
	int i, j;
	int kl;

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	max_entries = trkcount * rpt_max;

	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
		/* Calculate the correct next starting position in the buffer */
		if (tpm) {
			while (fmt_buffer[pos].record == 0 &&
			       fmt_buffer[pos].dl == 0) {
				if (pos++ > max_entries)
					break;
			}
		} else {
			if (i != cdata->expect.start_unit)
				pos += rpt_max - count;
		}

		/* Calculate the expected geo values for the current track */
		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);

		/* Count and check number of records */
		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);

		if (count < rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
			break;
		}
		if (count > rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
			break;
		}

		for (j = 0; j < count; j++, pos++) {
			blksize = cdata->expect.blksize;
			kl = 0;

			/*
			 * Set special values when checking CDL formatted
			 * devices.
			 */
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 0) {
				if (j < 3) {
					blksize = sizes_trk0[j] - 4;
					kl = 4;
				}
			}
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 1) {
				blksize = LABEL_SIZE - 44;
				kl = 44;
			}

			/* Check blocksize */
			if (fmt_buffer[pos].dl != blksize) {
				cdata->result = DASD_FMT_ERR_BLKSIZE;
				goto out;
			}
			/* Check if key length is 0 */
			if (fmt_buffer[pos].kl != kl) {
				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
				goto out;
			}
			/* Check if record_id is correct */
			if (fmt_buffer[pos].cyl != geo.cyl ||
			    fmt_buffer[pos].head != geo.head ||
			    fmt_buffer[pos].record != (j + 1)) {
				cdata->result = DASD_FMT_ERR_RECORD_ID;
				goto out;
			}
		}
	}

out:
	/*
	 * In case of no errors, we need to decrease by one
	 * to get the correct positions.
	 */
	if (!cdata->result) {
		i--;
		pos--;
	}

	cdata->unit = i;
	cdata->num_records = count;
	cdata->rec = fmt_buffer[pos].record;
	cdata->blksize = fmt_buffer[pos].dl;
	cdata->key_length = fmt_buffer[pos].kl;
}
/*
 * Check the format of a range of tracks of a DASD.
 */
static int dasd_eckd_check_device_format(struct dasd_device *base,
					 struct format_check_t *cdata,
					 int enable_pav)
{
	struct dasd_eckd_private *private = base->private;
	struct eckd_count *fmt_buffer;
	struct irb irb;
	int rpt_max, rpt_exp;
	int fmt_buffer_size;
	int trk_per_cyl;
	int trkcount;
	int tpm = 0;
	int rc;

	trk_per_cyl = private->rdc_data.trk_per_cyl;

	/* Get maximum and expected amount of records per track */
	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);

	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!fmt_buffer)
		return -ENOMEM;

	/*
	 * A certain FICON feature subset is needed to operate in transport
	 * mode. Additionally, the support for transport mode is implicitly
	 * checked by comparing the buffer size with fcx_max_data. As long as
	 * the buffer size is smaller we can operate in transport mode and
	 * process multiple tracks. If not, only one track at once is being
	 * processed using command mode.
	 */
	if ((private->features.feature[40] & 0x04) &&
	    fmt_buffer_size <= private->fcx_max_data)
		tpm = 1;
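	/*
	 * Sizing example (illustrative): checking 10 tracks needs
	 * 10 * rpt_max * sizeof(struct eckd_count) bytes of buffer;
	 * transport mode is only chosen when that whole buffer fits into a
	 * single transport-mode data transfer (fcx_max_data), otherwise the
	 * check falls back to one track per command-mode request.
	 */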
	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
					   tpm, fmt_buffer, rpt_max, &irb);
	if (rc && rc != -EIO)
		goto out;
	if (rc == -EIO) {
		/*
		 * If our first attempt with transport mode enabled comes back
		 * with an incorrect length error, we're going to retry the
		 * check with command mode.
		 */
		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
			tpm = 0;
			rc = dasd_eckd_format_process_data(base, &cdata->expect,
							   enable_pav, tpm,
							   fmt_buffer, rpt_max,
							   &irb);
			if (rc)
				goto out;
		} else {
			goto out;
		}
	}

	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
					 trk_per_cyl, tpm);

out:
	kfree(fmt_buffer);

	return rc;
}
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	if (cqr->retries < 0) {
		cqr->status = DASD_CQR_FAILED;
		return;
	}
	cqr->status = DASD_CQR_FILLED;
	if (cqr->block && (cqr->startdev != cqr->block->base)) {
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
		cqr->lpm = dasd_path_get_opm(cqr->block->base);
	}
}
static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
	struct ccw_device *cdev = device->cdev;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		return dasd_3990_erp_action;
	case 0x9343:
	case 0x3880:
	default:
		return dasd_default_erp_action;
	}
}

static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
static void dasd_eckd_check_for_device_change(struct dasd_device *device,
					      struct dasd_ccw_req *cqr,
					      struct irb *irb)
{
	char mask;
	char *sense = NULL;
	struct dasd_eckd_private *private = device->private;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		/*
		 * for alias only, not in offline processing
		 * and only if not suspended
		 */
		if (!device->block && private->lcu &&
		    device->state == DASD_STATE_ONLINE &&
		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
			/* schedule worker to reload device */
			dasd_reload_device(device);
		}
		dasd_generic_handle_state_change(device);
		return;
	}

	sense = dasd_get_sense(irb);
	if (!sense)
		return;

	/* summary unit check */
	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: device already notified");
			return;
		}
		sense = dasd_get_sense(irb);
		if (!sense) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: no reason code available");
			clear_bit(DASD_FLAG_SUC, &device->flags);
			return;
		}
		private->suc_reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			      "eckd handle summary unit check: reason",
			      private->suc_reason);
		dasd_get_device(device);
		if (!schedule_work(&device->suc_work))
			dasd_put_device(device);

		return;
	}

	/* service information message SIM */
	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		return;
	}

	/* loss of device reservation is handled via base devices only
	 * as alias devices may be used with several bases
	 */
	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
	    (sense[7] == 0x3F) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
		if (device->features & DASD_FEATURE_FAILONSLCK)
			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
		dev_err(&device->cdev->dev,
			"The device reservation was lost\n");
	}
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
		if (idal_is_needed(page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len >> (block->s2b_shift + 9);
	}
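	/*
	 * Counting note (illustrative): s2b_shift is the sector-to-block
	 * shift, so bv.bv_len >> (s2b_shift + 9) converts a segment length
	 * in bytes first to 512-byte sectors and then to device blocks.
	 * With 4096-byte blocks (s2b_shift = 3), an 8192-byte segment
	 * contributes 2 blocks to 'count'.
	 */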
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, basedev, 0) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;
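	/*
	 * Worked example (illustrative): a request covering blocks 2..5 of
	 * a single track has first_offs = 2 and last_offs = 5, so
	 * tlf = (5 - 2 + 1) * blksize, i.e. four blocks worth of data.
	 * Across several tracks only the offset into the last track
	 * matters, hence tlf = (last_offs + 1) * blksize.
	 */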
	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and it's idal ends with the track boundaries
	 */
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = NULL;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		seg_len = bv.bv_len;
		while (seg_len) {
			if (new_track) {
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = NULL;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;

	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;

	/* setup prefix data */
	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata.validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata.validity.verify_base = 1;
		pfxdata.validity.hyper_pav = 1;
	}

	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = set_timestamp(NULL, dedata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		/*
		 * If XRC is supported the System Time Stamp is set. The
		 * validity of the time stamp must be reflected in the prefix
		 * data as well.
		 */
		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	case DASD_ECKD_CCW_READ_COUNT_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		dedata->ga_extended |= 0x42;
		dedata->blk_size = blksize;
		lredata->operation.orientation = 0x2;
		lredata->operation.operation = 0x16;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
== DASD_ECKD_CCW_READ_COUNT_MT
) {
3511 lredata
->auxiliary
.length_valid
= 0;
3512 lredata
->auxiliary
.length_scope
= 0;
3513 lredata
->sector
= 0xff;
3515 lredata
->auxiliary
.length_valid
= 1;
3516 lredata
->auxiliary
.length_scope
= 1;
3517 lredata
->sector
= sector
;
3519 lredata
->auxiliary
.imbedded_ccw_valid
= 1;
3520 lredata
->length
= tlf
;
3521 lredata
->imbedded_ccw
= cmd
;
3522 lredata
->count
= count
;
3523 set_ch_t(&lredata
->seek_addr
, begcyl
, beghead
);
3524 lredata
->search_arg
.cyl
= lredata
->seek_addr
.cyl
;
3525 lredata
->search_arg
.head
= lredata
->seek_addr
.head
;
3526 lredata
->search_arg
.record
= rec_on_trk
;
3528 dcw
= itcw_add_dcw(itcw
, pfx_cmd
, 0,
3529 &pfxdata
, sizeof(pfxdata
), total_data_size
);
3530 return PTR_ERR_OR_ZERO(dcw
);
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;
	u8 tidaw_flags;
	unsigned int seg_len, part_len, len_to_track_end;
	unsigned char new_track;
	sector_t recid, trkid;
	unsigned int offs;
	unsigned int count, count_to_trk_end;
	int ret;

	basedev = block->base;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* trackbased I/O needs address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 * In the case of write requests, additional tidaws may
	 * be needed when a segment crosses a track boundary.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}
	if (rq_data_dir(req) == WRITE)
		ctidaw += (last_trk - first_trk);

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	if (IS_ERR(itcw)) {
		ret = -EINVAL;
		goto out_error;
	}
	cqr->cpaddr = itcw_get_tcw(itcw);
	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		ret = -EAGAIN;
		goto out_error;
	}
	len_to_track_end = 0;
	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries
	 * We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
	 * For write requests, a TIDAW must not cross track boundaries, because
	 * we have to set the CBC flag on the last tidaw for each track.
	 */
	if (rq_data_dir(req) == WRITE) {
		new_track = 1;
		recid = first_rec;
		rq_for_each_segment(bv, req, iter) {
			dst = page_address(bv.bv_page) + bv.bv_offset;
			seg_len = bv.bv_len;
			while (seg_len) {
				if (new_track) {
					trkid = recid;
					offs = sector_div(trkid, blk_per_trk);
					count_to_trk_end = blk_per_trk - offs;
					count = min((last_rec - recid + 1),
						    (sector_t)count_to_trk_end);
					len_to_track_end = count * blksize;
					recid += count;
					new_track = 0;
				}
				part_len = min(seg_len, len_to_track_end);
				seg_len -= part_len;
				len_to_track_end -= part_len;
				/* We need to end the tidaw at track end */
				if (!len_to_track_end) {
					new_track = 1;
					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
				} else
					tidaw_flags = 0;
				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
							    dst, part_len);
				if (IS_ERR(last_tidaw)) {
					ret = -EINVAL;
					goto out_error;
				}
				dst += part_len;
			}
		}
	} else {
		rq_for_each_segment(bv, req, iter) {
			dst = page_address(bv.bv_page) + bv.bv_offset;
			last_tidaw = itcw_add_tidaw(itcw, 0x00,
						    dst, bv.bv_len);
			if (IS_ERR(last_tidaw)) {
				ret = -EINVAL;
				goto out_error;
			}
		}
	}
	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
out_error:
	dasd_sfree_request(cqr, startdev);
	return ERR_PTR(ret);
}
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int cmdrtd, cmdwtd;
	int use_prefix;
	int fcx_multitrack;
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	unsigned int data_size;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	fcx_multitrack = private->features.feature[40] & 0x20;
	data_size = blk_rq_bytes(req);
	if (data_size % blksize)
		return ERR_PTR(-EINVAL);
	/* tpm write request add CBC data on each track boundary */
	if (rq_data_dir(req) == WRITE)
		data_size += (last_trk - first_trk) * 4;
3741 cmdrtd
= private->features
.feature
[9] & 0x20;
3742 cmdwtd
= private->features
.feature
[12] & 0x40;
3743 use_prefix
= private->features
.feature
[8] & 0x01;
3746 if (cdlspecial
|| dasd_page_cache
) {
3747 /* do nothing, just fall through to the cmd mode single case */
3748 } else if ((data_size
<= private->fcx_max_data
)
3749 && (fcx_multitrack
|| (first_trk
== last_trk
))) {
3750 cqr
= dasd_eckd_build_cp_tpm_track(startdev
, block
, req
,
3751 first_rec
, last_rec
,
3752 first_trk
, last_trk
,
3753 first_offs
, last_offs
,
3754 blk_per_trk
, blksize
);
3755 if (IS_ERR(cqr
) && (PTR_ERR(cqr
) != -EAGAIN
) &&
3756 (PTR_ERR(cqr
) != -ENOMEM
))
3758 } else if (use_prefix
&&
3759 (((rq_data_dir(req
) == READ
) && cmdrtd
) ||
3760 ((rq_data_dir(req
) == WRITE
) && cmdwtd
))) {
3761 cqr
= dasd_eckd_build_cp_cmd_track(startdev
, block
, req
,
3762 first_rec
, last_rec
,
3763 first_trk
, last_trk
,
3764 first_offs
, last_offs
,
3765 blk_per_trk
, blksize
);
3766 if (IS_ERR(cqr
) && (PTR_ERR(cqr
) != -EAGAIN
) &&
3767 (PTR_ERR(cqr
) != -ENOMEM
))
3771 cqr
= dasd_eckd_build_cp_cmd_single(startdev
, block
, req
,
3772 first_rec
, last_rec
,
3773 first_trk
, last_trk
,
3774 first_offs
, last_offs
,
3775 blk_per_trk
, blksize
);
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int first_offs;
	unsigned int trkcount;
	unsigned long *idaws;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * raw track access needs to be a multiple of 64k and on a 64k boundary.
	 * For read requests we can fix an incorrect alignment by padding
	 * the request with dummy pages.
	 */
	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
		DASD_RAW_SECTORS_PER_TRACK;
	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
		DASD_RAW_SECTORS_PER_TRACK;
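	/*
	 * Padding example (illustrative): with 128 sectors per raw track, a
	 * read starting at sector 100 for 50 sectors has
	 * start_padding_sectors = 100 and end_padding_sectors =
	 * (128 - 150 % 128) % 128 = 106; both gaps are filled from
	 * rawpadpage below so the channel program still moves whole tracks.
	 */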
	basedev = block->base;
	if ((start_padding_sectors || end_padding_sectors) &&
	    (rq_data_dir(req) == WRITE)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "raw write not track aligned (%lu,%lu) req %p",
			      start_padding_sectors, end_padding_sectors, req);
		return ERR_PTR(-EINVAL);
	}

	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
		DASD_RAW_SECTORS_PER_TRACK;
	trkcount = last_trk - first_trk + 1;
	first_offs = 0;

	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Raw track based I/O needs IDAWs for each page,
	 * and not just for 64 bit addresses.
	 */
	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;

	/*
	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
	 * of extended parameter. This is needed for write full track.
	 */
	base_priv = basedev->private;
	use_prefix = base_priv->features.feature[8] & 0x01;
	if (use_prefix) {
		cplength = 1 + trkcount;
		size = sizeof(struct PFX_eckd_data) + 2;
	} else {
		cplength = 2 + trkcount;
		size = sizeof(struct DE_eckd_data) +
			sizeof(struct LRE_eckd_data) + 2;
	}
	size = ALIGN(size, 8);

	datasize = size + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, first_offs + 1, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (unsigned long *)(cqr->data + size);
	len_to_track_end = 0;
	if (start_padding_sectors) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/* maximum 3390 track size */
		ccw->count = 57326;
		/* 64k map to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = (__u32)(addr_t)idaws;
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		seg_len = bv.bv_len;
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		if (!len_to_track_end) {
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum 3390 track size */
			ccw->count = 57326;
			/* 64k map to one track */
			len_to_track_end = 65536;
			ccw->cda = (__u32)(addr_t)idaws;
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
/*
 * Modify ccw/tcw in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
	struct ccw1 *ccw;
	struct PFX_eckd_data *pfxdata;
	struct tcw *tcw;
	struct tccb *tccb;
	struct dcw *dcw;

	if (cqr->cpmode == 1) {
		tcw = cqr->cpaddr;
		tccb = tcw_get_tccb(tcw);
		dcw = (struct dcw *)&tccb->tca[0];
		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
		pfxdata->validity.verify_base = 0;
		pfxdata->validity.hyper_pav = 0;
	} else {
		ccw = cqr->cpaddr;
		pfxdata = cqr->data;
		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
			pfxdata->validity.verify_base = 0;
			pfxdata->validity.hyper_pav = 0;
		}
	}
}
#define DASD_ECKD_CHANQ_MAX_SIZE 4

static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = startdev->private;
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	if ((base->features & DASD_FEATURE_USERAW))
		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
	else
		cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}

static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
				   struct request *req)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
	private = cqr->memdev->private;
	private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
	return dasd_eckd_free_cp(cqr, req);
}
static int
dasd_eckd_fill_info(struct dasd_device * device,
		    struct dasd_information2_t * info)
{
	struct dasd_eckd_private *private = device->private;

	info->label_block = 2;
	info->FBA_layout = private->uses_cdl ? 0 : 1;
	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(private->rdc_data);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(private->rdc_data));
	info->confdata_size = min((unsigned long)private->conf_len,
				  sizeof(info->configuration_data));
	memcpy(info->configuration_data, private->conf_data,
	       info->confdata_size);
	return 0;
}
/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a prior reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * SNID - Sense Path Group ID
 * This ioctl may be used in situations where I/O is stalled due to
 * a reserve, so if the normal dasd_smalloc_request fails, we use the
 * preallocated dasd_reserve_req.
 */
static int dasd_eckd_snid(struct dasd_device *device,
			  void __user *argp)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;
	struct dasd_snid_ioctl_data usrparm;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		return -EFAULT;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_snid_data), device,
				   NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNID;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 12;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->lpm = usrparm.path_mask;

	rc = dasd_sleep_on_immediatly(cqr);
	/* verify that I/O processing didn't modify the path mask */
	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
		rc = -EIO;
	if (!rc) {
		usrparm.data = *((struct dasd_snid_data *)cqr->data);
		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
			rc = -EFAULT;
	}

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Read performance statistics
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extend (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private = device->private;
	struct attrib_data_t attrib = private->attrib;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	rc = 0;
	if (copy_to_user(argp, (long *) &attrib,
			 sizeof(struct attrib_data_t)))
		rc = -EFAULT;

	return rc;
}
/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extend (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private = device->private;
	struct attrib_data_t attrib;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
		return -EFAULT;
	private->attrib = attrib;

	dev_info(&device->cdev->dev,
		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
		 private->attrib.operation, private->attrib.nr_cyl);
	return 0;
}
/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed and should be allocated */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/* alloc I/O data area */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			 rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
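/*
 * Dispatch the ECKD specific ioctls to their handlers; unknown
 * commands are answered with -ENOTTY.
 */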
static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
	struct dasd_device *device = block->base;

	switch (cmd) {
	case BIODASDGATTR:
		return dasd_eckd_get_attrib(device, argp);
	case BIODASDSATTR:
		return dasd_eckd_set_attrib(device, argp);
	case BIODASDPSRD:
		return dasd_eckd_performance(device, argp);
	case BIODASDRLSE:
		return dasd_eckd_release(device);
	case BIODASDRSRV:
		return dasd_eckd_reserve(device);
	case BIODASDSLCK:
		return dasd_eckd_steal_lock(device);
	case BIODASDSNID:
		return dasd_eckd_snid(device, argp);
	case BIODASDSYMMIO:
		return dasd_symm_io(device, argp);
	default:
		return -ENOTTY;
	}
}
/*
 * Dump the range of CCWs into 'page' buffer
 * and return number of printed chars.
 */
static int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		len += sprintf(page + len, PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);

		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			datap = (char *) *((addr_t *) (addr_t) from->cda);
		else
			datap = (char *) ((addr_t) from->cda);

		/* dump data (max 32 bytes) */
		for (count = 0; count < from->count && count < 32; count++) {
			if (count % 8 == 0) len += sprintf(page + len, " ");
			if (count % 4 == 0) len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	return len;
}
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			 char *reason)
{
	u64 *sense;
	u64 *stat;

	sense = (u64 *) dasd_get_sense(irb);
	stat = (u64 *) &irb->scsw;
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
			      "%016llx %016llx %016llx %016llx",
			      reason, *stat, *((u32 *) (stat + 1)),
			      sense[0], sense[1], sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
			      reason, *stat, *((u32 *) (stat + 1)),
			      "NO VALID SENSE");
	}
}
/*
 * Print sense data and related channel program.
 * Parts are printed because printk buffer is only 1024 bytes.
 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk(KERN_ERR "%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa; /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2;     /* there is a gap - print header */
			len += sprintf(page, PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1;    /* there is a gap - print header */
			len += sprintf(page + len, PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk(KERN_ERR "%s", page);
	}
	free_page((unsigned long) page);
}
/*
 * Print sense data from a tcw.
 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			      "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	tsb = NULL;
	sense = NULL;
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb) {
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, PRINTK_HEADER
			       " residual %d\n", residual);

		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);
	free_page((unsigned long) page);
}
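/*
 * Top level sense dump: select the command mode or transport mode dump
 * routine based on the scsw type, honoring the per request suppress
 * flags so that expected errors are not logged.
 */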
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	u8 *sense = dasd_get_sense(irb);

	if (scsw_is_tm(&irb->scsw)) {
		/*
		 * In some cases the 'File Protected' or 'Incorrect Length'
		 * error might be expected and log messages shouldn't be
		 * written then. Check if the corresponding suppress bit is set.
		 */
		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
			return;
		if (scsw_cstat(&irb->scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
			return;

		dasd_eckd_dump_sense_tcw(device, req, irb);
	} else {
		/*
		 * In some cases the 'Command Reject' or 'No Record Found'
		 * error might be expected and log messages shouldn't be
		 * written then. Check if the corresponding suppress bit is set.
		 */
		if (sense && sense[0] & SNS0_CMD_REJECT &&
		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
			return;

		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
			return;

		dasd_eckd_dump_sense_ccw(device, req, irb);
	}
}
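/* PM freeze callback: detach the device from its LCU before suspend */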
static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
	/*
	 * the device should be disconnected from our LCU structure
	 * on restore we will reconnect it and reread LCU specific
	 * information like PAV support that might have changed
	 */
	dasd_alias_remove_device(device);
	dasd_alias_disconnect_device_from_lcu(device);

	return 0;
}
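/*
 * PM restore callback: reread configuration data, feature codes and
 * device characteristics and reconnect the device to alias management;
 * a changed UID is reported on the console.
 */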
static int dasd_eckd_restore_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_eckd_characteristics temp_rdc_data;
	int rc;
	struct dasd_uid temp_uid;
	unsigned long flags;
	unsigned long cqr_flags = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read configuration data failed, rc=%d", rc);
		goto out_err;
	}

	dasd_eckd_get_uid(device, &temp_uid);
	/* Generate device unique id */
	rc = dasd_eckd_generate_uid(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
		dev_err(&device->cdev->dev, "The UID of the DASD has "
			"changed\n");
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (rc)
		goto out_err;

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
	dasd_eckd_validate_server(device, cqr_flags);

	/* RE-Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read configuration data failed, rc=%d", rc);
		goto out_err2;
	}

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &temp_rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err2;
	}
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* add device to alias management */
	dasd_alias_add_device(device);

	return 0;

out_err2:
	dasd_alias_disconnect_device_from_lcu(device);
out_err:
	return -1;
}
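/*
 * reread the configuration data after an event that may have changed
 * the alias to base device mapping and report a reassigned base device
 */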
static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc, old_base;
	char print_uid[60];
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		if (strlen(uid.vduit) > 0)
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr, uid.vduit);
		else
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr);

		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}
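/*
 * Read the subsystem message buffer (PSF/RSSD suborder 0x03), first on
 * the path the attention interrupt arrived on, then on any open path.
 */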
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
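/*
 * Query host access information (PSF/RSSD suborder QHA) for the LSS
 * and volume of this device into a separately allocated DMA buffer.
 */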
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}
/*
 * return number of grouped devices
 */
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	int count = 0;
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
			count++;
	}

	kfree(access);
	return count;
}
/*
 * write host access information to a sequential file
 */
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	char sysplex[9] = "";
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		/* PGID */
		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
		/* FLAGS */
		seq_printf(m, "status_flags %02x\n", entry->status_flags);
		/* SYSPLEX NAME */
		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
		EBCASC(sysplex, sizeof(sysplex));
		seq_printf(m, "sysplex_name %8s\n", sysplex);
		/* SUPPORTED CYLINDER */
		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
		/* TIMESTAMP */
		seq_printf(m, "timestamp %lu\n", (unsigned long)
			   entry->timestamp);
	}
	kfree(access);

	return 0;
}
/*
 * Perform Subsystem Function - CUIR response
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * return configuration data that is referenced by record selector
 * if a record selector is specified or per default return the
 * conf_data pointer for the path specified by lpum
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return device->path[pathmask_to_pos(lpum)].conf_data;
}
/*
 * This function determines the scope of a reconfiguration request by
 * analysing the path and device selection data provided in the CUIR request.
 * Returns a path mask containing CUIR affected paths for the given device.
 *
 * If the CUIR request does not contain the required information, return the
 * path mask of the path the attention message for the CUIR request was
 * received on.
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if CUIR request does not specify the scope use the path
	   the attention message was presented on */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/* compare reference gneq and per_path gneq under
		   24 bit mask where mask bit 0 equals byte 7 of
		   the gneq and mask bit 24 equals byte 31 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}
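/* log a quiesce/resume message for every path affected by a CUIR action */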
static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
				       unsigned long paths, int action)
{
	int pos;

	while (paths) {
		/* get position of bit in mask */
		pos = 8 - ffs(paths);
		/* get channel path descriptor from this position */
		if (action == CUIR_QUIESCE)
			pr_warn("Service on the storage server caused path %x.%02x to go offline",
				device->path[pos].cssid,
				device->path[pos].chpid);
		else if (action == CUIR_RESUME)
			pr_info("Path %x.%02x is back online after service on the storage server",
				device->path[pos].cssid,
				device->path[pos].chpid);
		clear_bit(7 - pos, &paths);
	}
}
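/*
 * remove the CUIR affected paths from the operational path mask;
 * returns the scope mask, 0 if none of the paths is in use, or
 * -EINVAL if the last remaining path would be removed
 */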
static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
				      struct dasd_cuir_message *cuir)
{
	unsigned long tbcpm;

	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
	/* nothing to do if path is not in use */
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;
	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken
		   return error */
		return -EINVAL;
	}
	/* remove device from operational path mask */
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}
/*
 * walk through all devices and build a path mask to quiesce them
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error occurs,
 * no onlining is necessary; the storage server will notify the
 * already set offline devices again
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	unsigned long flags;
	int tbcpm;

	/* active devices */
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* inactive devices */
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist, group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	return tbcpm;
}
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before;
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}
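/*
 * decode a CUIR attention message, perform the requested quiesce or
 * resume action and send the matching PSF-CUIR response
 */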
static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}
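/* worker function: read the message buffer and handle CUIR requests */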
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;
	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
out:
	dasd_put_device(device);
	kfree(messages);
	kfree(data);
}
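/*
 * take a device reference and defer reading the attention message
 * buffer to a workqueue; the GFP_ATOMIC allocation allows this to be
 * called from interrupt context
 */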
static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
	struct check_attention_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
	dasd_get_device(device);
	data->device = device;
	data->lpum = lpum;
	schedule_work(&data->worker);
	return 0;
}
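/*
 * disable HPF on the given path unless it is the last operational
 * path; returns 1 if the path was taken out of the operational mask
 */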
static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	if (~lpum & dasd_path_get_opm(device)) {
		dasd_path_add_nohpfpm(device, lpum);
		dasd_path_remove_opm(device, lpum);
		dev_err(&device->cdev->dev,
			"Channel path %02X lost HPF functionality and is disabled\n",
			lpum);
		return 1;
	}
	return 0;
}
static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}
static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}
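/*
 * handle an 'HPF not available' error: disable HPF for a single path
 * or for the whole device, then stop the device and requeue all
 * outstanding requests
 */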
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent that any new I/O is started on the device and schedule a
	 * requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.freeze      = dasd_generic_pm_freeze,
	.thaw	     = dasd_generic_restore_device,
	.restore     = dasd_generic_restore_device,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};
/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and one
 * locate record ccw + 16 bytes of data. That makes:
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 249.5 blocks
 * for one request. Give a little safety and the result is 240.
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.verify_path = dasd_eckd_verify_path,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.freeze = dasd_eckd_pm_freeze,
	.restore = dasd_eckd_restore_device,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
};
static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
				   GFP_KERNEL | GFP_DMA);
	if (!dasd_reserve_req)
		return -ENOMEM;
	path_verification_worker = kmalloc(sizeof(*path_verification_worker),
					   GFP_KERNEL | GFP_DMA);
	if (!path_verification_worker) {
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
	if (!rawpadpage) {
		kfree(path_verification_worker);
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();
	else {
		kfree(path_verification_worker);
		kfree(dasd_reserve_req);
		free_page((unsigned long)rawpadpage);
	}
	return ret;
}

static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(path_verification_worker);
	kfree(dasd_reserve_req);
	free_page((unsigned long)rawpadpage);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);