// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */
#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
/*
 * raw track access always maps to 64k in memory
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
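
/*
 * Illustrative arithmetic (added note, not from the original source):
 * the three constants above are mutually consistent, since
 * 16 blocks x 4096 bytes = 64 KiB per track, and
 * 64 KiB / 512 bytes = 128 sectors per track.
 */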
MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);
struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};
/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
	__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};
static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);
/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
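
/*
 * Note (added for clarity): the five entries above describe the five
 * READ COUNT CCWs built by dasd_eckd_analysis_ccw() below - records
 * 1 to 4 on track 0 (head 0) plus record 1 on track 1 (head 1).
 */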
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
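
/*
 * Example (illustrative): ceil_quot(10, 4) == 3, i.e. a division that
 * rounds up. It is used below to count the 32- or 34-byte increments
 * occupied by key and data fields on a track.
 */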
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
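
/*
 * Worked example (illustrative, added note): for a 3390 track with
 * kl = 0 and dl = 4096, dn = ceil_quot(4102, 232) + 1 = 19, so the
 * function returns 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34))
 * = 1729 / 143 = 12, i.e. twelve 4 KiB records per track.
 */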
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
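
/*
 * Note (added for clarity): a cylinder number above 65535 does not fit
 * into the 16-bit cyl field, so its high-order bits are packed into the
 * upper bits of the head field (large-volume addressing). The matching
 * decode is the enhanced-addressing branch of dasd_eckd_track_from_irb()
 * below.
 */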
/*
 * calculate failing track from sense data depending on whether
 * it is an EAV device or not
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense = NULL;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;

	return 0;
}

static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		return -EINVAL;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		data->sector = 0xF;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		break;
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		data->sector = 0xF;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}
/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
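
/*
 * Example (illustrative, added note): with 4 KiB blocks a 3390 track
 * holds blk_per_trk = 12 records, so recid 0..2 are the CDL-specific
 * records on track 0 (sizes 28, 148 and 84 from sizes_trk0[]),
 * recid 3..11 are regular blocks, and recid 12..23 on track 1 are
 * LABEL_SIZE records again.
 */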
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
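
/*
 * Note (added for clarity): the resulting UID has the form
 * vendor.serial.ssid.unit_addr[.vduit] - the same fields printed by
 * the "%s.%s.%04x.%02x" format strings in dasd_eckd_read_conf() below
 * when a path UID and the device UID disagree.
 */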
/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
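
/*
 * Note (added for clarity): 0xE5 0xF1 0x4B 0xF0 is "V1.0" in EBCDIC
 * ('V' = 0xE5, '1' = 0xF1, '.' = 0x4B, '0' = 0xF0), announcing support
 * for the virtual device SNEQ as the comment above describes.
 */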
/*
 * Wakeup helper for read_conf
 * if the cqr is not done and needs some error recovery
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}

static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data ara */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;
}
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}
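
/*
 * Note (added for clarity): byte 18 of the generic NED carries the path
 * access classification in its low-order bits; the callers below treat
 * 0x02 as a non-preferred path and 0x03 as a preferred path (see the
 * switch statements in dasd_eckd_read_conf() and
 * dasd_eckd_path_available_action()).
 */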
static void dasd_eckd_store_conf_data(struct dasd_device *device,
				      struct dasd_conf_data *conf_data, int chp)
{
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;

	ccw_device_get_schid(device->cdev, &sch_id);
	/*
	 * path handling and read_conf allocate data
	 * free it before replacing the pointer
	 */
	kfree(device->path[chp].conf_data);
	device->path[chp].conf_data = conf_data;
	device->path[chp].cssid = sch_id.cssid;
	device->path[chp].ssid = sch_id.ssid;
	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
	if (chp_desc)
		device->path[chp].chpid = chp_desc->chpid;
	kfree(chp_desc);
}

static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
		dasd_path_notoper(device, i);
		dasd_path_remove_kobj(device, i);
	}
}
static void dasd_eckd_read_fc_security(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	u8 esm_valid;
	u8 esm[8];
	int chp;
	int rc;

	rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
	if (rc) {
		for (chp = 0; chp < 8; chp++)
			device->path[chp].fc_security = 0;
		return;
	}

	for (chp = 0; chp < 8; chp++) {
		if (esm_valid & (0x80 >> chp))
			device->path[chp].fc_security = esm[chp];
		else
			device->path[chp].fc_security = 0;
	}
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}

		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	dasd_eckd_read_fc_security(device);

	return path_err;
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	unsigned int mdc;
	int tpm;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc == 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
}
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	unsigned long flags;
	char print_uid[60];
	int rc, pos;

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		}
		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}
static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	if (data->tbvpm)
		dasd_eckd_path_available_action(device, data);
	if (data->fcsecpm)
		dasd_eckd_read_fc_security(device);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}
static int dasd_eckd_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	struct pe_handler_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = tbvpm;
	data->fcsecpm = fcsecpm;
	schedule_work(&data->worker);
	return 0;
}
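
/*
 * Design note (added for clarity): the GFP_ATOMIC allocation above may
 * fail, in which case the statically allocated pe_handler_worker is
 * used instead, serialized by dasd_pe_handler_mutex. do_pe_handler_work()
 * then unlocks the mutex rather than kfree()ing the data; the isglobal
 * flag records which variant is in use.
 */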
static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
static int dasd_eckd_is_ese(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.vol_info.ese;
}

static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.extent_pool_id;
}
/*
 * This value represents the total amount of available space. As more space is
 * allocated by ESE volumes, this value will decrease.
 * The data for this value is therefore updated on any call.
 */
static int dasd_eckd_space_configured(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_configured;
}

/*
 * The value of space allocated by an ESE volume may have changed and is
 * therefore updated on any call.
 */
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_allocated;
}

static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.logical_capacity;
}
static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
	struct ext_pool_exhaust_work_data *data;
	struct dasd_device *device;
	struct dasd_device *base;

	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
	device = data->device;
	base = data->base;

	if (!base)
		base = device;
	if (dasd_eckd_space_configured(base) != 0) {
		dasd_generic_space_avail(device);
	} else {
		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
	}

	dasd_put_device(device);
	kfree(data);
}

static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
				      struct dasd_ccw_req *cqr)
{
	struct ext_pool_exhaust_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
	dasd_get_device(device);
	data->device = device;

	if (cqr->block)
		data->base = cqr->block->base;
	else if (cqr->basedev)
		data->base = cqr->basedev;
	else
		data->base = NULL;

	schedule_work(&data->worker);

	return 0;
}
static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
					struct dasd_rssd_lcq *lcq)
{
	struct dasd_eckd_private *private = device->private;
	int pool_id = dasd_eckd_ext_pool_id(device);
	struct dasd_ext_pool_sum eps;
	int i;

	for (i = 0; i < lcq->pool_count; i++) {
		eps = lcq->ext_pool_sum[i];
		if (eps.pool_id == pool_id) {
			memcpy(&private->eps, &eps,
			       sizeof(struct dasd_ext_pool_sum));
		}
	}
}
/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
/*
 * Depending on the device type, the extent size is specified either as
 * cylinders per extent (CKD) or size per extent (FBA)
 * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl.
 */
static int dasd_eckd_ext_size(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_ext_pool_sum eps = private->eps;

	if (!eps.flags.extent_size_valid)
		return 0;
	if (eps.extent_size.size_1G)
		return 1113;
	if (eps.extent_size.size_16M)
		return 21;

	return 0;
}
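
/*
 * Worked example (illustrative, added note): an extent size of 1113
 * cylinders corresponds to the traditional 3390 model-1 capacity of
 * roughly 1 GB (1113 cyl x 15 trk x 56664 bytes), and 21 cylinders to
 * roughly 16 MB.
 */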
static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.warn_thrshld;
}

static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.capacity_at_warnlevel;
}

/*
 * Extent Pool out of space
 */
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.pool_oos;
}
/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags e.g. turn on failfast, to prevent blocking
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device,
				     unsigned long flags)
{
	struct dasd_eckd_private *private = device->private;
	int enable_pav = 1;
	int rc;

	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;
	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);

	/* may be requested feature is not available on server,
	 * therefore just report error and go ahead */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	return rc;
}

/*
 * worker to do a validate server in case of a lost pathgroup
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* schedule worker again if failed */
		schedule_work(&device->kick_validate);
		return;
	}

	dasd_put_device(device);
}

static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon. */
	if (!schedule_work(&device->kick_validate))
		dasd_put_device(device);
}
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;

	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	dasd_path_create_kobjects(device);

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Volume Information */
	dasd_eckd_read_vol_info(device);

	/* Read Extent Pool Information */
	dasd_eckd_read_ext_pool_info(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	dasd_eckd_clear_conf_data(device);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	if (!private)
		return;

	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	dasd_eckd_clear_conf_data(device);
}
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 2 tracks. */
	define_extent(ccw++, cqr->data, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 1. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 1, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
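/*
 * Summary (added for clarity, not part of the original source): the analysis
 * channel program built above is one define extent over tracks 0-1, a locate
 * record plus four read-count CCWs for the first four records of track 0, and
 * a second locate record plus one read-count CCW for the first record of
 * track 1. The count areas it collects are evaluated later in
 * dasd_eckd_end_analysis() to tell the compatible (CDL) and linux (LDL) disk
 * layouts apart.
 */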
/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
	char *sense;

	if (init_cqr->status == DASD_CQR_DONE)
		return INIT_CQR_OK;
	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
		 init_cqr->status == DASD_CQR_FAILED) {
		/* analysis failed, no other information available */
		sense = dasd_get_sense(&init_cqr->irb);
		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
			return INIT_CQR_UNFORMATTED;
		else
			return INIT_CQR_ERROR;
	} else
		return INIT_CQR_ERROR;
}
/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_device *device = init_cqr->startdev;
	struct dasd_eckd_private *private = device->private;

	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}
static int dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_ccw_req *init_cqr;

	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	/* first try without ERP, so we can later handle unformatted
	 * devices as special case
	 */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
	init_cqr->retries = 0;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device = block->base;
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}

	if (device->features & DASD_FEATURE_USERAW) {
		block->bp_block = DASD_RAW_BLOCKSIZE;
		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
		block->s2b_shift = 3;
		goto raw;
	}

	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
		    private->count_area[i].cyl != 0 ||
		    private->count_area[i].head != count_area_head[i] ||
		    private->count_area[i].record != count_area_rec[i]) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[3];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl) ||
			    private->count_area[i].cyl != 0 ||
			    private->count_area[i].head != count_area_head[i] ||
			    private->count_area[i].record != count_area_rec[i])
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}

	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);

raw:
	block->blocks = ((unsigned long) private->real_cyl *
			  private->rdc_data.trk_per_cyl *
			  blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 (((unsigned long) private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
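/*
 * Worked example (illustrative values, not from the original source): with a
 * 4096-byte block size on a 3390, recs_per_track() yields 12 blocks per
 * track. For real_cyl = 3339 and trk_per_cyl = 15 this gives
 * 3339 * 15 * 12 = 601020 blocks, and the dev_info() above would report
 * 4 KB/block, 2404080 KB total size and 48 KB/track.
 */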
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private = block->base->private;

	if (private->init_cqr_status < 0)
		return dasd_eckd_start_analysis(block);
	else
		return dasd_eckd_end_analysis(block);
}
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
}

static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	if (cancel_work_sync(&device->reload_device))
		dasd_put_device(device);
	if (cancel_work_sync(&device->kick_validate))
		dasd_put_device(device);

	return 0;
}

static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
}
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private = block->base->private;

	if (dasd_check_blocksize(block->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, block->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}
/*
 * Build the TCW request for the format check
 */
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
			  int enable_pav, struct eckd_count *fmt_buffer,
			  int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_device *startdev = NULL;
	struct tidaw *last_tidaw = NULL;
	struct dasd_ccw_req *cqr;
	struct itcw *itcw;
	int itcw_size;
	int count;
	int rc;
	int i;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	/*
	 * we're adding 'count' amount of tidaw to the itcw.
	 * calculate the corresponding itcw_size
	 */
	itcw_size = itcw_calc_size(0, count, 0);

	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;

	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
	if (IS_ERR(itcw)) {
		rc = -EINVAL;
		goto out_err;
	}

	cqr->cpaddr = itcw_get_tcw(itcw);
	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
			  sizeof(struct eckd_count),
			  count * sizeof(struct eckd_count), 0, rpt);
	if (rc)
		goto out_err;

	for (i = 0; i < count; i++) {
		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
					    sizeof(struct eckd_count));
		if (IS_ERR(last_tidaw)) {
			rc = -EINVAL;
			goto out_err;
		}
	}

	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	itcw_finalize(itcw);

	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = startdev->default_retries;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);

	return cqr;

out_err:
	dasd_sfree_request(cqr, startdev);

	return ERR_PTR(rc);
}
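/*
 * Note (added for clarity, not part of the original source): in transport
 * mode the whole range is checked with a single READ_COUNT_MT DCW; one TIDAW
 * per expected record points into fmt_buffer, which is why the itcw size
 * above is derived solely from count = rpt * number of tracks.
 */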
/*
 * Build the CCW request for the format check
 */
static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *startdev = NULL;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	void *data;
	int cplength, datasize;
	int use_prefix;
	int count;
	int i;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	use_prefix = base_priv->features.feature[8] & 0x01;

	if (use_prefix) {
		cplength = 1;
		datasize = sizeof(struct PFX_eckd_data);
	} else {
		cplength = 2;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data);
	}
	cplength += count;

	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;
	data = cqr->data;
	ccw = cqr->cpaddr;

	if (use_prefix) {
		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
			   count, 0, 0);
	} else {
		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
			      DASD_ECKD_CCW_READ_COUNT, startdev, 0);

		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;

		locate_record(ccw++, data, fdata->start_unit, 0, count,
			      DASD_ECKD_CCW_READ_COUNT, base, 0);
	}

	for (i = 0; i < count; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) fmt_buffer;
		ccw++;
		fmt_buffer++;
	}

	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = DASD_RETRIES;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
		       struct format_data_t *fdata, int enable_pav)
{
	struct dasd_eckd_private *base_priv;
	struct dasd_eckd_private *start_priv;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ch_t address;
	struct ccw1 *ccw;
	void *data;
	int rpt;
	int cplength, datasize;
	int i, j;
	int intensity = 0;
	int r0_perm;
	int nr_tracks;
	int use_prefix;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);

	nr_tracks = fdata->stop_unit - fdata->start_unit + 1;

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
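	/*
	 * Example combinations (added for clarity, restating the switch
	 * below): 0x00 formats tracks normally, 0x08 does the same with the
	 * compatible disk layout, 0x01/0x09 additionally write record zero,
	 * and 0x04/0x0c invalidate the tracks instead. 0x10 may be OR'ed into
	 * any of these to keep the subsystem from modifying record zero.
	 */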
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}

	use_prefix = base_priv->features.feature[8] & 0x01;

	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + (rpt*nr_tracks);
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 2 + rpt * nr_tracks;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&startdev->cdev->dev,
			 "An I/O control call used incorrect flags 0x%x\n",
			 fdata->intensity);
		return ERR_PTR(-EINVAL);
	}

	fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
	if (IS_ERR(fcp))
		return fcp;

	start_priv->count++;
	data = fcp->data;
	ccw = fcp->cpaddr;

	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct PFX_eckd_data *)data)
					->define_extent.ga_extended |= 0x04;
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct DE_eckd_data *) data)
					->ga_extended |= 0x04;
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt*nr_tracks,
			      DASD_ECKD_CCW_WRITE_CKD, base,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			       base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt * nr_tracks + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
			      base->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, base, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}

	for (j = 0; j < nr_tracks; j++) {
		/* calculate cylinder and head for the current track */
		set_ch_t(&address,
			 (fdata->start_unit + j) /
			 base_priv->rdc_data.trk_per_cyl,
			 (fdata->start_unit + j) %
			 base_priv->rdc_data.trk_per_cyl);
		if (intensity & 0x01) {	/* write record zero */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 0;
			ect->kl = 0;
			ect->dl = 8;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
		if ((intensity & ~0x08) & 0x04) {	/* erase track */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 1;
			ect->kl = 0;
			ect->dl = 0;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
		} else {		/* write remaining records */
			for (i = 0; i < rpt; i++) {
				ect = (struct eckd_count *) data;
				data += sizeof(struct eckd_count);
				ect->cyl = address.cyl;
				ect->head = address.head;
				ect->record = i + 1;
				ect->kl = 0;
				ect->dl = fdata->blksize;
				/*
				 * Check for special tracks 0-1
				 * when formatting CDL
				 */
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 0) {
					if (i < 3) {
						ect->kl = 4;
						ect->dl = sizes_trk0[i] - 4;
					}
				}
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 1) {
					ect->kl = 44;
					ect->dl = LABEL_SIZE - 44;
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				if (i != 0 || j == 0)
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD;
				else
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD_MT;
				ccw->flags = CCW_FLAG_SLI;
				ccw->count = 8;
				ccw->cda = (__u32)(addr_t) ect;
				ccw++;
			}
		}
	}

	fcp->startdev = startdev;
	fcp->memdev = startdev;
	fcp->basedev = base;
	fcp->retries = 256;
	fcp->expires = startdev->default_expires * HZ;
	fcp->buildclk = get_tod_clock();
	fcp->status = DASD_CQR_FILLED;

	return fcp;
}
/*
 * Wrapper function to build a CCW request depending on input data
 */
static struct dasd_ccw_req *
dasd_eckd_format_build_ccw_req(struct dasd_device *base,
			       struct format_data_t *fdata, int enable_pav,
			       int tpm, struct eckd_count *fmt_buffer, int rpt)
{
	struct dasd_ccw_req *ccw_req;

	if (!fmt_buffer) {
		ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
	} else {
		if (tpm)
			ccw_req = dasd_eckd_build_check_tcw(base, fdata,
							    enable_pav,
							    fmt_buffer, rpt);
		else
			ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
							fmt_buffer, rpt);
	}

	return ccw_req;
}
/*
 * Sanity checks on format_data
 */
static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
					  struct format_data_t *fdata)
{
	struct dasd_eckd_private *private = base->private;

	if (fdata->start_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&base->cdev->dev,
			 "Start track number %u used in formatting is too big\n",
			 fdata->start_unit);
		return -EINVAL;
	}
	if (fdata->stop_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&base->cdev->dev,
			 "Stop track number %u used in formatting is too big\n",
			 fdata->stop_unit);
		return -EINVAL;
	}
	if (fdata->start_unit > fdata->stop_unit) {
		dev_warn(&base->cdev->dev,
			 "Start track %u used in formatting exceeds end track\n",
			 fdata->start_unit);
		return -EINVAL;
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		dev_warn(&base->cdev->dev,
			 "The DASD cannot be formatted with block size %u\n",
			 fdata->blksize);
		return -EINVAL;
	}
	return 0;
}
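/*
 * Illustrative call (added for clarity; the values are hypothetical): a
 * format_data_t with start_unit = 0, stop_unit = 149 and blksize = 4096
 * passes these checks on any volume with at least 10 cylinders of 15 tracks,
 * since 149 < real_cyl * trk_per_cyl and 4096 is a supported block size.
 */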
/*
 * This function will process format_data originally coming from an IOCTL
 */
static int dasd_eckd_format_process_data(struct dasd_device *base,
					 struct format_data_t *fdata,
					 int enable_pav, int tpm,
					 struct eckd_count *fmt_buffer, int rpt,
					 struct irb *irb)
{
	struct dasd_eckd_private *private = base->private;
	struct dasd_ccw_req *cqr, *n;
	struct list_head format_queue;
	struct dasd_device *device;
	char *sense = NULL;
	int old_start, old_stop, format_step;
	int step, retry;
	int rc;

	rc = dasd_eckd_format_sanity_checks(base, fdata);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&format_queue);

	old_start = fdata->start_unit;
	old_stop = fdata->stop_unit;

	if (!tpm && fmt_buffer != NULL) {
		/* Command Mode / Format Check */
		format_step = 1;
	} else if (tpm && fmt_buffer != NULL) {
		/* Transport Mode / Format Check */
		format_step = DASD_CQR_MAX_CCW / rpt;
	} else {
		/* Normal Formatting */
		format_step = DASD_CQR_MAX_CCW /
			recs_per_track(&private->rdc_data, 0, fdata->blksize);
	}

	do {
		retry = 0;
		while (fdata->start_unit <= old_stop) {
			step = fdata->stop_unit - fdata->start_unit + 1;
			if (step > format_step) {
				fdata->stop_unit =
					fdata->start_unit + format_step - 1;
			}

			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
							     enable_pav, tpm,
							     fmt_buffer, rpt);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&format_queue))
						goto out;
					/*
					 * not enough memory available, start
					 * requests retry after first requests
					 * were finished
					 */
					retry = 1;
					break;
				}
				goto out_err;
			}
			list_add_tail(&cqr->blocklist, &format_queue);

			if (fmt_buffer) {
				step = fdata->stop_unit - fdata->start_unit + 1;
				fmt_buffer += rpt * step;
			}
			fdata->start_unit = fdata->stop_unit + 1;
			fdata->stop_unit = old_stop;
		}

		rc = dasd_sleep_on_queue(&format_queue);

out_err:
		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			if (cqr->status == DASD_CQR_FAILED) {
				/*
				 * Only get sense data if called by format
				 * check
				 */
				if (fmt_buffer && irb) {
					sense = dasd_get_sense(&cqr->irb);
					memcpy(irb, &cqr->irb, sizeof(*irb));
				}
				rc = -EIO;
			}
			list_del_init(&cqr->blocklist);
			dasd_ffree_request(cqr, device);
			private->count--;
		}

		if (rc && rc != -EIO)
			goto out;
		if (rc == -EIO) {
			/*
			 * In case fewer than the expected records are on the
			 * track, we will most likely get a 'No Record Found'
			 * error (in command mode) or a 'File Protected' error
			 * (in transport mode). Those particular cases shouldn't
			 * pass the -EIO to the IOCTL, therefore reset the rc
			 * and continue.
			 */
			if (sense &&
			    (sense[1] & SNS1_NO_REC_FOUND ||
			     sense[1] & SNS1_FILE_PROTECTED))
				retry = 1;
			else
				goto out;
		}
	} while (retry);

out:
	fdata->start_unit = old_start;
	fdata->stop_unit = old_stop;

	return rc;
}
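/*
 * Note (added for clarity): the loop above splits the range into chunks of
 * format_step tracks so each request stays below DASD_CQR_MAX_CCW channel
 * command words. For a normal format with 12 records per track this means
 * DASD_CQR_MAX_CCW / 12 tracks per request.
 */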
static int dasd_eckd_format_device(struct dasd_device *base,
				   struct format_data_t *fdata, int enable_pav)
{
	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
					     0, NULL);
}
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
				      struct dasd_block *block)
{
	struct dasd_format_entry *format;
	unsigned long flags;
	bool rc = false;

	spin_lock_irqsave(&block->format_lock, flags);
	list_for_each_entry(format, &block->format_list, list) {
		if (format->track == to_format->track) {
			rc = true;
			goto out;
		}
	}
	list_add_tail(&to_format->list, &block->format_list);

out:
	spin_unlock_irqrestore(&block->format_lock, flags);
	return rc;
}
static void clear_format_track(struct dasd_format_entry *format,
			       struct dasd_block *block)
{
	unsigned long flags;

	spin_lock_irqsave(&block->format_lock, flags);
	list_del_init(&format->list);
	spin_unlock_irqrestore(&block->format_lock, flags);
}
/*
 * Callback function to free ESE format requests.
 */
static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	struct dasd_eckd_private *private = device->private;
	struct dasd_format_entry *format = data;

	clear_format_track(format, cqr->basedev->block);
	private->count--;
	dasd_ffree_request(cqr, device);
}
static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
		     struct irb *irb)
{
	struct dasd_eckd_private *private;
	struct dasd_format_entry *format;
	struct format_data_t fdata;
	unsigned int recs_per_trk;
	struct dasd_ccw_req *fcqr;
	struct dasd_device *base;
	struct dasd_block *block;
	unsigned int blksize;
	struct request *req;
	sector_t first_trk;
	sector_t last_trk;
	sector_t curr_trk;
	int rc;

	req = cqr->callback_data;
	block = cqr->block;
	base = block->base;
	private = base->private;
	blksize = block->bp_block;
	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	format = &startdev->format_entry;

	first_trk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);
	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
	if (rc)
		return ERR_PTR(rc);

	if (curr_trk < first_trk || curr_trk > last_trk) {
		DBF_DEV_EVENT(DBF_WARNING, startdev,
			      "ESE error track %llu not within range %llu - %llu\n",
			      curr_trk, first_trk, last_trk);
		return ERR_PTR(-EINVAL);
	}
	format->track = curr_trk;
	/* test if track is already in formatting by another thread */
	if (test_and_set_format_track(format, block))
		return ERR_PTR(-EEXIST);

	fdata.start_unit = curr_trk;
	fdata.stop_unit = curr_trk;
	fdata.blksize = blksize;
	fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;

	rc = dasd_eckd_format_sanity_checks(base, &fdata);
	if (rc)
		return ERR_PTR(-EINVAL);

	/*
	 * We're building the request with PAV disabled as we're reusing
	 * the former startdev.
	 */
	fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
	if (IS_ERR(fcqr))
		return fcqr;

	fcqr->callback = dasd_eckd_ese_format_cb;
	fcqr->callback_data = (void *) format;
	dasd_add_request_tail(fcqr);

	return ERR_PTR(-EAGAIN);
}
/*
 * When data is read from an unformatted area of an ESE volume, this function
 * returns zeroed data and thereby mimics a read of zero data.
 *
 * The first unformatted track is the one that got the NRF error, the address is
 * encoded in the sense data.
 *
 * All tracks before have returned valid data and should not be touched.
 * All tracks after the unformatted track might be formatted or not. This is
 * currently not known, remember the processed data and return the remainder of
 * the request to the blocklayer in __dasd_cleanup_cqr().
 */
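/*
 * Illustrative scenario (added for clarity; the numbers are hypothetical): a
 * read covers blocks 0-35 (tracks 0-2 at 12 blocks per track) and the NRF
 * error names track 1. Blocks 0-11 keep their data (skip_block = 12), blocks
 * 12-23 are zeroed, and at block 24 (end_blk) proc_bytes is set to
 * 24 * blksize so the block layer re-queues the remainder.
 */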
static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
{
	struct dasd_eckd_private *private;
	sector_t first_trk, last_trk;
	sector_t first_blk, last_blk;
	unsigned int blksize, off;
	unsigned int recs_per_trk;
	struct dasd_device *base;
	struct req_iterator iter;
	struct dasd_block *block;
	unsigned int skip_block;
	unsigned int blk_count;
	struct request *req;
	struct bio_vec bv;
	sector_t curr_trk;
	sector_t end_blk;
	char *dst;
	int rc;

	req = (struct request *) cqr->callback_data;
	base = cqr->block->base;
	blksize = base->block->bp_block;
	block = cqr->block;
	private = base->private;
	skip_block = 0;
	blk_count = 0;

	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk = last_blk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);
	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
	if (rc)
		return rc;

	/* sanity check if the current track from sense data is valid */
	if (curr_trk < first_trk || curr_trk > last_trk) {
		DBF_DEV_EVENT(DBF_WARNING, base,
			      "ESE error track %llu not within range %llu - %llu\n",
			      curr_trk, first_trk, last_trk);
		return -EINVAL;
	}

	/*
	 * if not the first track got the NRF error we have to skip over valid
	 * blocks
	 */
	if (curr_trk != first_trk)
		skip_block = curr_trk * recs_per_trk - first_blk;

	/* we have no information beyond the current track */
	end_blk = (curr_trk + 1) * recs_per_trk;

	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		for (off = 0; off < bv.bv_len; off += blksize) {
			if (first_blk + blk_count >= end_blk) {
				cqr->proc_bytes = blk_count * blksize;
				return 0;
			}
			if (dst && !skip_block) {
				dst += off;
				memset(dst, 0, blksize);
			} else {
				skip_block--;
			}
			blk_count++;
		}
	}

	return 0;
}
/*
 * Helper function to count consecutive records of a single track.
 */
static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
				   int max)
{
	int head;
	int i;

	head = fmt_buffer[start].head;

	/*
	 * There are 3 conditions where we stop counting:
	 * - if data reoccurs (same head and record may reoccur), which may
	 *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
	 * - when the head changes, because we're iterating over several tracks
	 *   then (DASD_ECKD_CCW_READ_COUNT_MT)
	 * - when we've reached the end of sensible data in the buffer (the
	 *   record will be 0 then)
	 */
	for (i = start; i < max; i++) {
		if (i > start) {
			if ((fmt_buffer[i].head == head &&
			    fmt_buffer[i].record == 1) ||
			    fmt_buffer[i].head != head ||
			    fmt_buffer[i].record == 0)
				break;
		}
	}

	return i - start;
}
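/*
 * Example (added for clarity; the buffer contents are hypothetical): for a
 * buffer holding records 1..12 of head 5 followed by record 1 of head 6,
 * dasd_eckd_count_records(fmt_buffer, 0, 16) stops at the head change and
 * returns 12.
 */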
/*
 * Evaluate a given range of tracks. Data like number of records, blocksize,
 * record ids, and key length are compared with expected data.
 *
 * If a mismatch occurs, the corresponding error bit is set, as well as
 * additional information, depending on the error.
 */
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
					     struct format_check_t *cdata,
					     int rpt_max, int rpt_exp,
					     int trk_per_cyl, int tpm)
{
	struct ch_t geo;
	int max_entries;
	int count = 0;
	int trkcount;
	int blksize;
	int pos = 0;
	int i, j;
	int kl;

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	max_entries = trkcount * rpt_max;

	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
		/* Calculate the correct next starting position in the buffer */
		if (tpm) {
			while (fmt_buffer[pos].record == 0 &&
			       fmt_buffer[pos].dl == 0) {
				if (pos++ > max_entries)
					break;
			}
		} else {
			if (i != cdata->expect.start_unit)
				pos += rpt_max - count;
		}

		/* Calculate the expected geo values for the current track */
		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);

		/* Count and check number of records */
		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);

		if (count < rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
			break;
		}
		if (count > rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
			break;
		}

		for (j = 0; j < count; j++, pos++) {
			blksize = cdata->expect.blksize;
			kl = 0;

			/*
			 * Set special values when checking CDL formatted
			 * devices.
			 */
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 0) {
				if (j < 3) {
					blksize = sizes_trk0[j] - 4;
					kl = 4;
				}
			}
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 1) {
				blksize = LABEL_SIZE - 44;
				kl = 44;
			}

			/* Check blocksize */
			if (fmt_buffer[pos].dl != blksize) {
				cdata->result = DASD_FMT_ERR_BLKSIZE;
				goto out;
			}
			/* Check if key length is 0 */
			if (fmt_buffer[pos].kl != kl) {
				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
				goto out;
			}
			/* Check if record_id is correct */
			if (fmt_buffer[pos].cyl != geo.cyl ||
			    fmt_buffer[pos].head != geo.head ||
			    fmt_buffer[pos].record != (j + 1)) {
				cdata->result = DASD_FMT_ERR_RECORD_ID;
				goto out;
			}
		}
	}

out:
	/*
	 * In case of no errors, we need to decrease by one
	 * to get the correct positions.
	 */
	if (!cdata->result) {
		i--;
		pos--;
	}

	cdata->unit = i;
	cdata->num_records = count;
	cdata->rec = fmt_buffer[pos].record;
	cdata->blksize = fmt_buffer[pos].dl;
	cdata->key_length = fmt_buffer[pos].kl;
}
/*
 * Check the format of a range of tracks of a DASD.
 */
static int dasd_eckd_check_device_format(struct dasd_device *base,
					 struct format_check_t *cdata,
					 int enable_pav)
{
	struct dasd_eckd_private *private = base->private;
	struct eckd_count *fmt_buffer;
	struct irb irb;
	int rpt_max, rpt_exp;
	int fmt_buffer_size;
	int trk_per_cyl;
	int trkcount;
	int tpm = 0;
	int rc;

	trk_per_cyl = private->rdc_data.trk_per_cyl;

	/* Get maximum and expected amount of records per track */
	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);

	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!fmt_buffer)
		return -ENOMEM;

	/*
	 * A certain FICON feature subset is needed to operate in transport
	 * mode. Additionally, the support for transport mode is implicitly
	 * checked by comparing the buffer size with fcx_max_data. As long as
	 * the buffer size is smaller we can operate in transport mode and
	 * process multiple tracks. If not, only one track at once is being
	 * processed using command mode.
	 */
	if ((private->features.feature[40] & 0x04) &&
	    fmt_buffer_size <= private->fcx_max_data)
		tpm = 1;

	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
					   tpm, fmt_buffer, rpt_max, &irb);
	if (rc && rc != -EIO)
		goto out;
	if (rc == -EIO) {
		/*
		 * If our first attempt with transport mode enabled comes back
		 * with an incorrect length error, we're going to retry the
		 * check with command mode.
		 */
		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
			tpm = 0;
			rc = dasd_eckd_format_process_data(base, &cdata->expect,
							   enable_pav, tpm,
							   fmt_buffer, rpt_max,
							   &irb);
			if (rc)
				goto out;
		} else {
			goto out;
		}
	}

	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
					 trk_per_cyl, tpm);

out:
	kfree(fmt_buffer);

	return rc;
}
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	if (cqr->retries < 0) {
		cqr->status = DASD_CQR_FAILED;
		return;
	}
	cqr->status = DASD_CQR_FILLED;
	if (cqr->block && (cqr->startdev != cqr->block->base)) {
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
		cqr->lpm = dasd_path_get_opm(cqr->block->base);
	}
}
static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
	struct ccw_device *cdev = device->cdev;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		return dasd_3990_erp_action;
	case 0x9343:
	case 0x3880:
	default:
		return dasd_default_erp_action;
	}
}
static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
static void dasd_eckd_check_for_device_change(struct dasd_device *device,
					      struct dasd_ccw_req *cqr,
					      struct irb *irb)
{
	char mask;
	char *sense = NULL;
	struct dasd_eckd_private *private = device->private;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		/*
		 * for alias only, not in offline processing
		 * and only if not suspended
		 */
		if (!device->block && private->lcu &&
		    device->state == DASD_STATE_ONLINE &&
		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
			/* schedule worker to reload device */
			dasd_reload_device(device);
		}
		dasd_generic_handle_state_change(device);
		return;
	}

	sense = dasd_get_sense(irb);
	if (!sense)
		return;

	/* summary unit check */
	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: device already notified");
			return;
		}
		sense = dasd_get_sense(irb);
		if (!sense) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: no reason code available");
			clear_bit(DASD_FLAG_SUC, &device->flags);
			return;
		}
		private->suc_reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			      "eckd handle summary unit check: reason",
			      private->suc_reason);
		dasd_get_device(device);
		if (!schedule_work(&device->suc_work))
			dasd_put_device(device);

		return;
	}

	/* service information message SIM */
	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		return;
	}

	/* loss of device reservation is handled via base devices only
	 * as alias devices may be used with several bases
	 */
	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
	    (sense[7] == 0x3F) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
		if (device->features & DASD_FEATURE_FAILONSLCK)
			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
		dev_err(&device->cdev->dev,
			"The device reservation was lost\n");
	}
}
static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
				       unsigned int first_trk,
				       unsigned int last_trk)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int trks_per_vol;
	int rc = 0;

	trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;

	if (first_trk >= trks_per_vol) {
		dev_warn(&device->cdev->dev,
			 "Start track number %u used in the space release command is too big\n",
			 first_trk);
		rc = -EINVAL;
	} else if (last_trk >= trks_per_vol) {
		dev_warn(&device->cdev->dev,
			 "Stop track number %u used in the space release command is too big\n",
			 last_trk);
		rc = -EINVAL;
	} else if (first_trk > last_trk) {
		dev_warn(&device->cdev->dev,
			 "Start track %u used in the space release command exceeds the end track\n",
			 first_trk);
		rc = -EINVAL;
	}
	return rc;
}
/*
 * Helper function to count the amount of involved extents within a given range
 * with extent alignment in mind.
 */
static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
{
	int cur_pos = 0;
	int count = 0;
	int tmp;

	if (from == to)
		return 1;

	/* Count first partial extent */
	if (from % trks_per_ext != 0) {
		tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
		if (tmp > to)
			tmp = to;
		cur_pos = tmp - from + 1;
		count++;
	}
	/* Count full extents */
	if (to - (from + cur_pos) + 1 >= trks_per_ext) {
		tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
		count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
		cur_pos = tmp;
	}
	/* Count last partial extent */
	if (cur_pos < to)
		count++;

	return count;
}
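/*
 * Worked example (added for clarity; the numbers are hypothetical): with
 * trks_per_ext = 30, count_exts(10, 100, 30) counts one leading partial
 * extent (tracks 10-29), two full extents (30-59 and 60-89) and one trailing
 * partial extent (90-100), returning 4.
 */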
/*
 * Release allocated space for a given range or an entire volume.
 */
static struct dasd_ccw_req *
dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
		  struct request *req, unsigned int first_trk,
		  unsigned int last_trk, int by_extent)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_dso_ras_ext_range *ras_range;
	struct dasd_rssd_features *features;
	struct dasd_dso_ras_data *ras_data;
	u16 heads, beg_head, end_head;
	int cur_to_trk, cur_from_trk;
	struct dasd_ccw_req *cqr;
	u32 beg_cyl, end_cyl;
	struct ccw1 *ccw;
	int trks_per_ext;
	size_t ras_size;
	size_t size;
	int nr_exts;
	void *rq;
	int i;

	if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
		return ERR_PTR(-EINVAL);

	rq = req ? blk_mq_rq_to_pdu(req) : NULL;

	features = &private->features;

	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
	nr_exts = 0;
	if (by_extent)
		nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
	ras_size = sizeof(*ras_data);
	size = ras_size + (nr_exts * sizeof(*ras_range));

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate RAS request");
		return cqr;
	}

	ras_data = cqr->data;
	memset(ras_data, 0, size);

	ras_data->order = DSO_ORDER_RAS;
	ras_data->flags.vol_type = 0; /* CKD volume */
	/* Release specified extents or entire volume */
	ras_data->op_flags.by_extent = by_extent;
	/*
	 * This bit guarantees initialisation of tracks within an extent that is
	 * not fully specified, but is only supported with a certain feature
	 * subset.
	 */
	ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
	ras_data->lss = private->ned->ID;
	ras_data->dev_addr = private->ned->unit_addr;
	ras_data->nr_exts = nr_exts;

	if (by_extent) {
		heads = private->rdc_data.trk_per_cyl;
		cur_from_trk = first_trk;
		cur_to_trk = first_trk + trks_per_ext -
			(first_trk % trks_per_ext) - 1;
		if (cur_to_trk > last_trk)
			cur_to_trk = last_trk;
		ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);

		for (i = 0; i < nr_exts; i++) {
			beg_cyl = cur_from_trk / heads;
			beg_head = cur_from_trk % heads;
			end_cyl = cur_to_trk / heads;
			end_head = cur_to_trk % heads;

			set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
			set_ch_t(&ras_range->end_ext, end_cyl, end_head);

			cur_from_trk = cur_to_trk + 1;
			cur_to_trk = cur_from_trk + trks_per_ext - 1;
			if (cur_to_trk > last_trk)
				cur_to_trk = last_trk;
			ras_range++;
		}
	}

	ccw = cqr->cpaddr;
	ccw->cda = (__u32)(addr_t)cqr->data;
	ccw->cmd_code = DASD_ECKD_CCW_DSO;
	ccw->count = size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = block;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
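/*
 * Note (added for clarity, not part of the original source): the RAS payload
 * built above is a single dasd_dso_ras_data header followed by nr_exts
 * cylinder/head range entries, all carried by one DASD_ECKD_CCW_DSO CCW whose
 * count covers the whole buffer. With by_extent = 0 the header alone
 * releases the entire volume.
 */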
static int dasd_eckd_release_space_full(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	rc = dasd_sleep_on_interruptible(cqr);

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
static int dasd_eckd_release_space_trks(struct dasd_device *device,
					unsigned int from, unsigned int to)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block = device->block;
	struct dasd_ccw_req *cqr, *n;
	struct list_head ras_queue;
	unsigned int device_exts;
	int trks_per_ext;
	int stop, step;
	int cur_pos;
	int rc = 0;
	int retry;

	INIT_LIST_HEAD(&ras_queue);

	device_exts = private->real_cyl / dasd_eckd_ext_size(device);
	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;

	/* Make sure device limits are not exceeded */
	step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
	cur_pos = from;

	do {
		retry = 0;
		while (cur_pos < to) {
			stop = cur_pos + step -
				((cur_pos + step) % trks_per_ext) - 1;
			if (stop > to)
				stop = to;

			cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&ras_queue))
						goto out;
					retry = 1;
					break;
				}
				goto err_out;
			}

			spin_lock_irq(&block->queue_lock);
			list_add_tail(&cqr->blocklist, &ras_queue);
			spin_unlock_irq(&block->queue_lock);
			cur_pos = stop + 1;
		}

		rc = dasd_sleep_on_queue_interruptible(&ras_queue);

err_out:
		list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			spin_lock_irq(&block->queue_lock);
			list_del_init(&cqr->blocklist);
			spin_unlock_irq(&block->queue_lock);
			dasd_sfree_request(cqr, device);
			private->count--;
		}
	} while (retry);

out:
	return rc;
}
static int dasd_eckd_release_space(struct dasd_device *device,
				   struct format_data_t *rdata)
{
	if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
		return dasd_eckd_release_space_full(device);
	else if (rdata->intensity == 0)
		return dasd_eckd_release_space_trks(device, rdata->start_unit,
						    rdata->stop_unit);
	else
		return -EINVAL;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, basedev, 0) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	}

	return cqr;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
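	/*
	 * Example (added for clarity; the numbers are hypothetical): a
	 * request covering records 100-123 reserves room for 24 IDAW entries
	 * below; each record is at most one IDA block long, so one entry per
	 * record is a safe upper bound even when a page is split at a track
	 * boundary.
	 */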
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and it's idal ends with the track boundaries
	 */
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = NULL;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		seg_len = bv.bv_len;
		while (seg_len) {
			if (new_track) {
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = NULL;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev))
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;

	/* setup prefix data */
	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata.validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata.validity.verify_base = 1;
		pfxdata.validity.hyper_pav = 1;
	}

	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = set_timestamp(NULL, dedata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		/*
		 * If XRC is supported the System Time Stamp is set. The
		 * validity of the time stamp must be reflected in the prefix
		 * data as well.
		 */
		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	case DASD_ECKD_CCW_READ_COUNT_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		dedata->ga_extended |= 0x42;
		dedata->blk_size = blksize;
		lredata->operation.orientation = 0x2;
		lredata->operation.operation = 0x16;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		return -EINVAL;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
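	/*
	 * Worked example (added for clarity; the numbers are hypothetical):
	 * for blksize = 4096 on a 3390, dn = ceil_quot(4102, 232) = 18 and
	 * d = 9 + ceil_quot(4096 + 6 * 19, 34) = 133, so the first record
	 * (rec_on_trk = 1) yields sector = 49 / 8 = 6.
	 */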
	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
		lredata->auxiliary.length_valid = 0;
		lredata->auxiliary.length_scope = 0;
		lredata->sector = 0xff;
	} else {
		lredata->auxiliary.length_valid = 1;
		lredata->auxiliary.length_scope = 1;
		lredata->sector = sector;
	}
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
			   &pfxdata, sizeof(pfxdata), total_data_size);
	return PTR_ERR_OR_ZERO(dcw);
}
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;
	u8 tidaw_flags;
	unsigned int seg_len, part_len, len_to_track_end;
	unsigned char new_track;
	sector_t recid, trkid;
	unsigned int offs;
	unsigned int count, count_to_trk_end;
	int ret;

	basedev = block->base;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* trackbased I/O needs address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 * In the case of write requests, additional tidaws may
	 * be needed when a segment crosses a track boundary.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}
	if (rq_data_dir(req) == WRITE)
		ctidaw += (last_trk - first_trk);

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	if (IS_ERR(itcw)) {
		ret = -EINVAL;
		goto out_error;
	}
	cqr->cpaddr = itcw_get_tcw(itcw);
	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		ret = -EAGAIN;
		goto out_error;
	}
	len_to_track_end = 0;
	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries
	 * We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
	 * For write requests, a TIDAW must not cross track boundaries, because
	 * we have to set the CBC flag on the last tidaw for each track.
	 */
	if (rq_data_dir(req) == WRITE) {
		new_track = 1;
		recid = first_rec;
		rq_for_each_segment(bv, req, iter) {
			dst = page_address(bv.bv_page) + bv.bv_offset;
			seg_len = bv.bv_len;
			while (seg_len) {
				if (new_track) {
					trkid = recid;
					offs = sector_div(trkid, blk_per_trk);
					count_to_trk_end = blk_per_trk - offs;
					count = min((last_rec - recid + 1),
						    (sector_t)count_to_trk_end);
					len_to_track_end = count * blksize;
					recid += count;
					new_track = 0;
				}
				part_len = min(seg_len, len_to_track_end);
				seg_len -= part_len;
				len_to_track_end -= part_len;
				/* We need to end the tidaw at track end */
				if (!len_to_track_end) {
					new_track = 1;
					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
				} else
					tidaw_flags = 0;
				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
							    dst, part_len);
				if (IS_ERR(last_tidaw)) {
					ret = -EINVAL;
					goto out_error;
				}
				dst += part_len;
			}
		}
	} else {
		rq_for_each_segment(bv, req, iter) {
			dst = page_address(bv.bv_page) + bv.bv_offset;
			last_tidaw = itcw_add_tidaw(itcw, 0x00,
						    dst, bv.bv_len);
			if (IS_ERR(last_tidaw)) {
				ret = -EINVAL;
				goto out_error;
			}
		}
	}
	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	}

	return cqr;
out_error:
	dasd_sfree_request(cqr, startdev);
	return ERR_PTR(ret);
}

static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int cmdrtd, cmdwtd;
	int use_prefix;
	int fcx_multitrack;
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	unsigned int data_size;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	fcx_multitrack = private->features.feature[40] & 0x20;
	data_size = blk_rq_bytes(req);
	if (data_size % blksize)
		return ERR_PTR(-EINVAL);
	/* tpm write requests add CBC data on each track boundary */
	if (rq_data_dir(req) == WRITE)
		data_size += (last_trk - first_trk) * 4;

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if ((data_size <= private->fcx_max_data)
		   && (fcx_multitrack || (first_trk == last_trk))) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}

static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int first_offs;
	unsigned int trkcount;
	unsigned long *idaws;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * raw track access needs to be a multiple of 64k and on a 64k boundary
	 * For read requests we can fix an incorrect alignment by padding
	 * the request with dummy pages.
	 */
	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
		DASD_RAW_SECTORS_PER_TRACK;
	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
		DASD_RAW_SECTORS_PER_TRACK;
	basedev = block->base;
	if ((start_padding_sectors || end_padding_sectors) &&
	    (rq_data_dir(req) == WRITE)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "raw write not track aligned (%llu,%llu) req %p",
			      start_padding_sectors, end_padding_sectors, req);
		return ERR_PTR(-EINVAL);
	}

	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
		DASD_RAW_SECTORS_PER_TRACK;
	trkcount = last_trk - first_trk + 1;
	first_offs = 0;

	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Raw track based I/O needs IDAWs for each page,
	 * and not just for 64 bit addresses.
	 */
	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;

	/*
	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
	 * of extended parameter. This is needed for write full track.
	 */
	base_priv = basedev->private;
	use_prefix = base_priv->features.feature[8] & 0x01;
	if (use_prefix) {
		cplength = 1 + trkcount;
		size = sizeof(struct PFX_eckd_data) + 2;
	} else {
		cplength = 2 + trkcount;
		size = sizeof(struct DE_eckd_data) +
			sizeof(struct LRE_eckd_data) + 2;
	}
	size = ALIGN(size, 8);

	datasize = size + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, first_offs + 1, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (unsigned long *)(cqr->data + size);
	len_to_track_end = 0;
	if (start_padding_sectors) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/* maximum 3390 track size */
		ccw->count = 57326;
		/* 64k map to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = (__u32)(addr_t)idaws;
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		seg_len = bv.bv_len;
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		if (!len_to_track_end) {
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum 3390 track size */
			ccw->count = 57326;
			/* 64k map to one track */
			len_to_track_end = 65536;
			ccw->cda = (__u32)(addr_t)idaws;
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}

static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}

/*
 * Modify ccw/tcw in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
	struct ccw1 *ccw;
	struct PFX_eckd_data *pfxdata;
	struct tcw *tcw;
	struct tccb *tccb;
	struct dcw *dcw;

	if (cqr->cpmode == 1) {
		tcw = cqr->cpaddr;
		tccb = tcw_get_tccb(tcw);
		dcw = (struct dcw *)&tccb->tca[0];
		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
		pfxdata->validity.verify_base = 0;
		pfxdata->validity.hyper_pav = 0;
	} else {
		ccw = cqr->cpaddr;
		pfxdata = cqr->data;
		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
			pfxdata->validity.verify_base = 0;
			pfxdata->validity.hyper_pav = 0;
		}
	}
}

#define DASD_ECKD_CHANQ_MAX_SIZE 4

static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = startdev->private;
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	if ((base->features & DASD_FEATURE_USERAW))
		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
	else
		cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}

static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
				   struct request *req)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
	private = cqr->memdev->private;
	private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
	return dasd_eckd_free_cp(cqr, req);
}

static int
dasd_eckd_fill_info(struct dasd_device * device,
		    struct dasd_information2_t * info)
{
	struct dasd_eckd_private *private = device->private;

	info->label_block = 2;
	info->FBA_layout = private->uses_cdl ? 0 : 1;
	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(private->rdc_data);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(private->rdc_data));
	info->confdata_size = min((unsigned long)private->conf_len,
				  sizeof(info->configuration_data));
	memcpy(info->configuration_data, private->conf_data,
	       info->confdata_size);
	return 0;
}

/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a previously reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * SNID - Sense Path Group ID
 * This ioctl may be used in situations where I/O is stalled due to
 * a reserve, so if the normal dasd_smalloc_request fails, we use the
 * preallocated dasd_reserve_req.
 */
static int dasd_eckd_snid(struct dasd_device *device,
			  void __user *argp)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;
	struct dasd_snid_ioctl_data usrparm;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		return -EFAULT;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_snid_data), device,
				   NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNID;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 12;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->lpm = usrparm.path_mask;

	rc = dasd_sleep_on_immediatly(cqr);
	/* verify that I/O processing didn't modify the path mask */
	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
		rc = -EIO;
	if (!rc) {
		usrparm.data = *((struct dasd_snid_data *)cqr->data);
		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
			rc = -EFAULT;
	}

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Read performance statistics
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private = device->private;
	struct attrib_data_t attrib = private->attrib;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	rc = 0;
	if (copy_to_user(argp, (long *) &attrib,
			 sizeof(struct attrib_data_t)))
		rc = -EFAULT;

	return rc;
}

/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private = device->private;
	struct attrib_data_t attrib;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
		return -EFAULT;
	private->attrib = attrib;

	dev_info(&device->cdev->dev,
		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
		 private->attrib.operation, private->attrib.nr_cyl);
	return 0;
}

/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed and should be allocated */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/* alloc I/O data area */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			"Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			   rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}

static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
	struct dasd_device *device = block->base;

	switch (cmd) {
	case BIODASDGATTR:
		return dasd_eckd_get_attrib(device, argp);
	case BIODASDSATTR:
		return dasd_eckd_set_attrib(device, argp);
	case BIODASDPSRD:
		return dasd_eckd_performance(device, argp);
	case BIODASDRLSE:
		return dasd_eckd_release(device);
	case BIODASDRSRV:
		return dasd_eckd_reserve(device);
	case BIODASDSLCK:
		return dasd_eckd_steal_lock(device);
	case BIODASDSNID:
		return dasd_eckd_snid(device, argp);
	case BIODASDSYMMIO:
		return dasd_symm_io(device, argp);
	default:
		return -ENOTTY;
	}
}

/*
 * Dump the range of CCWs into 'page' buffer
 * and return number of printed chars.
 */
static int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		len += sprintf(page + len, PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);

		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			datap = (char *) *((addr_t *) (addr_t) from->cda);
		else
			datap = (char *) ((addr_t) from->cda);

		/* dump data (max 32 bytes) */
		for (count = 0; count < from->count && count < 32; count++) {
			if (count % 8 == 0) len += sprintf(page + len, " ");
			if (count % 4 == 0) len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	return len;
}

static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			 char *reason)
{
	u64 *sense;
	u64 *stat;

	sense = (u64 *) dasd_get_sense(irb);
	stat = (u64 *) &irb->scsw;
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
			      "%016llx %016llx %016llx %016llx",
			      reason, *stat, *((u32 *) (stat + 1)),
			      sense[0], sense[1], sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
			      reason, *stat, *((u32 *) (stat + 1)),
			      "NO VALID SENSE");
	}
}

/*
 * Print sense data and related channel program.
 * Parts are printed because printk buffer is only 1024 bytes.
 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk(KERN_ERR "%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa; /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2;	/* there is a gap - print header */
			len += sprintf(page, PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1;	/* there is a gap - print header */
			len += sprintf(page + len, PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk(KERN_ERR "%s", page);
	}
	free_page((unsigned long) page);
}

/*
 * Print sense data from a tcw.
 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			    "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	tsb = NULL;
	sense = NULL;
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb) {
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, PRINTK_HEADER
			       " residual %d\n", residual);

		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, PRINTK_HEADER
				      " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);
	free_page((unsigned long) page);
}

static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	u8 *sense = dasd_get_sense(irb);

	if (scsw_is_tm(&irb->scsw)) {
		/*
		 * In some cases the 'File Protected' or 'Incorrect Length'
		 * error might be expected and log messages shouldn't be written
		 * then. Check if the according suppress bit is set.
		 */
		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
			return;
		if (scsw_cstat(&irb->scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
			return;

		dasd_eckd_dump_sense_tcw(device, req, irb);
	} else {
		/*
		 * In some cases the 'Command Reject' or 'No Record Found'
		 * error might be expected and log messages shouldn't be
		 * written then. Check if the according suppress bit is set.
		 */
		if (sense && sense[0] & SNS0_CMD_REJECT &&
		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
			return;

		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
			return;

		dasd_eckd_dump_sense_ccw(device, req, irb);
	}
}

static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc, old_base;
	char print_uid[60];
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		if (strlen(uid.vduit) > 0)
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr, uid.vduit);
		else
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr);

		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}

static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n"
				, rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}

/*
 * return number of grouped devices
 */
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	int count = 0;
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
			count++;
	}

	kfree(access);
	return count;
}

/*
 * write host access information to a sequential file
 */
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	char sysplex[9] = "";
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		/* PGID */
		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
		/* FLAGS */
		seq_printf(m, "status_flags %02x\n", entry->status_flags);
		/* SYSPLEX NAME */
		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
		EBCASC(sysplex, sizeof(sysplex));
		seq_printf(m, "sysplex_name %8s\n", sysplex);
		/* SUPPORTED CYLINDER */
		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
		/* TIMESTAMP */
		seq_printf(m, "timestamp %lu\n", (unsigned long)
			   entry->timestamp);
	}
	kfree(access);

	return 0;
}

/*
 * Perform Subsystem Function - CUIR response
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * return configuration data that is referenced by record selector
 * if a record selector is specified or per default return the
 * conf_data pointer for the path specified by lpum
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return device->path[pathmask_to_pos(lpum)].conf_data;
}

/*
 * This function determines the scope of a reconfiguration request by
 * analysing the path and device selection data provided in the CUIR request.
 * Returns a path mask containing CUIR affected paths for the given device.
 *
 * If the CUIR request does not contain the required information, return the
 * path mask of the path the attention message for the CUIR request was
 * received on.
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if CUIR request does not specify the scope use the path
	   the attention message was presented on */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/* compare reference gneq and per_path gneq under
		   24 bit mask where mask bit 0 equals byte 7 of
		   the gneq and mask bit 24 equals byte 31 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}

static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
				       unsigned long paths, int action)
{
	int pos;

	while (paths) {
		/* get position of bit in mask */
		pos = 8 - ffs(paths);
		/* get channel path descriptor from this position */
		if (action == CUIR_QUIESCE)
			pr_warn("Service on the storage server caused path %x.%02x to go offline",
				device->path[pos].cssid,
				device->path[pos].chpid);
		else if (action == CUIR_RESUME)
			pr_info("Path %x.%02x is back online after service on the storage server",
				device->path[pos].cssid,
				device->path[pos].chpid);
		clear_bit(7 - pos, &paths);
	}
}

static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
				      struct dasd_cuir_message *cuir)
{
	unsigned long tbcpm;

	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
	/* nothing to do if path is not in use */
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;
	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken
		   return error */
		return -EINVAL;
	}
	/* remove device from operational path mask */
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}

/*
 * walk through all devices and build a path mask to quiesce them
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error
 * occurs no onlining necessary, the storage server will
 * notify the already set offline devices again
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	unsigned long flags;
	int tbcpm;

	/* active devices */
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* inactive devices */
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist, group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	return tbcpm;
}

static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}

static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}

static void dasd_eckd_oos_resume(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long flags;

	spin_lock_irqsave(&private->lcu->lock, flags);
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
	}
	spin_unlock_irqrestore(&private->lcu->lock, flags);
}

static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
				 __u8 lpum)
{
	struct dasd_oos_message *oos = messages;

	switch (oos->code) {
	case REPO_WARN:
	case POOL_WARN:
		dev_warn(&device->cdev->dev,
			 "Extent pool usage has reached a critical value\n");
		dasd_eckd_oos_resume(device);
		break;
	case REPO_EXHAUST:
	case POOL_EXHAUST:
		dev_warn(&device->cdev->dev,
			 "Extent pool is exhausted\n");
		break;
	case REPO_RELIEVE:
	case POOL_RELIEVE:
		dev_info(&device->cdev->dev,
			 "Extent pool physical space constraint has been relieved\n");
		break;
	}

	/* In any case, update related data */
	dasd_eckd_read_ext_pool_info(device);

	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}

static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;

	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
	if (messages->length == ATTENTION_LENGTH_OOS &&
	    messages->format == ATTENTION_FORMAT_OOS)
		dasd_eckd_handle_oos(device, messages, data->lpum);

out:
	dasd_put_device(device);
	kfree(messages);
	kfree(data);
}

static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
	struct check_attention_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
	dasd_get_device(device);
	data->device = device;
	data->lpum = lpum;
	schedule_work(&data->worker);
	return 0;
}

static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	if (~lpum & dasd_path_get_opm(device)) {
		dasd_path_add_nohpfpm(device, lpum);
		dasd_path_remove_opm(device, lpum);
		dev_err(&device->cdev->dev,
			"Channel path %02X lost HPF functionality and is disabled\n",
			lpum);
		return 1;
	}
	return 0;
}
static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}
static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}
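/*
 * React to an error in High Performance FICON (transport mode) I/O:
 * depending on the sense qualifier, HPF is disabled for the failing
 * path only or for the whole device. If the device loses HPF entirely,
 * new I/O is blocked and outstanding requests are requeued so that they
 * are rebuilt without HPF.
 */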
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent any new I/O from being started on the device and
	 * schedule a requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
/*
 * Initialize block layer request queue.
 */
static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
{
	unsigned int logical_block_size = block->bp_block;
	struct request_queue *q = block->request_queue;
	struct dasd_device *device = block->base;
	int max;

	if (device->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256;
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track, so the max_hw_sectors
		 * value is 2048 x 512B = 1024kB = 16 tracks
		 */
		max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
	} else {
		max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
	}
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	q->limits.max_dev_sectors = max;
	blk_queue_logical_block_size(q, logical_block_size);
	blk_queue_max_hw_sectors(q, max);
	blk_queue_max_segments(q, USHRT_MAX);
	/* With page sized segments each segment can be translated into one idaw/tidaw */
	blk_queue_max_segment_size(q, PAGE_SIZE);
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}
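/*
 * CCW bus glue: connect the device IDs in dasd_eckd_ids with the probe,
 * online/offline and event callbacks of this discipline.
 */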
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.setup_blk_queue = dasd_eckd_setup_blk_queue,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
};
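/*
 * dasd_reserve_req, dasd_vol_info_req and pe_handler_worker are
 * allocated once at module load time with GFP_DMA, so they remain
 * usable under memory pressure and stay below 2GB where the channel
 * subsystem can address them.
 */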
static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
				   GFP_KERNEL | GFP_DMA);
	if (!dasd_reserve_req)
		return -ENOMEM;
	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
				    GFP_KERNEL | GFP_DMA);
	if (!dasd_vol_info_req) {
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
				    GFP_KERNEL | GFP_DMA);
	if (!pe_handler_worker) {
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
	if (!rawpadpage) {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();
	else {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		free_page((unsigned long)rawpadpage);
	}
	return ret;
}
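/*
 * Module teardown: unregister the CCW driver and release all buffers
 * allocated in dasd_eckd_init().
 */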
static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(pe_handler_worker);
	kfree(dasd_reserve_req);
	kfree(dasd_vol_info_req);
	free_page((unsigned long)rawpadpage);
}
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);