// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */
#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"
/*
 * raw track access always maps to 64k in memory
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
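/* consistency check: 16 blocks * 4096 bytes == 128 sectors * 512 bytes == 64 KiB per raw track */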
MODULE_DESCRIPTION("S/390 DASD ECKD Disks device driver");
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_eckd_discipline;
/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;
#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);
struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};
/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
	__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);
struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};
static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);
static int dasd_eckd_query_pprc_status(struct dasd_device *,
				       struct dasd_pprc_data_sc4 *);
/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev);
	return ret;
}
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
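/*
 * ceil_quot() is plain integer division rounded up, e.g.
 * ceil_quot(10, 4) == 3. recs_per_track() below uses it to round key
 * and data field lengths up to the cell size of each device type
 * before dividing the per-track capacity by the per-record cost.
 */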
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
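/*
 * Note on the encoding above: for extended address volumes the 28-bit
 * cylinder number is split - the low 16 bits go into geo->cyl and the
 * remaining high bits are folded into the upper 12 bits of the 16-bit
 * geo->head field, with the 4-bit head number in the low nibble.
 */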
/*
 * calculate failing track from sense data depending on whether
 * it is an EAV device or not
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}
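/*
 * In the enhanced (EAV) format above, sense bytes 28-31 thus carry a
 * 28-bit cylinder number plus a 4-bit head; without DASD_SENSE_BIT_3
 * only a 16-bit cylinder from sense bytes 29-30 is available.
 */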
static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->cda = virt_to_dma32(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		return -EINVAL;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {
		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
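/*
 * A worked example for define_extent(): for a request spanning tracks
 * 30..44 on a device with 15 tracks per cylinder, begcyl/beghead are
 * 2/0 and endcyl/endhead are 2/14, so the extent covers exactly
 * cylinder 2. With DASD_SEQ_PRESTAGE or DASD_SEQ_ACCESS the end
 * cylinder is widened by private->attrib.nr_cyl so the storage server
 * can prestage data beyond the current request.
 */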
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector = 0;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = virt_to_dma32(data);
	}

	memset(data, 0, sizeof(*data));
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
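/*
 * For full-track writes the two extended parameter bytes above form a
 * 16-bit big-endian bit mask, presumably with one bit per 4k block of
 * the raw track (DASD_RAW_BLOCK_PER_TRACK is 16); the shifts set the
 * most significant 'count' bits of that mask.
 */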
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = virt_to_dma32(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = virt_to_dma32(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->conf.ned->unit_addr;
	pfxdata->base_lss = basepriv->conf.ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
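/*
 * prefix() is the format-0 shorthand: the resulting PFX CCW payload
 * carries only the define extent part, while prefix_LRE() called with
 * format 1 also embeds the locate record extended data.
 */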
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = virt_to_dma32(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}
/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
/* create unique id from private structure. */
static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
	int count;

	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, conf->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, &conf->ned->serial,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = conf->gneq->subsystemID;
	uid->real_unit_addr = conf->ned->unit_addr;
	if (conf->sneq) {
		uid->type = conf->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = conf->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (conf->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				conf->vdsneq->uit[count]);
		}
	}
}
/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->conf.ned || !private->conf.gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(&private->conf, &private->uid);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}
static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}
/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_conf *path_conf)
{
	struct dasd_uid device_uid;
	struct dasd_uid path_uid;

	create_uid(path_conf, &path_uid);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
}
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = virt_to_dma32(rcd_buffer);
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
/*
 * Wakeup helper for read_conf
 * if the cqr is not done and needs some error recovery
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = dma32_to_virt(ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
{
	struct dasd_sneq *sneq;
	int i, count;

	conf->ned = NULL;
	conf->sneq = NULL;
	conf->vdsneq = NULL;
	conf->gneq = NULL;
	count = conf->len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)conf->data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			conf->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			conf->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			conf->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			conf->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!conf->ned || !conf->gneq) {
		conf->ned = NULL;
		conf->sneq = NULL;
		conf->vdsneq = NULL;
		conf->gneq = NULL;
		return -EINVAL;
	}
	return 0;
}
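/*
 * Identifier key used above: flags.identifier 1 with format 1 is the
 * SNEQ, 1 with format 4 the virtual device SNEQ, 2 the general NEQ
 * (GNEQ), and 3 with res1 == 1 the NED. A configuration record that
 * lacks either an NED or a GNEQ is rejected as unusable.
 */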
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}
static void dasd_eckd_store_conf_data(struct dasd_device *device,
				      struct dasd_conf_data *conf_data, int chp)
{
	struct dasd_eckd_private *private = device->private;
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;
	void *cdp;

	/*
	 * path handling and read_conf allocate data
	 * free it before replacing the pointer
	 * also replace the old private->conf_data pointer
	 * with the new one if this points to the same data
	 */
	cdp = device->path[chp].conf_data;
	if (private->conf.data == cdp) {
		private->conf.data = (void *)conf_data;
		dasd_eckd_identify_conf_parts(&private->conf);
	}
	ccw_device_get_schid(device->cdev, &sch_id);
	device->path[chp].conf_data = conf_data;
	device->path[chp].cssid = sch_id.cssid;
	device->path[chp].ssid = sch_id.ssid;
	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
	if (chp_desc)
		device->path[chp].chpid = chp_desc->chpid;
	kfree(chp_desc);
	kfree(cdp);
}
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf.data = NULL;
	private->conf.len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
		dasd_path_notoper(device, i);
	}
}
static void dasd_eckd_read_fc_security(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	u8 esm_valid;
	u8 esm[8];
	int chp;
	int rc;

	rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
	if (rc) {
		for (chp = 0; chp < 8; chp++)
			device->path[chp].fc_security = 0;
		return;
	}

	for (chp = 0; chp < 8; chp++) {
		if (esm_valid & (0x80 >> chp))
			device->path[chp].fc_security = esm[chp];
		else
			device->path[chp].fc_security = 0;
	}
}
static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid)
{
	struct dasd_uid uid;

	create_uid(conf, &uid);
	snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s",
		 uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr,
		 uid.vduit[0] ? "." : "", uid.vduit);
}
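/*
 * The resulting UID string is "vendor.serial.ssid.unitaddr", e.g.
 * (hypothetical values) "IBM.750000000ABC01.1234.0a", with ".vduit"
 * appended when a virtual device SNEQ supplied one.
 */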
static int dasd_eckd_check_cabling(struct dasd_device *device,
				   void *conf_data, __u8 lpm)
{
	char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
	struct dasd_eckd_private *private = device->private;
	struct dasd_conf path_conf;

	path_conf.data = conf_data;
	path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
	if (dasd_eckd_identify_conf_parts(&path_conf))
		return 1;

	if (dasd_eckd_compare_path_uid(device, &path_conf)) {
		dasd_eckd_get_uid_string(&path_conf, print_path_uid);
		dasd_eckd_get_uid_string(&private->conf, print_device_uid);
		dev_err(&device->cdev->dev,
			"Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
			lpm, print_path_uid, print_device_uid);
		return 1;
	}

	return 0;
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf.data = conf_data;
			private->conf.len = conf_len;
			if (dasd_eckd_identify_conf_parts(&private->conf)) {
				private->conf.data = NULL;
				private->conf.len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID so that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
			dasd_path_add_cablepm(device, lpm);
			path_err = -EINVAL;
			kfree(conf_data);
			continue;
		}

		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	unsigned int mdc;
	int tpm;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc == 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	}
	return (u32)mdc * FCX_MAX_DATA_FACTOR;
}
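/*
 * ccw_device_get_mdc() reports the path group's maximum data chunk
 * count; both here and in verify_fcx_max_data() below it is scaled by
 * FCX_MAX_DATA_FACTOR to get the byte limit for a single transport
 * mode (zHPF) request.
 */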
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf.data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(&private->conf)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	char print_uid[DASD_UID_STRLEN];
	struct dasd_conf path_conf;
	unsigned long flags;
	int rc, pos;

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original conf_data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_conf.data = (void *)&path_rcd_buf;
		path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_conf)) {
			path_conf.data = NULL;
			path_conf.len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_conf)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_conf)) {
				dasd_eckd_get_uid_string(&path_conf, print_uid);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		} else {
			/*
			 * path is operational but path config data could not
			 * be stored due to low mem condition
			 * add it to the error path mask and schedule a path
			 * verification later that this could be added again
			 */
			epm |= lpm;
		}
		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		if (epm) {
			dasd_path_add_tbvpm(device, epm);
			dasd_device_set_timer(device, 50);
		}
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}
static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	if (data->tbvpm)
		dasd_eckd_path_available_action(device, data);
	if (data->fcsecpm)
		dasd_eckd_read_fc_security(device);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}
static int dasd_eckd_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	struct pe_handler_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = tbvpm;
	data->fcsecpm = fcsecpm;
	schedule_work(&data->worker);
	return 0;
}
static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = virt_to_dma32(features);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
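/*
 * The PSF/RSSD pairing above - a Perform Subsystem Function CCW that
 * carries the PRSSD order, command-chained (CCW_FLAG_CC) to a Read
 * Subsystem Data CCW that retrieves the answer - is reused below for
 * the volume storage query and the logical configuration query.
 */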
/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->conf.ned->ID;
	prssdp->volume = private->conf.ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(vsq);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
static int dasd_eckd_is_ese(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.vol_info.ese;
}
static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.extent_pool_id;
}
/*
 * This value represents the total amount of available space. As more space is
 * allocated by ESE volumes, this value will decrease.
 * The data for this value is therefore updated on any call.
 */
static int dasd_eckd_space_configured(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_configured;
}
/*
 * The value of space allocated by an ESE volume may have changed and is
 * therefore updated on any call.
 */
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_allocated;
}
static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.logical_capacity;
}
static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
	struct ext_pool_exhaust_work_data *data;
	struct dasd_device *device;
	struct dasd_device *base;

	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
	device = data->device;
	base = data->base;

	if (!base)
		base = device;
	if (dasd_eckd_space_configured(base) != 0) {
		dasd_generic_space_avail(device);
	} else {
		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
	}

	dasd_put_device(device);
	kfree(data);
}
static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
				      struct dasd_ccw_req *cqr)
{
	struct ext_pool_exhaust_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
	dasd_get_device(device);
	data->device = device;

	if (cqr->block)
		data->base = cqr->block->base;
	else if (cqr->basedev)
		data->base = cqr->basedev;
	else
		data->base = NULL;

	schedule_work(&data->worker);

	return 0;
}
static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
					struct dasd_rssd_lcq *lcq)
{
	struct dasd_eckd_private *private = device->private;
	int pool_id = dasd_eckd_ext_pool_id(device);
	struct dasd_ext_pool_sum eps;
	int i;

	for (i = 0; i < lcq->pool_count; i++) {
		eps = lcq->ext_pool_sum[i];
		if (eps.pool_id == pool_id) {
			memcpy(&private->eps, &eps,
			       sizeof(struct dasd_ext_pool_sum));
		}
	}
}
/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(lcq);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
/*
 * Depending on the device type, the extent size is specified either as
 * cylinders per extent (CKD) or size per extent (FBA)
 * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl.
 */
static int dasd_eckd_ext_size(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_ext_pool_sum eps = private->eps;

	if (!eps.flags.extent_size_valid)
		return 0;
	if (eps.extent_size.size_1G)
		return 1113;
	if (eps.extent_size.size_16M)
		return 21;

	return 0;
}
static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.warn_thrshld;
}
static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.capacity_at_warnlevel;
}
/*
 * Extent Pool out of space
 */
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.pool_oos;
}
/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = virt_to_dma32(psf_ssc_data);
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags e.g. turn on failfast, to prevent blocking
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device,
				     unsigned long flags)
{
	struct dasd_eckd_private *private = device->private;
	int enable_pav = 1;
	int rc;

	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;
	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);

	/* may be requested feature is not available on server,
	 * therefore just report error and go ahead */
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
	return rc;
}
/*
 * worker to do a validate server in case of a lost pathgroup
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* schedule worker again if failed */
		schedule_work(&device->kick_validate);
		return;
	}

	dasd_put_device(device);
}
static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon. */
	if (!schedule_work(&device->kick_validate))
		dasd_put_device(device);
}
/*
 * return if the device is the copy relation primary if a copy relation is active
 */
static int dasd_device_is_primary(struct dasd_device *device)
{
	if (!device->copy)
		return 1;

	if (device->copy->active->device == device)
		return 1;

	return 0;
}
static int dasd_eckd_alloc_block(struct dasd_device *device)
{
	struct dasd_block *block;
	struct dasd_uid temp_uid;

	if (!dasd_device_is_primary(device))
		return 0;

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd block structure");
			return PTR_ERR(block);
		}
		device->block = block;
		block->base = device;
	}

	return 0;
}
static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->rdc_data.facilities.PPRC_enabled;
}
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;
	device->aq_timeouts = DASD_RETRIES_MAX;

	if (private->conf.gneq) {
		value = 1;
		for (i = 0; i < private->conf.gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->conf.gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err1;
	}

	/* setup PPRC for device from devmap */
	rc = dasd_devmap_set_device_copy_relation(device->cdev,
						  dasd_eckd_pprc_enabled(device));
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"copy relation setup failed, rc=%d", rc);
		goto out_err1;
	}

	/* check if block device is needed and allocate in case */
	rc = dasd_eckd_alloc_block(device);
	if (rc)
		goto out_err1;

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	dasd_eckd_read_fc_security(device);
	dasd_path_create_kobjects(device);

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Volume Information */
	dasd_eckd_read_vol_info(device);

	/* Read Extent Pool Information */
	dasd_eckd_read_ext_pool_info(device);

	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	dasd_eckd_clear_conf_data(device);
	dasd_path_remove_kobjects(device);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	if (!private)
		return;

	dasd_alias_disconnect_device_from_lcu(device);
	private->conf.ned = NULL;
	private->conf.sneq = NULL;
	private->conf.vdsneq = NULL;
	private->conf.gneq = NULL;
	dasd_eckd_clear_conf_data(device);
	dasd_path_remove_kobjects(device);
}
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 2 tracks. */
	define_extent(ccw++, cqr->data, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = virt_to_dma32(count_data);
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 1. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 1, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = virt_to_dma32(count_data);

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);

	return cqr;
}
/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
	char *sense;

	if (init_cqr->status == DASD_CQR_DONE)
		return INIT_CQR_OK;
	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
		 init_cqr->status == DASD_CQR_FAILED) {
		sense = dasd_get_sense(&init_cqr->irb);
		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
			return INIT_CQR_UNFORMATTED;
		return INIT_CQR_ERROR;
	}
	return INIT_CQR_ERROR;
}
/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the devices has not been marked
 * for deletion in the meantime).
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_device *device = init_cqr->startdev;
	struct dasd_eckd_private *private = device->private;

	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}
static int dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_ccw_req *init_cqr;

	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	/* first try without ERP, so we can later handle unformatted
	 * devices as special case
	 */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
	init_cqr->retries = 0;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}
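
/*
 * Evaluate the count areas gathered by the analysis request: decide between
 * the OS/390 compatible disk layout (cdl) and the plain Linux disk layout,
 * derive block size, shift factor and total capacity, and reject
 * unformatted or unsupported volumes.
 */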
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device = block->base;
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}

	if (device->features & DASD_FEATURE_USERAW) {
		block->bp_block = DASD_RAW_BLOCKSIZE;
		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
		block->s2b_shift = 3;
		goto raw;
	}

	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
		    private->count_area[i].cyl != 0 ||
		    private->count_area[i].head != count_area_head[i] ||
		    private->count_area[i].record != count_area_rec[i]) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[3];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl) ||
			    private->count_area[i].cyl != 0 ||
			    private->count_area[i].head != count_area_head[i] ||
			    private->count_area[i].record != count_area_rec[i])
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}

	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);

raw:
	block->blocks = ((unsigned long) private->real_cyl *
			  private->rdc_data.trk_per_cyl *
			  blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 (((unsigned long) private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private = block->base->private;

	if (private->init_cqr_status < 0)
		return dasd_eckd_start_analysis(block);
	else
		return dasd_eckd_end_analysis(block);
}
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
}

static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	if (cancel_work_sync(&device->reload_device))
		dasd_put_device(device);
	if (cancel_work_sync(&device->kick_validate))
		dasd_put_device(device);

	return 0;
}

static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
}
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private = block->base->private;

	if (dasd_check_blocksize(block->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, block->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}
/*
 * Build the TCW request for the format check
 */
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
			  int enable_pav, struct eckd_count *fmt_buffer,
			  int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_device *startdev = NULL;
	struct tidaw *last_tidaw = NULL;
	struct dasd_ccw_req *cqr;
	struct itcw *itcw;
	int itcw_size;
	int count;
	int rc;
	int i;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	/*
	 * we're adding 'count' amount of tidaw to the itcw.
	 * calculate the corresponding itcw_size
	 */
	itcw_size = itcw_calc_size(0, count, 0);

	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;

	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
	if (IS_ERR(itcw)) {
		rc = -EINVAL;
		goto out_err;
	}

	cqr->cpaddr = itcw_get_tcw(itcw);
	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
			  sizeof(struct eckd_count),
			  count * sizeof(struct eckd_count), 0, rpt);
	if (rc)
		goto out_err;

	for (i = 0; i < count; i++) {
		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
					    sizeof(struct eckd_count));
		if (IS_ERR(last_tidaw)) {
			rc = -EINVAL;
			goto out_err;
		}
	}

	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	itcw_finalize(itcw);

	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = startdev->default_retries;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);

	return cqr;

out_err:
	dasd_sfree_request(cqr, startdev);

	return ERR_PTR(rc);
}
/*
 * Build the CCW request for the format check
 */
static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *startdev = NULL;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	void *data;
	int cplength, datasize;
	int use_prefix;
	int count;
	int i;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	use_prefix = base_priv->features.feature[8] & 0x01;

	if (use_prefix) {
		cplength = 1;
		datasize = sizeof(struct PFX_eckd_data);
	} else {
		cplength = 2;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data);
	}
	cplength += count;

	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;
	data = cqr->data;
	ccw = cqr->cpaddr;

	if (use_prefix) {
		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
			   count, 0, 0);
	} else {
		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
			      DASD_ECKD_CCW_READ_COUNT, startdev, 0);

		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;

		locate_record(ccw++, data, fdata->start_unit, 0, count,
			      DASD_ECKD_CCW_READ_COUNT, base, 0);
	}

	for (i = 0; i < count; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = virt_to_dma32(fmt_buffer);
		ccw++;
		fmt_buffer++;
	}

	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = DASD_RETRIES;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
		       struct format_data_t *fdata, int enable_pav)
{
	struct dasd_eckd_private *base_priv;
	struct dasd_eckd_private *start_priv;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ch_t address;
	struct ccw1 *ccw;
	void *data;
	int rpt;
	int cplength, datasize;
	int i, j;
	int intensity = 0;
	int r0_perm;
	int nr_tracks;
	int use_prefix;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);

	nr_tracks = fdata->stop_unit - fdata->start_unit + 1;

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}

	use_prefix = base_priv->features.feature[8] & 0x01;

	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + (rpt*nr_tracks);
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 2 + rpt * nr_tracks;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&startdev->cdev->dev,
			 "An I/O control call used incorrect flags 0x%x\n",
			 fdata->intensity);
		return ERR_PTR(-EINVAL);
	}

	fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
	if (IS_ERR(fcp))
		return fcp;

	start_priv->count++;
	data = fcp->data;
	ccw = fcp->cpaddr;

	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct PFX_eckd_data *)data)
					->define_extent.ga_extended |= 0x04;
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct DE_eckd_data *) data)
					->ga_extended |= 0x04;
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt*nr_tracks,
			      DASD_ECKD_CCW_WRITE_CKD, base,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			       base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt * nr_tracks + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
			      base->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, base, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}
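
	/*
	 * One eckd_count area per record is filled in below and wired up to
	 * its own write CCW: record zero first if requested, then either a
	 * single invalidating record per track or all rpt records of the
	 * track, with CDL-specific sizes on cylinder 0, heads 0 and 1.
	 */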
	for (j = 0; j < nr_tracks; j++) {
		/* calculate cylinder and head for the current track */
		set_ch_t(&address,
			 (fdata->start_unit + j) /
			 base_priv->rdc_data.trk_per_cyl,
			 (fdata->start_unit + j) %
			 base_priv->rdc_data.trk_per_cyl);
		if (intensity & 0x01) {	/* write record zero */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 0;
			ect->kl = 0;
			ect->dl = 8;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = virt_to_dma32(ect);
			ccw++;
		}
		if ((intensity & ~0x08) & 0x04) {	/* erase track */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 1;
			ect->kl = 0;
			ect->dl = 0;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = virt_to_dma32(ect);
		} else {		/* write remaining records */
			for (i = 0; i < rpt; i++) {
				ect = (struct eckd_count *) data;
				data += sizeof(struct eckd_count);
				ect->cyl = address.cyl;
				ect->head = address.head;
				ect->record = i + 1;
				ect->kl = 0;
				ect->dl = fdata->blksize;
				/*
				 * Check for special tracks 0-1
				 * when formatting CDL
				 */
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 0) {
					if (i < 3) {
						ect->kl = 4;
						ect->dl = sizes_trk0[i] - 4;
					}
				}
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 1) {
					ect->kl = 44;
					ect->dl = LABEL_SIZE - 44;
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				if (i != 0 || j == 0)
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD;
				else
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD_MT;
				ccw->flags = CCW_FLAG_SLI;
				ccw->count = 8;
				ccw->cda = virt_to_dma32(ect);
				ccw++;
			}
		}
	}

	fcp->startdev = startdev;
	fcp->memdev = startdev;
	fcp->basedev = base;
	fcp->retries = 256;
	fcp->expires = startdev->default_expires * HZ;
	fcp->buildclk = get_tod_clock();
	fcp->status = DASD_CQR_FILLED;

	return fcp;
}
/*
 * Wrapper function to build a CCW request depending on input data
 */
static struct dasd_ccw_req *
dasd_eckd_format_build_ccw_req(struct dasd_device *base,
			       struct format_data_t *fdata, int enable_pav,
			       int tpm, struct eckd_count *fmt_buffer, int rpt)
{
	struct dasd_ccw_req *ccw_req;

	if (!fmt_buffer) {
		ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
	} else {
		if (tpm)
			ccw_req = dasd_eckd_build_check_tcw(base, fdata,
							    enable_pav,
							    fmt_buffer, rpt);
		else
			ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
							fmt_buffer, rpt);
	}

	return ccw_req;
}
/*
 * Sanity checks on format_data
 */
static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
					  struct format_data_t *fdata)
{
	struct dasd_eckd_private *private = base->private;

	if (fdata->start_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&base->cdev->dev,
			 "Start track number %u used in formatting is too big\n",
			 fdata->start_unit);
		return -EINVAL;
	}
	if (fdata->stop_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&base->cdev->dev,
			 "Stop track number %u used in formatting is too big\n",
			 fdata->stop_unit);
		return -EINVAL;
	}
	if (fdata->start_unit > fdata->stop_unit) {
		dev_warn(&base->cdev->dev,
			 "Start track %u used in formatting exceeds end track\n",
			 fdata->start_unit);
		return -EINVAL;
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		dev_warn(&base->cdev->dev,
			 "The DASD cannot be formatted with block size %u\n",
			 fdata->blksize);
		return -EINVAL;
	}
	return 0;
}
/*
 * This function will process format_data originally coming from an IOCTL
 */
static int dasd_eckd_format_process_data(struct dasd_device *base,
					 struct format_data_t *fdata,
					 int enable_pav, int tpm,
					 struct eckd_count *fmt_buffer, int rpt,
					 struct irb *irb)
{
	struct dasd_eckd_private *private = base->private;
	struct dasd_ccw_req *cqr, *n;
	struct list_head format_queue;
	struct dasd_device *device;
	char *sense = NULL;
	int old_start, old_stop, format_step;
	int step, retry;
	int rc;

	rc = dasd_eckd_format_sanity_checks(base, fdata);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&format_queue);

	old_start = fdata->start_unit;
	old_stop = fdata->stop_unit;

	if (!tpm && fmt_buffer != NULL) {
		/* Command Mode / Format Check */
		format_step = 1;
	} else if (tpm && fmt_buffer != NULL) {
		/* Transport Mode / Format Check */
		format_step = DASD_CQR_MAX_CCW / rpt;
	} else {
		/* Normal Formatting */
		format_step = DASD_CQR_MAX_CCW /
			recs_per_track(&private->rdc_data, 0, fdata->blksize);
	}

	do {
		retry = 0;
		while (fdata->start_unit <= old_stop) {
			step = fdata->stop_unit - fdata->start_unit + 1;
			if (step > format_step) {
				fdata->stop_unit =
					fdata->start_unit + format_step - 1;
			}

			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
							     enable_pav, tpm,
							     fmt_buffer, rpt);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&format_queue))
						goto out;
					/*
					 * not enough memory available, start
					 * requests retry after first requests
					 * were finished
					 */
					retry = 1;
					break;
				}
				goto out_err;
			}
			list_add_tail(&cqr->blocklist, &format_queue);

			if (fmt_buffer) {
				step = fdata->stop_unit - fdata->start_unit + 1;
				fmt_buffer += rpt * step;
			}
			fdata->start_unit = fdata->stop_unit + 1;
			fdata->stop_unit = old_stop;
		}

		rc = dasd_sleep_on_queue(&format_queue);

out_err:
		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			if (cqr->status == DASD_CQR_FAILED) {
				/*
				 * Only get sense data if called by format
				 * check
				 */
				if (fmt_buffer && irb) {
					sense = dasd_get_sense(&cqr->irb);
					memcpy(irb, &cqr->irb, sizeof(*irb));
				}
				rc = -EIO;
			}
			list_del_init(&cqr->blocklist);
			dasd_ffree_request(cqr, device);
			private->count--;
		}

		if (rc && rc != -EIO)
			break;
		if (rc == -EIO) {
			/*
			 * In case fewer than the expected records are on the
			 * track, we will most likely get a 'No Record Found'
			 * error (in command mode) or a 'File Protected' error
			 * (in transport mode). Those particular cases shouldn't
			 * pass the -EIO to the IOCTL, therefore reset the rc
			 * and continue.
			 */
			if (sense &&
			    (sense[1] & SNS1_NO_REC_FOUND ||
			     sense[1] & SNS1_FILE_PROTECTED))
				retry = 1;
			else
				break;
		}
	} while (retry);

out:
	fdata->start_unit = old_start;
	fdata->stop_unit = old_stop;

	return rc;
}
static int dasd_eckd_format_device(struct dasd_device *base,
				   struct format_data_t *fdata, int enable_pav)
{
	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
					     0, NULL);
}
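
/*
 * The next two helpers serialize on-demand formatting of ESE tracks: a
 * dasd_format_entry is added to the per-block format_list while a track is
 * being formatted, and the block's trkcount is bumped once formatting is
 * done so that concurrent requests can detect the change.
 */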
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
				      struct dasd_ccw_req *cqr)
{
	struct dasd_block *block = cqr->block;
	struct dasd_format_entry *format;
	unsigned long flags;
	bool rc = false;

	spin_lock_irqsave(&block->format_lock, flags);
	if (cqr->trkcount != atomic_read(&block->trkcount)) {
		/*
		 * The number of formatted tracks has changed after request
		 * start and we can not tell if the current track was involved.
		 * To avoid data corruption treat it as if the current track is
		 * involved
		 */
		rc = true;
		goto out;
	}
	list_for_each_entry(format, &block->format_list, list) {
		if (format->track == to_format->track) {
			rc = true;
			goto out;
		}
	}
	list_add_tail(&to_format->list, &block->format_list);

out:
	spin_unlock_irqrestore(&block->format_lock, flags);
	return rc;
}
static void clear_format_track(struct dasd_format_entry *format,
			       struct dasd_block *block)
{
	unsigned long flags;

	spin_lock_irqsave(&block->format_lock, flags);
	atomic_inc(&block->trkcount);
	list_del_init(&format->list);
	spin_unlock_irqrestore(&block->format_lock, flags);
}
/*
 * Callback function to free ESE format requests.
 */
static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	struct dasd_eckd_private *private = device->private;
	struct dasd_format_entry *format = data;

	clear_format_track(format, cqr->basedev->block);
	private->count--;
	dasd_ffree_request(cqr, device);
}
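
/*
 * Build a format request for the single track that raised the NRF error on
 * an ESE volume. The track address is taken from the sense data and checked
 * against the range covered by the failed request before formatting starts.
 */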
static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
		     struct irb *irb)
{
	struct dasd_eckd_private *private;
	struct dasd_format_entry *format;
	struct format_data_t fdata;
	unsigned int recs_per_trk;
	struct dasd_ccw_req *fcqr;
	struct dasd_device *base;
	struct dasd_block *block;
	unsigned int blksize;
	struct request *req;
	sector_t first_trk;
	sector_t last_trk;
	sector_t curr_trk;
	int rc;

	req = dasd_get_callback_data(cqr);
	block = cqr->block;
	base = block->base;
	private = base->private;
	blksize = block->bp_block;
	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	format = &startdev->format_entry;

	first_trk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);
	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
	if (rc)
		return ERR_PTR(rc);

	if (curr_trk < first_trk || curr_trk > last_trk) {
		DBF_DEV_EVENT(DBF_WARNING, startdev,
			      "ESE error track %llu not within range %llu - %llu\n",
			      curr_trk, first_trk, last_trk);
		return ERR_PTR(-EINVAL);
	}
	format->track = curr_trk;
	/* test if track is already in formatting by another thread */
	if (test_and_set_format_track(format, cqr)) {
		/* this is no real error so do not count down retries */
		cqr->retries++;
		return ERR_PTR(-EEXIST);
	}

	fdata.start_unit = curr_trk;
	fdata.stop_unit = curr_trk;
	fdata.blksize = blksize;
	fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;

	rc = dasd_eckd_format_sanity_checks(base, &fdata);
	if (rc)
		return ERR_PTR(-EINVAL);

	/*
	 * We're building the request with PAV disabled as we're reusing
	 * the former startdev.
	 */
	fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
	if (IS_ERR(fcqr))
		return fcqr;

	fcqr->callback = dasd_eckd_ese_format_cb;
	fcqr->callback_data = (void *) format;

	return fcqr;
}
/*
 * When data is read from an unformatted area of an ESE volume, this function
 * returns zeroed data and thereby mimics a read of zero data.
 *
 * The first unformatted track is the one that got the NRF error, the address is
 * encoded in the sense data.
 *
 * All tracks before have returned valid data and should not be touched.
 * All tracks after the unformatted track might be formatted or not. This is
 * currently not known, remember the processed data and return the remainder of
 * the request to the blocklayer in __dasd_cleanup_cqr().
 */
static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
{
	struct dasd_eckd_private *private;
	sector_t first_trk, last_trk;
	sector_t first_blk, last_blk;
	unsigned int blksize, off;
	unsigned int recs_per_trk;
	struct dasd_device *base;
	struct req_iterator iter;
	struct dasd_block *block;
	unsigned int skip_block;
	unsigned int blk_count;
	struct request *req;
	struct bio_vec bv;
	sector_t curr_trk;
	sector_t end_blk;
	char *dst;
	int rc;

	req = (struct request *) cqr->callback_data;
	base = cqr->block->base;
	blksize = base->block->bp_block;
	block = cqr->block;
	private = base->private;
	skip_block = 0;
	blk_count = 0;

	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk = last_blk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);
	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
	if (rc)
		return rc;

	/* sanity check if the current track from sense data is valid */
	if (curr_trk < first_trk || curr_trk > last_trk) {
		DBF_DEV_EVENT(DBF_WARNING, base,
			      "ESE error track %llu not within range %llu - %llu\n",
			      curr_trk, first_trk, last_trk);
		return -EINVAL;
	}

	/*
	 * if not the first track got the NRF error we have to skip over valid
	 * blocks
	 */
	if (curr_trk != first_trk)
		skip_block = curr_trk * recs_per_trk - first_blk;

	/* we have no information beyond the current track */
	end_blk = (curr_trk + 1) * recs_per_trk;

	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			if (first_blk + blk_count >= end_blk) {
				cqr->proc_bytes = blk_count * blksize;
				return 0;
			}
			if (dst && !skip_block)
				memset(dst, 0, blksize);
			else
				skip_block--;
			dst += blksize;
			blk_count++;
		}
	}

	return 0;
}
/*
 * Helper function to count consecutive records of a single track.
 */
static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
				   int max)
{
	int head;
	int i;

	head = fmt_buffer[start].head;

	/*
	 * There are 3 conditions where we stop counting:
	 * - if data reoccurs (same head and record may reoccur), which may
	 *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
	 * - when the head changes, because we're iterating over several tracks
	 *   then (DASD_ECKD_CCW_READ_COUNT_MT)
	 * - when we've reached the end of sensible data in the buffer (the
	 *   record will be 0 then)
	 */
	for (i = start; i < max; i++) {
		if (i > start) {
			if ((fmt_buffer[i].head == head &&
			    fmt_buffer[i].record == 1) ||
			    fmt_buffer[i].head != head ||
			    fmt_buffer[i].record == 0)
				break;
		}
	}

	return i - start;
}
/*
 * Evaluate a given range of tracks. Data like number of records, blocksize,
 * record ids, and key length are compared with expected data.
 *
 * If a mismatch occurs, the corresponding error bit is set, as well as
 * additional information, depending on the error.
 */
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
					     struct format_check_t *cdata,
					     int rpt_max, int rpt_exp,
					     int trk_per_cyl, int tpm)
{
	struct ch_t geo;
	int max_entries;
	int count = 0;
	int trkcount;
	int blksize;
	int pos = 0;
	int i, j;
	int kl;

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	max_entries = trkcount * rpt_max;

	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
		/* Calculate the correct next starting position in the buffer */
		if (tpm) {
			while (fmt_buffer[pos].record == 0 &&
			       fmt_buffer[pos].dl == 0) {
				if (pos++ > max_entries)
					break;
			}
		} else {
			if (i != cdata->expect.start_unit)
				pos += rpt_max - count;
		}

		/* Calculate the expected geo values for the current track */
		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);

		/* Count and check number of records */
		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);

		if (count < rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
			goto out;
		}
		if (count > rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
			goto out;
		}

		for (j = 0; j < count; j++, pos++) {
			blksize = cdata->expect.blksize;
			kl = 0;

			/*
			 * Set special values when checking CDL formatted
			 * devices.
			 */
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 0) {
				if (j < 3) {
					blksize = sizes_trk0[j] - 4;
					kl = 4;
				}
			}
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 1) {
				blksize = LABEL_SIZE - 44;
				kl = 44;
			}

			/* Check blocksize */
			if (fmt_buffer[pos].dl != blksize) {
				cdata->result = DASD_FMT_ERR_BLKSIZE;
				goto out;
			}
			/* Check if key length is 0 */
			if (fmt_buffer[pos].kl != kl) {
				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
				goto out;
			}
			/* Check if record_id is correct */
			if (fmt_buffer[pos].cyl != geo.cyl ||
			    fmt_buffer[pos].head != geo.head ||
			    fmt_buffer[pos].record != (j + 1)) {
				cdata->result = DASD_FMT_ERR_RECORD_ID;
				goto out;
			}
		}
	}

out:
	/*
	 * In case of no errors, we need to decrease by one
	 * to get the correct positions.
	 */
	if (!cdata->result) {
		i--;
		pos--;
	}

	cdata->unit = i;
	cdata->num_records = count;
	cdata->rec = fmt_buffer[pos].record;
	cdata->blksize = fmt_buffer[pos].dl;
	cdata->key_length = fmt_buffer[pos].kl;
}
/*
 * Check the format of a range of tracks of a DASD.
 */
static int dasd_eckd_check_device_format(struct dasd_device *base,
					 struct format_check_t *cdata,
					 int enable_pav)
{
	struct dasd_eckd_private *private = base->private;
	struct eckd_count *fmt_buffer;
	struct irb irb;
	int rpt_max, rpt_exp;
	int fmt_buffer_size;
	int trk_per_cyl;
	int trkcount;
	int tpm = 0;
	int rc;

	trk_per_cyl = private->rdc_data.trk_per_cyl;

	/* Get maximum and expected amount of records per track */
	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);

	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!fmt_buffer)
		return -ENOMEM;

	/*
	 * A certain FICON feature subset is needed to operate in transport
	 * mode. Additionally, the support for transport mode is implicitly
	 * checked by comparing the buffer size with fcx_max_data. As long as
	 * the buffer size is smaller we can operate in transport mode and
	 * process multiple tracks. If not, only one track at once is being
	 * processed using command mode.
	 */
	if ((private->features.feature[40] & 0x04) &&
	    fmt_buffer_size <= private->fcx_max_data)
		tpm = 1;

	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
					   tpm, fmt_buffer, rpt_max, &irb);
	if (rc && rc != -EIO)
		goto out;
	if (rc == -EIO) {
		/*
		 * If our first attempt with transport mode enabled comes back
		 * with an incorrect length error, we're going to retry the
		 * check with command mode.
		 */
		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
			tpm = 0;
			rc = dasd_eckd_format_process_data(base, &cdata->expect,
							   enable_pav, tpm,
							   fmt_buffer, rpt_max,
							   &irb);
			if (rc)
				goto out;
		} else {
			goto out;
		}
	}

	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
					 trk_per_cyl, tpm);

out:
	kfree(fmt_buffer);

	return rc;
}
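
/*
 * Requests terminated while running on an alias device are reset to their
 * base device before being requeued, see below.
 */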
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	if (cqr->retries < 0) {
		cqr->status = DASD_CQR_FAILED;
		return;
	}
	cqr->status = DASD_CQR_FILLED;
	if (cqr->block && (cqr->startdev != cqr->block->base)) {
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
		cqr->lpm = dasd_path_get_opm(cqr->block->base);
	}
}
static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
	struct ccw_device *cdev = device->cdev;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		return dasd_3990_erp_action;
	case 0x9343:
	case 0x3880:
	default:
		return dasd_default_erp_action;
	}
}

static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
static void dasd_eckd_check_for_device_change(struct dasd_device *device,
					      struct dasd_ccw_req *cqr,
					      struct irb *irb)
{
	char mask;
	char *sense = NULL;
	struct dasd_eckd_private *private = device->private;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		/*
		 * for alias only, not in offline processing
		 * and only if not suspended
		 */
		if (!device->block && private->lcu &&
		    device->state == DASD_STATE_ONLINE &&
		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
			/* schedule worker to reload device */
			dasd_reload_device(device);
		}
		dasd_generic_handle_state_change(device);
		return;
	}

	sense = dasd_get_sense(irb);
	if (!sense)
		return;

	/* summary unit check */
	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: device already notified");
			return;
		}
		sense = dasd_get_sense(irb);
		if (!sense) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: no reason code available");
			clear_bit(DASD_FLAG_SUC, &device->flags);
			return;
		}
		private->suc_reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			      "eckd handle summary unit check: reason",
			      private->suc_reason);
		dasd_get_device(device);
		if (!schedule_work(&device->suc_work))
			dasd_put_device(device);

		return;
	}

	/* service information message SIM */
	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		return;
	}

	/* loss of device reservation is handled via base devices only
	 * as alias devices may be used with several bases
	 */
	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
	    (sense[7] == 0x3F) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
		if (device->features & DASD_FEATURE_FAILONSLCK)
			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
		dev_err(&device->cdev->dev,
			"The device reservation was lost\n");
	}
}
static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
				       unsigned int first_trk,
				       unsigned int last_trk)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int trks_per_vol;
	int rc = 0;

	trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;

	if (first_trk >= trks_per_vol) {
		dev_warn(&device->cdev->dev,
			 "Start track number %u used in the space release command is too big\n",
			 first_trk);
		rc = -EINVAL;
	} else if (last_trk >= trks_per_vol) {
		dev_warn(&device->cdev->dev,
			 "Stop track number %u used in the space release command is too big\n",
			 last_trk);
		rc = -EINVAL;
	} else if (first_trk > last_trk) {
		dev_warn(&device->cdev->dev,
			 "Start track %u used in the space release command exceeds the end track\n",
			 first_trk);
		rc = -EINVAL;
	}
	return rc;
}
/*
 * Helper function to count the amount of involved extents within a given range
 * with extent alignment in mind.
 */
static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
{
	int cur_pos = 0;
	int count = 0;
	int tmp;

	if (from == to)
		return 1;

	/* Count first partial extent */
	if (from % trks_per_ext != 0) {
		tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
		if (tmp > to)
			tmp = to;
		cur_pos = tmp - from + 1;
		count++;
	}
	/* Count full extents */
	if (to - (from + cur_pos) + 1 >= trks_per_ext) {
		tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
		count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
		cur_pos = tmp;
	}
	/* Count last partial extent */
	if (cur_pos < to)
		count++;

	return count;
}
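
/*
 * Example (illustrative values): with 16 tracks per extent, the track range
 * 10..40 touches the extents 0-15, 16-31 and 32-47, so count_exts(10, 40, 16)
 * returns 3: one partial extent in front (tracks 10..15), one full extent
 * (16..31), and one partial extent at the end (32..40).
 */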
static int dasd_in_copy_relation(struct dasd_device *device)
{
	struct dasd_pprc_data_sc4 *temp;
	int rc;

	if (!dasd_eckd_pprc_enabled(device))
		return 0;

	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	rc = dasd_eckd_query_pprc_status(device, temp);
	if (!rc)
		rc = temp->dev_info[0].state;

	kfree(temp);
	return rc;
}
/*
 * Release allocated space for a given range or an entire volume.
 */
static struct dasd_ccw_req *
dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
		  struct request *req, unsigned int first_trk,
		  unsigned int last_trk, int by_extent)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_dso_ras_ext_range *ras_range;
	struct dasd_rssd_features *features;
	struct dasd_dso_ras_data *ras_data;
	u16 heads, beg_head, end_head;
	int cur_to_trk, cur_from_trk;
	struct dasd_ccw_req *cqr;
	u32 beg_cyl, end_cyl;
	int copy_relation;
	struct ccw1 *ccw;
	int trks_per_ext;
	size_t ras_size;
	size_t size;
	int nr_exts;
	void *rq;
	int i;

	if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
		return ERR_PTR(-EINVAL);

	copy_relation = dasd_in_copy_relation(device);
	if (copy_relation < 0)
		return ERR_PTR(copy_relation);

	rq = req ? blk_mq_rq_to_pdu(req) : NULL;

	features = &private->features;

	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
	nr_exts = 0;
	if (by_extent)
		nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
	ras_size = sizeof(*ras_data);
	size = ras_size + (nr_exts * sizeof(*ras_range));

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate RAS request");
		return cqr;
	}

	ras_data = cqr->data;
	memset(ras_data, 0, size);

	ras_data->order = DSO_ORDER_RAS;
	ras_data->flags.vol_type = 0; /* CKD volume */
	/* Release specified extents or entire volume */
	ras_data->op_flags.by_extent = by_extent;
	/*
	 * This bit guarantees initialisation of tracks within an extent that is
	 * not fully specified, but is only supported with a certain feature
	 * subset and for devices not in a copy relation.
	 */
	if (features->feature[56] & 0x01 && !copy_relation)
		ras_data->op_flags.guarantee_init = 1;

	ras_data->lss = private->conf.ned->ID;
	ras_data->dev_addr = private->conf.ned->unit_addr;
	ras_data->nr_exts = nr_exts;

	if (by_extent) {
		heads = private->rdc_data.trk_per_cyl;
		cur_from_trk = first_trk;
		cur_to_trk = first_trk + trks_per_ext -
			(first_trk % trks_per_ext) - 1;
		if (cur_to_trk > last_trk)
			cur_to_trk = last_trk;
		ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);

		for (i = 0; i < nr_exts; i++) {
			beg_cyl = cur_from_trk / heads;
			beg_head = cur_from_trk % heads;
			end_cyl = cur_to_trk / heads;
			end_head = cur_to_trk % heads;

			set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
			set_ch_t(&ras_range->end_ext, end_cyl, end_head);

			cur_from_trk = cur_to_trk + 1;
			cur_to_trk = cur_from_trk + trks_per_ext - 1;
			if (cur_to_trk > last_trk)
				cur_to_trk = last_trk;
			ras_range++;
		}
	}

	ccw = cqr->cpaddr;
	ccw->cda = virt_to_dma32(cqr->data);
	ccw->cmd_code = DASD_ECKD_CCW_DSO;
	ccw->count = size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = block;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
static int dasd_eckd_release_space_full(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on_interruptible(cqr);

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
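
/*
 * Release a track range in chunks. Each chunk ends on an extent boundary
 * and covers at most DASD_ECKD_RAS_EXTS_MAX extents, so that the device
 * limits for a single DSO RAS request are never exceeded.
 */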
static int dasd_eckd_release_space_trks(struct dasd_device *device,
					unsigned int from, unsigned int to)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block = device->block;
	struct dasd_ccw_req *cqr, *n;
	struct list_head ras_queue;
	unsigned int device_exts;
	int trks_per_ext;
	int stop, step;
	int cur_pos;
	int rc = 0;
	int retry;

	INIT_LIST_HEAD(&ras_queue);

	device_exts = private->real_cyl / dasd_eckd_ext_size(device);
	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;

	/* Make sure device limits are not exceeded */
	step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
	cur_pos = from;

	do {
		retry = 0;
		while (cur_pos < to) {
			stop = cur_pos + step -
				((cur_pos + step) % trks_per_ext) - 1;
			if (stop > to)
				stop = to;

			cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&ras_queue))
						goto out;
					retry = 1;
					break;
				}
				goto err_out;
			}

			spin_lock_irq(&block->queue_lock);
			list_add_tail(&cqr->blocklist, &ras_queue);
			spin_unlock_irq(&block->queue_lock);
			cur_pos = stop + 1;
		}

		rc = dasd_sleep_on_queue_interruptible(&ras_queue);

err_out:
		list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			spin_lock_irq(&block->queue_lock);
			list_del_init(&cqr->blocklist);
			spin_unlock_irq(&block->queue_lock);
			dasd_sfree_request(cqr, device);
		}
	} while (retry);

out:
	return rc;
}
static int dasd_eckd_release_space(struct dasd_device *device,
				   struct format_data_t *rdata)
{
	if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
		return dasd_eckd_release_space_full(device);
	else if (rdata->intensity == 0)
		return dasd_eckd_release_space_trks(device, rdata->start_unit,
						    rdata->stop_unit);
	else
		return -EINVAL;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	dma64_t *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, basedev, 0) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (dma64_t *)(cqr->data + sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = virt_to_dma32(idaws);
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = virt_to_dma32(dst);
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	}

	return cqr;
}
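
/*
 * Build a command mode channel program that transfers whole tracks: one
 * Prefix CCW followed by one read/write CCW per track, with the payload
 * described by IDAWs that must end on IDA block and track boundaries.
 */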
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	dma64_t *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and it's idal ends with the track boundaries
	 */
	idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = NULL;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		seg_len = bv.bv_len;
		while (seg_len) {
			if (new_track) {
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = virt_to_dma32(idaws);
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = NULL;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev))
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
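
/*
 * Fill the combined Prefix/Locate Record Extended data for a transport
 * mode (tcw/itcw) request and add it as the first DCW.
 */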
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;

	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;

	/* setup prefix data */
	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->conf.ned->unit_addr;
	pfxdata.base_lss = basepriv->conf.ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata.validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata.validity.verify_base = 1;
		pfxdata.validity.hyper_pav = 1;
	}

	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = set_timestamp(NULL, dedata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		/*
		 * If XRC is supported the System Time Stamp is set. The
		 * validity of the time stamp must be reflected in the prefix
		 * data as well.
		 */
		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	case DASD_ECKD_CCW_READ_COUNT_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		dedata->ga_extended |= 0x42;
		dedata->blk_size = blksize;
		lredata->operation.orientation = 0x2;
		lredata->operation.operation = 0x16;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;
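
	/*
	 * Note: the device type specific formulas below compute the
	 * approximate angular sector position of a record on its track from
	 * the block size and the record number; the constants reflect the
	 * 3390/3380 track geometries.
	 */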
4435 switch (basepriv
->rdc_data
.dev_type
) {
4437 dn
= ceil_quot(blksize
+ 6, 232);
4438 d
= 9 + ceil_quot(blksize
+ 6 * (dn
+ 1), 34);
4439 sector
= (49 + (rec_on_trk
- 1) * (10 + d
)) / 8;
4442 d
= 7 + ceil_quot(blksize
+ 12, 32);
4443 sector
= (39 + (rec_on_trk
- 1) * (8 + d
)) / 7;
4448 if (cmd
== DASD_ECKD_CCW_READ_COUNT_MT
) {
4449 lredata
->auxiliary
.length_valid
= 0;
4450 lredata
->auxiliary
.length_scope
= 0;
4451 lredata
->sector
= 0xff;
4453 lredata
->auxiliary
.length_valid
= 1;
4454 lredata
->auxiliary
.length_scope
= 1;
4455 lredata
->sector
= sector
;
4457 lredata
->auxiliary
.imbedded_ccw_valid
= 1;
4458 lredata
->length
= tlf
;
4459 lredata
->imbedded_ccw
= cmd
;
4460 lredata
->count
= count
;
4461 set_ch_t(&lredata
->seek_addr
, begcyl
, beghead
);
4462 lredata
->search_arg
.cyl
= lredata
->seek_addr
.cyl
;
4463 lredata
->search_arg
.head
= lredata
->seek_addr
.head
;
4464 lredata
->search_arg
.record
= rec_on_trk
;
4466 dcw
= itcw_add_dcw(itcw
, pfx_cmd
, 0,
4467 &pfxdata
, sizeof(pfxdata
), total_data_size
);
4468 return PTR_ERR_OR_ZERO(dcw
);
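
/*
 * Illustrative numbers for the sector formulas above (explanatory comment,
 * not from the original source): for a 4096 byte block on a 3390,
 * dn = ceil((4096 + 6) / 232) = 18 and
 * d = 9 + ceil((4096 + 6 * 19) / 34) = 133, so the second record on the
 * track (rec_on_trk = 2) starts at sector (49 + 1 * (10 + 133)) / 8 = 24.
 * The constants encode the ECKD track geometry of the respective device
 * type.
 */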
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;
	u8 tidaw_flags;
	unsigned int seg_len, part_len, len_to_track_end;
	unsigned char new_track;
	sector_t recid, trkid;
	unsigned int offs;
	unsigned int count, count_to_trk_end;
	int ret;

	basedev = block->base;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* Trackbased I/O needs to address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 * In the case of write requests, additional tidaws may
	 * be needed when a segment crosses a track boundary.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}
	if (rq_data_dir(req) == WRITE)
		ctidaw += (last_trk - first_trk);

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	if (IS_ERR(itcw)) {
		ret = -EINVAL;
		goto out_error;
	}
	cqr->cpaddr = itcw_get_tcw(itcw);
	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		ret = -EAGAIN;
		goto out_error;
	}
	len_to_track_end = 0;
	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries
	 * We can let the block layer handle this by setting seg_boundary_mask
	 * to page boundaries and max_segment_size to page size when setting up
	 * the request queue.
	 * For write requests, a TIDAW must not cross track boundaries, because
	 * we have to set the CBC flag on the last tidaw for each track.
	 */
	if (rq_data_dir(req) == WRITE) {
		new_track = 1;
		recid = first_rec;
		rq_for_each_segment(bv, req, iter) {
			dst = bvec_virt(&bv);
			seg_len = bv.bv_len;
			while (seg_len) {
				if (new_track) {
					trkid = recid;
					offs = sector_div(trkid, blk_per_trk);
					count_to_trk_end = blk_per_trk - offs;
					count = min((last_rec - recid + 1),
						    (sector_t)count_to_trk_end);
					len_to_track_end = count * blksize;
					recid += count;
					new_track = 0;
				}
				part_len = min(seg_len, len_to_track_end);
				seg_len -= part_len;
				len_to_track_end -= part_len;
				/* We need to end the tidaw at track end */
				if (!len_to_track_end) {
					new_track = 1;
					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
				} else
					tidaw_flags = 0;
				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
							    dst, part_len);
				if (IS_ERR(last_tidaw)) {
					ret = -EINVAL;
					goto out_error;
				}
				dst += part_len;
			}
		}
	} else {
		rq_for_each_segment(bv, req, iter) {
			dst = bvec_virt(&bv);
			last_tidaw = itcw_add_tidaw(itcw, 0x00,
						    dst, bv.bv_len);
			if (IS_ERR(last_tidaw)) {
				ret = -EINVAL;
				goto out_error;
			}
		}
	}
	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
	}

	return cqr;
out_error:
	dasd_sfree_request(cqr, startdev);
	return ERR_PTR(ret);
}
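
/*
 * Note on the TIDAW layout built above (explanatory comment, added): each
 * block layer segment maps to exactly one TIDAW because the request queue
 * limits segments to page size and page alignment.  Only write requests
 * need the extra per-track TIDAWs reserved in ctidaw, since a TIDAW that
 * carries the CBC flag must end exactly on a track boundary.
 */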
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int cmdrtd, cmdwtd;
	int use_prefix;
	int fcx_multitrack;
	int cdlspecial;
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	unsigned int data_size;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	fcx_multitrack = private->features.feature[40] & 0x20;
	data_size = blk_rq_bytes(req);
	if (data_size % blksize)
		return ERR_PTR(-EINVAL);
	/* tpm write requests add CBC data on each track boundary */
	if (rq_data_dir(req) == WRITE)
		data_size += (last_trk - first_trk) * 4;

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if ((data_size <= private->fcx_max_data)
		   && (fcx_multitrack || (first_trk == last_trk))) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}
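
/*
 * Summary of the channel program selection above (explanatory comment,
 * added): CDL special areas or an active dasd_page_cache force the
 * command mode single-block path; otherwise, if the payload fits
 * fcx_max_data and either multitrack FCX is supported or the request
 * stays on one track, the transport mode track path is tried; failing
 * that, prefix support plus command mode read/write track data selects
 * the command mode track path.  The single-block path remains the
 * fallback whenever the others decline with -EAGAIN or -ENOMEM.
 */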
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int trkcount;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	dma64_t *idaws;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * raw track access needs to be a multiple of 64k and on a 64k boundary
	 * For read requests we can fix an incorrect alignment by padding
	 * the request with dummy pages.
	 */
	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
		DASD_RAW_SECTORS_PER_TRACK;
	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
		DASD_RAW_SECTORS_PER_TRACK;
	basedev = block->base;
	if ((start_padding_sectors || end_padding_sectors) &&
	    (rq_data_dir(req) == WRITE)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "raw write not track aligned (%llu,%llu) req %p",
			      start_padding_sectors, end_padding_sectors, req);
		return ERR_PTR(-EINVAL);
	}

	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
		DASD_RAW_SECTORS_PER_TRACK;
	trkcount = last_trk - first_trk + 1;

	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Raw track based I/O needs IDAWs for each page,
	 * and not just for 64 bit addresses.
	 */
	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;

	/*
	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
	 * of extended parameter. This is needed for write full track.
	 */
	base_priv = basedev->private;
	use_prefix = base_priv->features.feature[8] & 0x01;
	if (use_prefix) {
		cplength = 1 + trkcount;
		size = sizeof(struct PFX_eckd_data) + 2;
	} else {
		cplength = 2 + trkcount;
		size = sizeof(struct DE_eckd_data) +
			sizeof(struct LRE_eckd_data) + 2;
	}
	size = ALIGN(size, 8);

	datasize = size + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, 0, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, 0,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (dma64_t *)(cqr->data + size);
	len_to_track_end = 0;
	if (start_padding_sectors) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/* maximum 3390 track size */
		ccw->count = 57326;
		/* 64k map to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = virt_to_dma32(idaws);
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		seg_len = bv.bv_len;
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		if (!len_to_track_end) {
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum 3390 track size */
			ccw->count = 57326;
			/* 64k map to one track */
			len_to_track_end = 65536;
			ccw->cda = virt_to_dma32(idaws);
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
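
/*
 * Alignment example for the raw track path above (illustrative comment,
 * added): a read starting at sector 100 for 100 sectors gives
 * start_padding_sectors = 100 % 128 = 100 and end_sector_offset =
 * 200 % 128 = 72, so end_padding_sectors = (128 - 72) % 128 = 56.  The
 * padding IDAWs point at rawpadpage so the hardware still transfers
 * full 64k tracks while the dummy data is discarded.
 */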
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = dma64_to_virt(*((dma64_t *)dma32_to_virt(ccw->cda)));
				else
					cda = dma32_to_virt(ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
/*
 * Modify ccw/tcw in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
	struct ccw1 *ccw;
	struct PFX_eckd_data *pfxdata;
	struct tcw *tcw;
	struct tccb *tccb;
	struct dcw *dcw;

	if (cqr->cpmode == 1) {
		tcw = cqr->cpaddr;
		tccb = tcw_get_tccb(tcw);
		dcw = (struct dcw *)&tccb->tca[0];
		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
		pfxdata->validity.verify_base = 0;
		pfxdata->validity.hyper_pav = 0;
	} else {
		ccw = cqr->cpaddr;
		pfxdata = cqr->data;
		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
			pfxdata->validity.verify_base = 0;
			pfxdata->validity.hyper_pav = 0;
		}
	}
}
#define DASD_ECKD_CHANQ_MAX_SIZE 4

static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = startdev->private;
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	if ((base->features & DASD_FEATURE_USERAW))
		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
	else
		cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}

static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
				   struct request *req)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
	private = cqr->memdev->private;
	private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
	return dasd_eckd_free_cp(cqr, req);
}
static int
dasd_eckd_fill_info(struct dasd_device * device,
		    struct dasd_information2_t * info)
{
	struct dasd_eckd_private *private = device->private;

	info->label_block = 2;
	info->FBA_layout = private->uses_cdl ? 0 : 1;
	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(private->rdc_data);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(private->rdc_data));
	info->confdata_size = min_t(unsigned long, private->conf.len,
				    sizeof(info->configuration_data));
	memcpy(info->configuration_data, private->conf.data,
	       info->confdata_size);
	return 0;
}
/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a prior reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = virt_to_dma32(cqr->data);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = virt_to_dma32(cqr->data);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = virt_to_dma32(cqr->data);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * SNID - Sense Path Group ID
 * This ioctl may be used in situations where I/O is stalled due to
 * a reserve, so if the normal dasd_smalloc_request fails, we use the
 * preallocated dasd_reserve_req.
 */
static int dasd_eckd_snid(struct dasd_device *device,
			  void __user *argp)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;
	struct dasd_snid_ioctl_data usrparm;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		return -EFAULT;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_snid_data), device,
				   NULL);
	if (IS_ERR(cqr)) {
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNID;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 12;
	ccw->cda = virt_to_dma32(cqr->data);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->lpm = usrparm.path_mask;

	rc = dasd_sleep_on_immediatly(cqr);
	/* verify that I/O processing didn't modify the path mask */
	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
		rc = -EIO;
	if (!rc) {
		usrparm.data = *((struct dasd_snid_data *)cqr->data);
		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
			rc = -EFAULT;
	}

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Read performance statistics
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = virt_to_dma32(stats);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private = device->private;
	struct attrib_data_t attrib = private->attrib;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	rc = 0;
	if (copy_to_user(argp, (long *) &attrib,
			 sizeof(struct attrib_data_t)))
		rc = -EFAULT;

	return rc;
}

/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private = device->private;
	struct attrib_data_t attrib;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
		return -EFAULT;
	private->attrib = attrib;

	dev_info(&device->cdev->dev,
		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
		 private->attrib.operation, private->attrib.nr_cyl);
	return 0;
}
/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed and should be allocated */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/* alloc I/O data area */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(psf_data);

	ccw++;

	/* RSSD ccw */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(rssd_result);

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			 rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
	struct dasd_device *device = block->base;

	switch (cmd) {
	case BIODASDGATTR:
		return dasd_eckd_get_attrib(device, argp);
	case BIODASDSATTR:
		return dasd_eckd_set_attrib(device, argp);
	case BIODASDPSRD:
		return dasd_eckd_performance(device, argp);
	case BIODASDRLSE:
		return dasd_eckd_release(device);
	case BIODASDRSRV:
		return dasd_eckd_reserve(device);
	case BIODASDSLCK:
		return dasd_eckd_steal_lock(device);
	case BIODASDSNID:
		return dasd_eckd_snid(device, argp);
	case BIODASDSYMMIO:
		return dasd_symm_io(device, argp);
	default:
		return -ENOTTY;
	}
}
/*
 * Dump the range of CCWs into 'page' buffer
 * and print it to the device log.
 */
static void
dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
			 struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);

		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			datap = dma64_to_virt(*((dma64_t *)dma32_to_virt(from->cda)));
		else
			datap = dma32_to_virt(from->cda);

		/* dump data (max 128 bytes) */
		for (count = 0; count < from->count && count < 128; count++) {
			if (count % 32 == 0)
				len += sprintf(page + len, "\n");
			if (count % 8 == 0)
				len += sprintf(page + len, " ");
			if (count % 4 == 0)
				len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	dev_err(&device->cdev->dev, "%s", page);
}
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			 char *reason)
{
	u64 *sense;
	u64 *stat;

	sense = (u64 *) dasd_get_sense(irb);
	stat = (u64 *) &irb->scsw;
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
			      "%016llx %016llx %016llx %016llx",
			      reason, *stat, *((u32 *) (stat + 1)),
			      sense[0], sense[1], sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
			      reason, *stat, *((u32 *) (stat + 1)),
			      "NO VALID SENSE");
	}
}
/*
 * Print sense data and related channel program.
 * Parts are printed because printk buffer is only 1024 bytes.
 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	struct ccw1 *first, *last, *fail, *from, *to;
	struct device *dev;
	int len, sl, sct;
	char *page;

	dev = &device->cdev->dev;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, "I/O status report:\n");
	len += sprintf(page + len,
		       "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, "Failing CCW: %px\n",
		       dma32_to_virt(irb->scsw.cmd.cpa));
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, "Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len,
				"24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len,
				"32 Byte: Format: %x Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
	}
	dev_err(dev, "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 7) */
		first = req->cpaddr;
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		dev_err(dev, "Related CP in req: %px\n", req);
		dasd_eckd_dump_ccw_range(device, first, to, page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		from = ++to;
		fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2; /* there is a gap - print header */
			dev_err(dev, "......\n");
		}
		to = min(fail + 1, last);
		dasd_eckd_dump_ccw_range(device, from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1; /* there is a gap - print header */
			dev_err(dev, "......\n");
		}
		dasd_eckd_dump_ccw_range(device, from, last, page + len);
	}
	free_page((unsigned long) page);
}
/*
 * Print sense data from a tcw.
 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			      "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, "I/O status report:\n");
	len += sprintf(page + len,
		       "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, "Failing TCW: %px\n",
		       dma32_to_virt(irb->scsw.tm.tcw));

	tsb = NULL;
	sense = NULL;
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));

	if (tsb) {
		len += sprintf(page + len, "tsb->length %d\n", tsb->length);
		len += sprintf(page + len, "tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, "tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, "residual %d\n", residual);

		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n",
				       tsb->tsa.ddpc.rc);
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len,
					       "tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, "%02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len,
				       "tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len,
					       "Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len,
					"24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len,
					"32 Byte: Format: %x Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n");
	}
	dev_err(&device->cdev->dev, "%s", page);
	free_page((unsigned long) page);
}
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	u8 *sense = dasd_get_sense(irb);

	/*
	 * In some cases certain errors might be expected and
	 * log messages shouldn't be written then.
	 * Check if the according suppress bit is set.
	 */
	if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
	    !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
	    test_bit(DASD_CQR_SUPPRESS_IT, &req->flags))
		return;

	if (sense && sense[0] & SNS0_CMD_REJECT &&
	    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
		return;

	if (sense && sense[1] & SNS1_NO_REC_FOUND &&
	    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
		return;

	if (scsw_cstat(&irb->scsw) == 0x40 &&
	    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
		return;

	if (scsw_is_tm(&irb->scsw))
		dasd_eckd_dump_sense_tcw(device, req, irb);
	else
		dasd_eckd_dump_sense_ccw(device, req, irb);
}
static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	char print_uid[DASD_UID_STRLEN];
	int rc, old_base;
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	dasd_eckd_read_fc_security(device);

	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		dasd_eckd_get_uid_string(&private->conf, print_uid);
		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(message_buf);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n",
				rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->conf.ned->ID;
	prssdp->volume = private->conf.ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(host_access);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}
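
/*
 * Note (explanatory comment, added): DASD_CQR_SUPPRESS_CR is set above
 * because older storage servers may reject the query-host-access
 * suborder; the command reject is then an expected outcome and should
 * not be logged as an error.
 */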
/*
 * return number of grouped devices
 */
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	int count = 0;
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
			count++;
	}

	kfree(access);
	return count;
}
/*
 * write host access information to a sequential file
 */
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	char sysplex[9] = "";
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		/* PGID */
		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
		/* FLAGS */
		seq_printf(m, "status_flags %02x\n", entry->status_flags);
		/* SYSPLEX NAME */
		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
		EBCASC(sysplex, sizeof(sysplex));
		seq_printf(m, "sysplex_name %8s\n", sysplex);
		/* SUPPORTED CYLINDER */
		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
		/* TIMESTAMP */
		seq_printf(m, "timestamp %lu\n", (unsigned long)
			   entry->timestamp);
	}
	kfree(access);

	return 0;
}
static struct dasd_device
*copy_relation_find_device(struct dasd_copy_relation *copy,
			   char *busid)
{
	int i;

	for (i = 0; i < DASD_CP_ENTRIES; i++) {
		if (copy->entry[i].configured &&
		    strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
			return copy->entry[i].device;
	}
	return NULL;
}

/*
 * set the new active/primary device
 */
static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
				 char *old_busid)
{
	int i;

	for (i = 0; i < DASD_CP_ENTRIES; i++) {
		if (copy->entry[i].configured &&
		    strncmp(copy->entry[i].busid, new_busid,
			    DASD_BUS_ID_SIZE) == 0) {
			copy->active = &copy->entry[i];
			copy->entry[i].primary = true;
		} else if (copy->entry[i].configured &&
			   strncmp(copy->entry[i].busid, old_busid,
				   DASD_BUS_ID_SIZE) == 0) {
			copy->entry[i].primary = false;
		}
	}
}
/*
 * The function will swap the role of a given copy pair.
 * During the swap operation the relation of the blockdevice is disconnected
 * from the old primary and connected to the new.
 *
 * IO is paused on the block queue before swap and may be resumed afterwards.
 */
static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
				    char *sec_busid)
{
	struct dasd_device *primary, *secondary;
	struct dasd_copy_relation *copy;
	struct dasd_block *block;
	struct gendisk *gdp;

	copy = device->copy;
	if (!copy)
		return DASD_COPYPAIRSWAP_INVALID;
	primary = copy->active->device;
	if (!primary)
		return DASD_COPYPAIRSWAP_INVALID;
	/* double check if swap has correct primary */
	if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
		return DASD_COPYPAIRSWAP_PRIMARY;

	secondary = copy_relation_find_device(copy, sec_busid);
	if (!secondary)
		return DASD_COPYPAIRSWAP_SECONDARY;

	/*
	 * usually the device should be quiesced for swap
	 * for paranoia stop device and requeue requests again
	 */
	dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
	dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
	dasd_generic_requeue_all_requests(primary);

	/* swap DASD internal device <> block assignment */
	block = primary->block;
	primary->block = NULL;
	secondary->block = block;
	block->base = secondary;
	/* set new primary device in COPY relation */
	copy_pair_set_active(copy, sec_busid, prim_busid);

	/* swap blocklayer device link */
	gdp = block->gdp;
	dasd_add_link_to_gendisk(gdp, secondary);

	/* re-enable device */
	dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
	dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
	dasd_schedule_device_bh(secondary);

	return DASD_COPYPAIRSWAP_SUCCESS;
}
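
/*
 * Ordering note for the swap above (explanatory comment, added): both
 * devices are stopped and outstanding requests requeued before the
 * block device is detached from the old primary and relinked to the
 * secondary; only then are the stop bits removed, so no request can be
 * started against a half-swapped relation.
 */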
/*
 * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
 */
static int dasd_eckd_query_pprc_status(struct dasd_device *device,
				       struct dasd_pprc_data_sc4 *data)
{
	struct dasd_pprc_data_sc4 *pprc_data;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(*prssdp) + sizeof(*pprc_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate query PPRC status request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *)cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_PPRCEQ;
	prssdp->varies[0] = PPRCEQ_SCOPE_4;
	pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*pprc_data);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = virt_to_dma32(pprc_data);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *pprc_data;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"PPRC Extended Query failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * ECKD NOP - no operation
 */
static int dasd_eckd_nop(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate NOP request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 1;
	cqr->expires = 10 * HZ;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_NOP;
	ccw->flags |= CCW_FLAG_SLI;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc)
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"NOP failed with rc=%d\n", rc);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

static int dasd_eckd_device_ping(struct dasd_device *device)
{
	return dasd_eckd_nop(device);
}
/*
 * Perform Subsystem Function - CUIR response
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = virt_to_dma32(psf_cuir);
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * return configuration data that is referenced by record selector
 * if a record selector is specified or per default return the
 * conf_data pointer for the path specified by lpum
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return device->path[pathmask_to_pos(lpum)].conf_data;
}
/*
 * This function determines the scope of a reconfiguration request by
 * analysing the path and device selection data provided in the CUIR request.
 * Returns a path mask containing CUIR affected paths for the given device.
 *
 * If the CUIR request does not contain the required information, return the
 * path mask of the path the attention message for the CUIR request was
 * received on.
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if CUIR request does not specify the scope use the path
	   the attention message was presented on */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/* compare reference gneq and per_path gneq under
		   24 bit mask where mask bit 0 equals byte 7 of
		   the gneq and mask bit 24 equals byte 31 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}
static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
				       unsigned long paths, int action)
{
	int pos;

	while (paths) {
		/* get position of bit in mask */
		pos = 8 - ffs(paths);
		/* get channel path descriptor from this position */
		if (action == CUIR_QUIESCE)
			pr_warn("Service on the storage server caused path %x.%02x to go offline",
				device->path[pos].cssid,
				device->path[pos].chpid);
		else if (action == CUIR_RESUME)
			pr_info("Path %x.%02x is back online after service on the storage server",
				device->path[pos].cssid,
				device->path[pos].chpid);
		clear_bit(7 - pos, &paths);
	}
}
static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
				      struct dasd_cuir_message *cuir)
{
	unsigned long tbcpm;

	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
	/* nothing to do if path is not in use */
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;
	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken
		   return error */
		return -EINVAL;
	}
	/* remove device from operational path mask */
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}
/*
 * walk through all devices and build a path mask to quiesce them
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error
 * occurs no onlining necessary, the storage server will
 * notify the already set offline devices again
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	unsigned long flags;
	int tbcpm;

	/* active devices */
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* inactive devices */
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist, group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	return tbcpm;
}
/*
 * walk through all devices and build a path mask to resume them
 */
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}
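/*
 * Note that resume does not put paths back into the operational mask
 * directly: it only adds them to the "to be verified" mask and kicks
 * the device bottom half, so the regular path verification logic
 * revalidates each path before it is used again.
 */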
static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}
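/*
 * Message flow sketch: the storage server raises an attention
 * interrupt, the CUIR message is read from the message buffer, and
 * the driver answers via dasd_eckd_psf_cuir_response() with
 * PSF_CUIR_COMPLETED, PSF_CUIR_LAST_PATH (quiesce would have removed
 * the last usable path) or PSF_CUIR_NOT_SUPPORTED.
 */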
static void dasd_eckd_oos_resume(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long flags;

	spin_lock_irqsave(&private->lcu->lock, flags);
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
	}
	spin_unlock_irqrestore(&private->lcu->lock, flags);
}
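/*
 * Unlike the CUIR quiesce walk, this resume path holds the lcu lock
 * across the whole traversal; dasd_generic_space_avail() essentially
 * clears the DASD_STOPPED_NOSPC state and kicks the bottom halves,
 * so no blocking work is done under the lock.
 */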
static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
				 __u8 lpum)
{
	struct dasd_oos_message *oos = messages;

	switch (oos->code) {
	case REPO_WARN:
	case POOL_WARN:
		dev_warn(&device->cdev->dev,
			 "Extent pool usage has reached a critical value\n");
		dasd_eckd_oos_resume(device);
		break;
	case REPO_EXHAUST:
	case POOL_EXHAUST:
		dev_warn(&device->cdev->dev,
			 "Extent pool is exhausted\n");
		break;
	case REPO_RELIEVE:
	case POOL_RELIEVE:
		dev_info(&device->cdev->dev,
			 "Extent pool physical space constraint has been relieved\n");
		break;
	}

	/* In any case, update related data */
	dasd_eckd_read_ext_pool_info(device);

	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;

	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
	if (messages->length == ATTENTION_LENGTH_OOS &&
	    messages->format == ATTENTION_FORMAT_OOS)
		dasd_eckd_handle_oos(device, messages, data->lpum);

out:
	dasd_put_device(device);
	kfree(messages);
	kfree(data);
}
static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
	struct check_attention_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
	dasd_get_device(device);
	data->device = device;
	data->lpum = lpum;
	schedule_work(&data->worker);
	return 0;
}
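/*
 * Design note: check_attention() can be called from interrupt
 * context, hence the GFP_ATOMIC allocation and the deferral to a
 * workqueue; the worker is then free to sleep (GFP_KERNEL allocation
 * and the synchronous message buffer read above).  dasd_get_device()
 * pins the device until the worker drops the reference again.
 */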
static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	if (~lpum & dasd_path_get_opm(device)) {
		dasd_path_add_nohpfpm(device, lpum);
		dasd_path_remove_opm(device, lpum);
		dev_err(&device->cdev->dev,
			"Channel path %02X lost HPF functionality and is disabled\n",
			lpum);
		return 1;
	}
	return 0;
}
static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}
static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent that any new I/O is started on the device and schedule a
	 * requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
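/*
 * The sense qualifier encodes the scope of the failure:
 * SCSW_SESQ_PATH_NOFCX disables HPF only on the failing path and, if
 * other paths remain, returns without touching the device.  Otherwise
 * (and for SCSW_SESQ_DEV_NOFCX) HPF is switched off device-wide and
 * the device is stopped and requeued so that queued transport-mode
 * requests can be rebuilt as plain CCW requests.
 */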
static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
{
	if (block->base->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track
		 * so the max_hw_sectors are
		 * 2048 x 512B = 1024kB = 16 tracks
		 */
		return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
	}

	return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
}
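/*
 * Worked numbers for the raw-track case: 256 blocks of 4 KiB are
 * 1 MiB per request; at 512-byte sectors that is 2048 sectors, and
 * with 64 KiB per raw track it corresponds to 16 tracks, matching
 * the comment above.
 */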
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
		.dev_groups = dasd_dev_groups,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.max_sectors = dasd_eckd_max_sectors,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
	.pprc_status = dasd_eckd_query_pprc_status,
	.pprc_enabled = dasd_eckd_pprc_enabled,
	.copy_pair_swap = dasd_eckd_copy_pair_swap,
	.device_ping = dasd_eckd_device_ping,
};
static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
				   GFP_KERNEL | GFP_DMA);
	if (!dasd_reserve_req)
		return -ENOMEM;
	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
				    GFP_KERNEL | GFP_DMA);
	if (!dasd_vol_info_req) {
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
				    GFP_KERNEL | GFP_DMA);
	if (!pe_handler_worker) {
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
	if (!rawpadpage) {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();
	else {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		free_page((unsigned long)rawpadpage);
	}
	return ret;
}
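/*
 * Allocation note: the static reserve/vol_info/pe_handler requests are
 * allocated with GFP_DMA so the buffers stay in 31-bit addressable
 * storage (s390 ZONE_DMA, below 2 GiB) where the channel subsystem can
 * reach them; rawpadpage is presumably the zeroed page used to pad
 * partial raw-track writes to a full track image.
 */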
static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(pe_handler_worker);
	kfree(dasd_reserve_req);
	/* also release the volume info request allocated in init */
	kfree(dasd_vol_info_req);
	free_page((unsigned long)rawpadpage);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);