/*
 * drivers/s390/cio/chsc.c
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 *            Arnd Bergmann (arndb@de.ibm.com)
 */
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/device.h>
18 #include <asm/chpid.h>
22 #include "cio_debug.h"
/*
 * Page used as the CHSC store-event-information (SEI) request/response
 * area; allocated in chsc_alloc_sei_area() and, per the comment in
 * chsc_process_crw(), serialized through the machine check handler thread.
 */
27 static void *sei_page
;
/*
 * Request/response block for the CHSC store-subchannel-description (SSD)
 * command (code 0x0004), used by chsc_get_ssd_info() below.
 * NOTE(review): this extraction is incomplete -- fields referenced by
 * chsc_get_ssd_info (ssid, sch_valid, path_mask, fla_valid_mask) are not
 * visible here; confirm against the full source.
 */
29 struct chsc_ssd_area
{
30 struct chsc_header request
;
34 u16 f_sch
; /* first subchannel */
36 u16 l_sch
; /* last subchannel */
38 struct chsc_header response
;
42 u8 st
: 3; /* subchannel type */
44 u8 unit_addr
; /* unit address */
45 u16 devno
; /* device number */
48 u16 sch
; /* subchannel */
49 u8 chpid
[8]; /* chpids 0-7 */
50 u16 fla
[8]; /* full link addresses 0-7 */
51 } __attribute__ ((packed
));
/*
 * chsc_get_ssd_info - fetch the subchannel description for @schid via the
 * CHSC SSD command (request code 0x0004) and translate it into @ssd.
 *
 * Issues chsc() on a zeroed DMA-capable page, checks the response code,
 * then copies path_mask/fla_valid_mask and per-path chpid/fla data.
 * NOTE(review): extraction dropped interior lines (declarations of
 * page/ccode/ret/i/mask, return statements, page free) -- incomplete view.
 */
53 int chsc_get_ssd_info(struct subchannel_id schid
, struct chsc_ssd_info
*ssd
)
56 struct chsc_ssd_area
*ssd_area
;
62 page
= get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
/* Build the SSD request: single-subchannel range f_sch == l_sch. */
65 ssd_area
= (struct chsc_ssd_area
*) page
;
66 ssd_area
->request
.length
= 0x0010;
67 ssd_area
->request
.code
= 0x0004;
68 ssd_area
->ssid
= schid
.ssid
;
69 ssd_area
->f_sch
= schid
.sch_no
;
70 ssd_area
->l_sch
= schid
.sch_no
;
72 ccode
= chsc(ssd_area
);
/* Condition code 3 means the CHSC facility is unavailable. */
75 ret
= (ccode
== 3) ? -ENODEV
: -EBUSY
;
/* 0x0001 is the CHSC success response code. */
78 if (ssd_area
->response
.code
!= 0x0001) {
79 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
80 schid
.ssid
, schid
.sch_no
,
81 ssd_area
->response
.code
);
85 if (!ssd_area
->sch_valid
) {
/* Copy the description; only subchannel types 0 and 2 proceed past
 * the st check (dropped branch body not visible here). */
91 memset(ssd
, 0, sizeof(struct chsc_ssd_info
));
92 if ((ssd_area
->st
!= 0) && (ssd_area
->st
!= 2))
94 ssd
->path_mask
= ssd_area
->path_mask
;
95 ssd
->fla_valid_mask
= ssd_area
->fla_valid_mask
;
96 for (i
= 0; i
< 8; i
++) {
98 if (ssd_area
->path_mask
& mask
) {
99 chp_id_init(&ssd
->chpid
[i
]);
100 ssd
->chpid
[i
].id
= ssd_area
->chpid
[i
];
102 if (ssd_area
->fla_valid_mask
& mask
)
103 ssd
->fla
[i
] = ssd_area
->fla
[i
];
/*
 * check_for_io_on_path - test whether I/O is currently active on the path
 * of @sch selected by @mask: refresh the schib via stsch(), then check
 * scsw activity with the last-path-used-mask equal to @mask.
 * NOTE(review): return statements not visible in this extraction.
 */
110 static int check_for_io_on_path(struct subchannel
*sch
, int mask
)
114 cc
= stsch(sch
->schid
, &sch
->schib
);
117 if (sch
->schib
.scsw
.actl
&& sch
->schib
.pmcw
.lpum
== mask
)
/*
 * terminate_internal_io - kill internal I/O on @sch with cio_clear().
 * If the clear fails, re-verify the device and schedule the subchannel
 * for evaluation when that fails too. On success, request a retry of the
 * internal operation and notify the driver's termination handler.
 */
122 static void terminate_internal_io(struct subchannel
*sch
)
124 if (cio_clear(sch
)) {
125 /* Recheck device in case clear failed. */
127 if (device_trigger_verify(sch
) != 0)
128 css_schedule_eval(sch
->schid
);
131 /* Request retry of internal operation. */
132 device_set_intretry(sch
);
/* Let the subchannel driver clean up after the terminated I/O. */
134 if (sch
->driver
&& sch
->driver
->termination
)
135 sch
->driver
->termination(&sch
->dev
);
/*
 * s390_subchannel_remove_chpid - bus_for_each_dev() callback invoked by
 * chsc_chp_offline(): remove the channel path in @data from the
 * subchannel behind @dev. Scans the PIM/chpid array for a matching path,
 * refreshes the schib, terminates I/O running on that path and
 * re-triggers path verification.
 * NOTE(review): extraction dropped interior lines (mask computation,
 * goto labels, return paths) -- incomplete view.
 */
139 s390_subchannel_remove_chpid(struct device
*dev
, void *data
)
143 struct subchannel
*sch
;
144 struct chp_id
*chpid
;
147 sch
= to_subchannel(dev
);
/* Look for @chpid among the up-to-8 installed paths of the subchannel. */
149 for (j
= 0; j
< 8; j
++) {
151 if ((sch
->schib
.pmcw
.pim
& mask
) &&
152 (sch
->schib
.pmcw
.chpid
[j
] == chpid
->id
))
158 spin_lock_irq(sch
->lock
);
160 stsch(sch
->schid
, &schib
);
163 memcpy(&sch
->schib
, &schib
, sizeof(struct schib
));
164 /* Check for single path devices. */
165 if (sch
->schib
.pmcw
.pim
== 0x80)
168 if (check_for_io_on_path(sch
, mask
)) {
169 if (device_is_online(sch
))
172 terminate_internal_io(sch
);
173 /* Re-start path verification. */
174 if (sch
->driver
&& sch
->driver
->verify
)
175 sch
->driver
->verify(&sch
->dev
);
178 /* trigger path verification. */
179 if (sch
->driver
&& sch
->driver
->verify
)
180 sch
->driver
->verify(&sch
->dev
);
181 else if (sch
->lpm
== mask
)
185 spin_unlock_irq(sch
->lock
);
190 spin_unlock_irq(sch
->lock
);
191 css_schedule_eval(sch
->schid
);
/*
 * chsc_chp_offline - handle a channel path going offline: trace the
 * event ("chpr<cssid>.<id>"), skip paths that are not online per
 * chp_get_status(), and walk all css bus devices removing the path via
 * s390_subchannel_remove_chpid().
 */
195 void chsc_chp_offline(struct chp_id chpid
)
199 sprintf(dbf_txt
, "chpr%x.%02x", chpid
.cssid
, chpid
.id
);
200 CIO_TRACE_EVENT(2, dbf_txt
);
/* Nothing to do if the path is not known to be online. */
202 if (chp_get_status(chpid
) <= 0)
204 bus_for_each_dev(&css_bus_type
, NULL
, &chpid
,
205 s390_subchannel_remove_chpid
);
/*
 * s390_process_res_acc_new_sch - a path may have made a previously
 * unknown subchannel available; if stsch_err() shows it exists, queue it
 * for slow-path evaluation so recognition runs again.
 */
209 s390_process_res_acc_new_sch(struct subchannel_id schid
)
213 * We don't know the device yet, but since a path
214 * may be available now to the device we'll have
215 * to do recognition again.
216 * Since we don't have any idea about which chpid
217 * that beast may be on we'll have to do a stsch
218 * on all devices, grr...
220 if (stsch_err(schid
, &schib
))
224 /* Put it on the slow path. */
225 css_schedule_eval(schid
);
/*
 * Parameters describing a resource-accessibility event.
 * NOTE(review): the members are not visible in this extraction; usage
 * below shows at least chpid, fla and fla_mask fields -- confirm against
 * the full source.
 */
229 struct res_acc_data
{
/*
 * get_res_chpid_mask - compute the mask of paths in @ssd that match the
 * resource-accessibility event in @data: the path must be installed,
 * carry the event's chpid, and (when a full link address is valid) match
 * data->fla under data->fla_mask.
 * NOTE(review): mask computation and return statements were dropped by
 * the extraction.
 */
235 static int get_res_chpid_mask(struct chsc_ssd_info
*ssd
,
236 struct res_acc_data
*data
)
241 for (i
= 0; i
< 8; i
++) {
243 if (!(ssd
->path_mask
& mask
))
245 if (!chp_id_is_equal(&ssd
->chpid
[i
], &data
->chpid
))
247 if ((ssd
->fla_valid_mask
& mask
) &&
248 ((ssd
->fla
[i
] & data
->fla_mask
) != data
->fla
))
/*
 * __s390_process_res_acc - for_each_subchannel() worker for a
 * resource-accessibility event: if the subchannel is unknown, treat it
 * as newly available; otherwise recompute its logical path mask from the
 * event's chpid mask and either reprobe the device (paths gained from
 * zero) or re-run the driver's path verification.
 * NOTE(review): extraction dropped interior lines (NULL check on sch,
 * old_lpm capture, chp_mask == 0 early out) -- incomplete view.
 */
256 __s390_process_res_acc(struct subchannel_id schid
, void *data
)
258 int chp_mask
, old_lpm
;
259 struct res_acc_data
*res_data
;
260 struct subchannel
*sch
;
263 sch
= get_subchannel_by_schid(schid
);
265 /* Check if a subchannel is newly available. */
266 return s390_process_res_acc_new_sch(schid
);
268 spin_lock_irq(sch
->lock
);
269 chp_mask
= get_res_chpid_mask(&sch
->ssd_info
, res_data
);
272 if (stsch(sch
->schid
, &sch
->schib
))
/* Recompute lpm: installed & available paths, plus the event's
 * chp_mask, limited to the operational path mask. */
275 sch
->lpm
= ((sch
->schib
.pmcw
.pim
&
276 sch
->schib
.pmcw
.pam
&
278 | chp_mask
) & sch
->opm
;
279 if (!old_lpm
&& sch
->lpm
)
280 device_trigger_reprobe(sch
);
281 else if (sch
->driver
&& sch
->driver
->verify
)
282 sch
->driver
->verify(&sch
->dev
);
284 spin_unlock_irq(sch
->lock
);
/* Drop the reference taken by get_subchannel_by_schid(). */
285 put_device(&sch
->dev
);
/*
 * s390_process_res_acc - entry point for a resource-accessibility event:
 * trace it, then scan all subchannels via __s390_process_res_acc().
 */
289 static void s390_process_res_acc (struct res_acc_data
*res_data
)
293 sprintf(dbf_txt
, "accpr%x.%02x", res_data
->chpid
.cssid
,
295 CIO_TRACE_EVENT( 2, dbf_txt
);
/* Trace the full link address too, when one was reported. */
296 if (res_data
->fla
!= 0) {
297 sprintf(dbf_txt
, "fla%x", res_data
->fla
);
298 CIO_TRACE_EVENT( 2, dbf_txt
);
302 * I/O resources may have become accessible.
303 * Scan through all subchannels that may be concerned and
304 * do a validation on those.
305 * The more information we have (info), the less scanning
306 * will we have to do.
308 for_each_subchannel(__s390_process_res_acc
, res_data
);
/*
 * __get_chpid_from_lir - extract the chpid from a link-incident record
 * (LIR) passed as raw @data. Validates the first incident-node
 * descriptor word (bits 0xc0000000 -- validity; 0x10000000 -- node type)
 * and returns byte 3, which holds the chpid.
 * NOTE(review): the lir struct layout and the error-return values are
 * not fully visible in this extraction.
 */
312 __get_chpid_from_lir(void *data
)
318 /* incident-node descriptor */
320 /* attached-node descriptor */
322 /* incident-specific information */
324 } __attribute__ ((packed
)) *lir
;
328 /* NULL link incident record */
330 if (!(lir
->indesc
[0]&0xc0000000))
331 /* node descriptor not valid */
333 if (!(lir
->indesc
[0]&0x10000000))
334 /* don't handle device-type nodes - FIXME */
336 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
338 return (u16
) (lir
->indesc
[0]&0x000000ff);
/*
 * Request/response block for the CHSC store-event-information (SEI)
 * command (code 0x000e), sized to fill one 4K page.
 */
341 struct chsc_sei_area
{
342 struct chsc_header request
;
346 struct chsc_header response
;
349 u8 vf
; /* validity flags */
350 u8 rs
; /* reporting source */
351 u8 cc
; /* content code */
352 u16 fla
; /* full link address */
353 u16 rsid
; /* reporting source id */
356 u8 ccdf
[4096 - 16 - 24]; /* content-code dependent field */
357 /* ccdf has to be big enough for a link-incident record */
358 } __attribute__ ((packed
));
/*
 * chsc_process_sei_link_incident - handle an SEI link-incident event:
 * only reporting source 4 (channel path) is handled; extract the chpid
 * from the content-code-dependent field and take the path offline.
 * NOTE(review): the id validity check and chpid construction between the
 * __get_chpid_from_lir() call and chsc_chp_offline() were dropped by the
 * extraction.
 */
360 static void chsc_process_sei_link_incident(struct chsc_sei_area
*sei_area
)
365 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
366 sei_area
->rs
, sei_area
->rsid
);
367 if (sei_area
->rs
!= 4)
369 id
= __get_chpid_from_lir(sei_area
->ccdf
);
371 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
375 chsc_chp_offline(chpid
);
/*
 * chsc_process_sei_res_acc - handle an SEI resource-accessibility event
 * (content code 2): build a res_acc_data from the reporting source id
 * and validity flags, then hand it to s390_process_res_acc().
 * vf bits 0xc0 select how much of the reported full link address is
 * significant: both bits -> full 16-bit fla, one bit -> link address
 * (high byte) only.
 */
379 static void chsc_process_sei_res_acc(struct chsc_sei_area
*sei_area
)
381 struct res_acc_data res_data
;
385 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
386 "rs_id=%04x)\n", sei_area
->rs
, sei_area
->rsid
);
/* Only reporting source 4 (channel path) is handled. */
387 if (sei_area
->rs
!= 4)
390 chpid
.id
= sei_area
->rsid
;
391 /* allocate a new channel path structure, if needed */
392 status
= chp_get_status(chpid
);
397 memset(&res_data
, 0, sizeof(struct res_acc_data
));
398 res_data
.chpid
= chpid
;
399 if ((sei_area
->vf
& 0xc0) != 0) {
400 res_data
.fla
= sei_area
->fla
;
401 if ((sei_area
->vf
& 0xc0) == 0xc0)
402 /* full link address */
403 res_data
.fla_mask
= 0xffff;
/* Otherwise only the link address (high byte) is valid. */
406 res_data
.fla_mask
= 0xff00;
408 s390_process_res_acc(&res_data
);
/*
 * Payload of a channel-path-configuration notification.
 * NOTE(review): members not visible in this extraction; usage below
 * shows at least a chpid bitmap 'map' and an operation code 'op'.
 */
411 struct chp_config_data
{
/*
 * chsc_process_sei_chp_config - handle an SEI channel-path-configuration
 * notification (content code 8): for every chpid set in the event's map,
 * schedule a configure (op-dependent), a deconfigure, or cancel a
 * pending deconfigure.
 * NOTE(review): the switch on data->op and chpid construction were
 * dropped by the extraction -- which op value maps to which
 * chp_cfg_* call cannot be confirmed from this view.
 */
417 static void chsc_process_sei_chp_config(struct chsc_sei_area
*sei_area
)
419 struct chp_config_data
*data
;
423 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
/* Only reporting source 0 is handled here. */
424 if (sei_area
->rs
!= 0)
426 data
= (struct chp_config_data
*) &(sei_area
->ccdf
);
428 for (num
= 0; num
<= __MAX_CHPID
; num
++) {
429 if (!chp_test_bit(data
->map
, num
))
432 printk(KERN_WARNING
"cio: processing configure event %d for "
433 "chpid %x.%02x\n", data
->op
, chpid
.cssid
, chpid
.id
);
436 chp_cfg_schedule(chpid
, 1);
439 chp_cfg_schedule(chpid
, 0);
442 chp_cfg_cancel_deconfigure(chpid
);
/*
 * chsc_process_sei - dispatch one stored-event-information block by its
 * content code: 1 = link incident, 2 = resource accessibility,
 * 8 = channel-path configuration; anything else is only logged.
 * An overflow flag (0x40) means events were lost, so everything is
 * rescheduled for evaluation first.
 */
448 static void chsc_process_sei(struct chsc_sei_area
*sei_area
)
450 /* Check if we might have lost some information. */
451 if (sei_area
->flags
& 0x40) {
452 CIO_CRW_EVENT(2, "chsc: event overflow\n");
453 css_schedule_eval_all();
455 /* which kind of information was stored? */
456 switch (sei_area
->cc
) {
457 case 1: /* link incident*/
458 chsc_process_sei_link_incident(sei_area
);
460 case 2: /* i/o resource accessibility */
461 chsc_process_sei_res_acc(sei_area
);
463 case 8: /* channel-path-configuration notification */
464 chsc_process_sei_chp_config(sei_area
);
466 default: /* other stuff */
467 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
/*
 * chsc_process_crw - handle a channel report word by repeatedly issuing
 * the CHSC store-event-information command (code 0x000e) into sei_page
 * and processing each returned event, until the response's "more events
 * pending" flag (0x80) is clear.
 */
473 void chsc_process_crw(void)
475 struct chsc_sei_area
*sei_area
;
479 /* Access to sei_page is serialized through machine check handler
480 * thread, so no need for locking. */
483 CIO_TRACE_EVENT( 2, "prcss");
485 memset(sei_area
, 0, sizeof(*sei_area
));
486 sei_area
->request
.length
= 0x0010;
487 sei_area
->request
.code
= 0x000e;
491 if (sei_area
->response
.code
== 0x0001) {
492 CIO_CRW_EVENT(4, "chsc: sei successful\n");
493 chsc_process_sei(sei_area
);
495 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
496 sei_area
->response
.code
);
/* Repeat while the hardware indicates more pending events. */
499 } while (sei_area
->flags
& 0x80);
/*
 * __chp_add_new_sch - a newly-added path may expose a previously unknown
 * subchannel; if stsch_err() shows it exists, queue it for slow-path
 * evaluation.
 */
503 __chp_add_new_sch(struct subchannel_id schid
)
507 if (stsch_err(schid
, &schib
))
511 /* Put it on the slow path. */
512 css_schedule_eval(schid
);
/*
 * __chp_add - for_each_subchannel() worker invoked by chsc_chp_online():
 * add the channel path in @data to the subchannel @schid. Unknown
 * subchannels are treated as newly available; otherwise the matching
 * installed path is located, the schib refreshed, the logical path mask
 * extended and driver path verification re-run.
 * NOTE(review): extraction dropped interior lines (mask computation,
 * error handling after the failed stsch, parts of the lpm expression)
 * -- incomplete view.
 */
518 __chp_add(struct subchannel_id schid
, void *data
)
521 struct chp_id
*chpid
;
522 struct subchannel
*sch
;
525 sch
= get_subchannel_by_schid(schid
);
527 /* Check if the subchannel is now available. */
528 return __chp_add_new_sch(schid
);
529 spin_lock_irq(sch
->lock
);
/* Find the installed path slot carrying this chpid. */
530 for (i
=0; i
<8; i
++) {
532 if ((sch
->schib
.pmcw
.pim
& mask
) &&
533 (sch
->schib
.pmcw
.chpid
[i
] == chpid
->id
)) {
534 if (stsch(sch
->schid
, &sch
->schib
) != 0) {
536 spin_unlock_irq(sch
->lock
);
543 spin_unlock_irq(sch
->lock
);
546 sch
->lpm
= ((sch
->schib
.pmcw
.pim
&
547 sch
->schib
.pmcw
.pam
&
551 if (sch
->driver
&& sch
->driver
->verify
)
552 sch
->driver
->verify(&sch
->dev
);
554 spin_unlock_irq(sch
->lock
);
/* Drop the reference taken by get_subchannel_by_schid(). */
555 put_device(&sch
->dev
);
/*
 * chsc_chp_online - handle a channel path coming online: trace the event
 * ("cadd<cssid>.<id>") and, if the path's status is known, walk all
 * subchannels adding the path via __chp_add().
 */
559 void chsc_chp_online(struct chp_id chpid
)
563 sprintf(dbf_txt
, "cadd%x.%02x", chpid
.cssid
, chpid
.id
);
564 CIO_TRACE_EVENT(2, dbf_txt
);
566 if (chp_get_status(chpid
) != 0)
567 for_each_subchannel(__chp_add
, &chpid
);
/*
 * __s390_subchannel_vary_chpid - apply a vary on/off of @chpid to @sch
 * under its lock: locate the matching path in the subchannel's ssd info,
 * then either reprobe/verify (vary on) or terminate I/O running on the
 * path and re-verify (vary off). With no paths left, the subchannel is
 * scheduled for evaluation.
 * NOTE(review): extraction dropped interior lines (mask computation, the
 * on/off branch structure, lpm updates) -- incomplete view.
 */
570 static void __s390_subchannel_vary_chpid(struct subchannel
*sch
,
571 struct chp_id chpid
, int on
)
577 spin_lock_irqsave(sch
->lock
, flags
);
/* Find @chpid among the subchannel's described paths. */
579 for (chp
= 0; chp
< 8; chp
++) {
581 if (!(sch
->ssd_info
.path_mask
& mask
))
583 if (!chp_id_is_equal(&sch
->ssd_info
.chpid
[chp
], &chpid
))
590 device_trigger_reprobe(sch
);
591 else if (sch
->driver
&& sch
->driver
->verify
)
592 sch
->driver
->verify(&sch
->dev
);
597 if (check_for_io_on_path(sch
, mask
)) {
598 if (device_is_online(sch
))
599 /* Path verification is done after killing. */
602 /* Kill and retry internal I/O. */
603 terminate_internal_io(sch
);
604 /* Re-start path verification. */
605 if (sch
->driver
&& sch
->driver
->verify
)
606 sch
->driver
->verify(&sch
->dev
);
608 } else if (!sch
->lpm
) {
609 if (device_trigger_verify(sch
) != 0)
610 css_schedule_eval(sch
->schid
);
611 } else if (sch
->driver
&& sch
->driver
->verify
)
612 sch
->driver
->verify(&sch
->dev
);
615 spin_unlock_irqrestore(sch
->lock
, flags
);
/*
 * s390_subchannel_vary_chpid_off - bus_for_each_dev() callback: vary the
 * channel path in @data offline (on == 0) for the subchannel behind @dev.
 */
618 static int s390_subchannel_vary_chpid_off(struct device
*dev
, void *data
)
620 struct subchannel
*sch
;
621 struct chp_id
*chpid
;
623 sch
= to_subchannel(dev
);
626 __s390_subchannel_vary_chpid(sch
, *chpid
, 0);
/*
 * s390_subchannel_vary_chpid_on - bus_for_each_dev() callback: vary the
 * channel path in @data online (on == 1) for the subchannel behind @dev.
 */
630 static int s390_subchannel_vary_chpid_on(struct device
*dev
, void *data
)
632 struct subchannel
*sch
;
633 struct chp_id
*chpid
;
635 sch
= to_subchannel(dev
);
638 __s390_subchannel_vary_chpid(sch
, *chpid
, 1);
/*
 * __s390_vary_chpid_on - for_each_subchannel() worker run after a vary
 * on: known subchannels just drop their reference; unknown ones that
 * respond to stsch_err() are queued for slow-path evaluation.
 */
643 __s390_vary_chpid_on(struct subchannel_id schid
, void *data
)
646 struct subchannel
*sch
;
648 sch
= get_subchannel_by_schid(schid
);
650 put_device(&sch
->dev
);
653 if (stsch_err(schid
, &schib
))
656 /* Put it on the slow path. */
657 css_schedule_eval(schid
);
662 * chsc_chp_vary - propagate channel-path vary operation to subchannels
663 * @chpid: channel-path ID
664 * @on: non-zero for vary online, zero for vary offline
666 int chsc_chp_vary(struct chp_id chpid
, int on
)
669 * Redo PathVerification on the devices the chpid connects to
672 bus_for_each_dev(&css_bus_type
, NULL
, &chpid
, on
?
673 s390_subchannel_vary_chpid_on
:
674 s390_subchannel_vary_chpid_off
);
676 /* Scan for new devices on varied on path. */
677 for_each_subchannel(__s390_vary_chpid_on
, NULL
);
/*
 * chsc_remove_cmg_attr - remove channel-measurement sysfs attributes
 * from every channel path of @css.
 */
682 chsc_remove_cmg_attr(struct channel_subsystem
*css
)
686 for (i
= 0; i
<= __MAX_CHPID
; i
++) {
689 chp_remove_cmg_attr(css
->chps
[i
]);
/*
 * chsc_add_cmg_attr - add channel-measurement sysfs attributes to every
 * channel path of @css; on failure, roll back the attributes already
 * added (the descending loop) before returning the error.
 */
694 chsc_add_cmg_attr(struct channel_subsystem
*css
)
699 for (i
= 0; i
<= __MAX_CHPID
; i
++) {
702 ret
= chp_add_cmg_attr(css
->chps
[i
]);
/* Rollback: strip attributes from paths 0..i-1 on error. */
708 for (--i
; i
>= 0; i
--) {
711 chp_remove_cmg_attr(css
->chps
[i
]);
/*
 * __chsc_do_secm - issue the CHSC set-channel-monitor (SECM) command
 * (code 0x0016) for @css using the caller-supplied DMA @page as the
 * request/response area. operation_code 0 enables, 1 disables channel
 * measurement; cub_addr1/2 are the channel-utilization blocks.
 * Returns -ENODEV/-EBUSY on a bad condition code and maps the CHSC
 * response codes below to errors (error values themselves were dropped
 * by the extraction).
 */
717 __chsc_do_secm(struct channel_subsystem
*css
, int enable
, void *page
)
720 struct chsc_header request
;
721 u32 operation_code
: 2;
730 struct chsc_header response
;
735 } __attribute__ ((packed
)) *secm_area
;
739 secm_area
->request
.length
= 0x0050;
740 secm_area
->request
.code
= 0x0016;
742 secm_area
->key
= PAGE_DEFAULT_KEY
;
743 secm_area
->cub_addr1
= (u64
)(unsigned long)css
->cub_addr1
;
744 secm_area
->cub_addr2
= (u64
)(unsigned long)css
->cub_addr2
;
/* 0 = enable measurement, 1 = disable. */
746 secm_area
->operation_code
= enable
? 0 : 1;
748 ccode
= chsc(secm_area
);
750 return (ccode
== 3) ? -ENODEV
: -EBUSY
;
752 switch (secm_area
->response
.code
) {
753 case 0x0001: /* Success. */
756 case 0x0003: /* Invalid block. */
757 case 0x0007: /* Invalid format. */
758 case 0x0008: /* Other invalid block. */
759 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
762 case 0x0004: /* Command not provided in model. */
763 CIO_CRW_EVENT(2, "Model does not provide secm\n");
766 case 0x0102: /* cub addresses incorrect */
767 CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
770 case 0x0103: /* key error */
771 CIO_CRW_EVENT(2, "Access key error in secm\n");
774 case 0x0105: /* error while starting */
775 CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
779 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
780 secm_area
->response
.code
);
/*
 * chsc_secm - enable/disable channel measurement for @css under
 * css->mutex: allocate the cub pages when enabling, issue SECM via
 * __chsc_do_secm(), and add or remove the cmg sysfs attributes to match
 * the resulting state. If adding attributes fails, measurement is turned
 * back off; cub pages are freed whenever measurement ends up disabled.
 */
787 chsc_secm(struct channel_subsystem
*css
, int enable
)
792 secm_area
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
796 mutex_lock(&css
->mutex
);
797 if (enable
&& !css
->cm_enabled
) {
798 css
->cub_addr1
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
799 css
->cub_addr2
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
/* Either cub allocation failing aborts the whole operation. */
800 if (!css
->cub_addr1
|| !css
->cub_addr2
) {
801 free_page((unsigned long)css
->cub_addr1
);
802 free_page((unsigned long)css
->cub_addr2
);
803 free_page((unsigned long)secm_area
);
804 mutex_unlock(&css
->mutex
);
808 ret
= __chsc_do_secm(css
, enable
, secm_area
);
810 css
->cm_enabled
= enable
;
811 if (css
->cm_enabled
) {
812 ret
= chsc_add_cmg_attr(css
);
/* Attribute setup failed: disable measurement again. */
814 memset(secm_area
, 0, PAGE_SIZE
);
815 __chsc_do_secm(css
, 0, secm_area
);
819 chsc_remove_cmg_attr(css
);
821 if (!css
->cm_enabled
) {
822 free_page((unsigned long)css
->cub_addr1
);
823 free_page((unsigned long)css
->cub_addr2
);
825 mutex_unlock(&css
->mutex
);
826 free_page((unsigned long)secm_area
);
/*
 * chsc_determine_channel_path_description - fetch the channel path
 * description for @chpid via the CHSC store-channel-path-description
 * (SCPD) command (code 0x0002) and copy it into @desc. Uses a single
 * chpid range (first_chpid == last_chpid). Returns -ENODEV/-EBUSY on a
 * bad condition code; other CHSC response codes are logged and mapped
 * to errors (values dropped by the extraction).
 */
830 int chsc_determine_channel_path_description(struct chp_id chpid
,
831 struct channel_path_desc
*desc
)
836 struct chsc_header request
;
842 struct chsc_header response
;
844 struct channel_path_desc desc
;
845 } __attribute__ ((packed
)) *scpd_area
;
847 scpd_area
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
851 scpd_area
->request
.length
= 0x0010;
852 scpd_area
->request
.code
= 0x0002;
854 scpd_area
->first_chpid
= chpid
.id
;
855 scpd_area
->last_chpid
= chpid
.id
;
857 ccode
= chsc(scpd_area
);
859 ret
= (ccode
== 3) ? -ENODEV
: -EBUSY
;
863 switch (scpd_area
->response
.code
) {
864 case 0x0001: /* Success. */
865 memcpy(desc
, &scpd_area
->desc
,
866 sizeof(struct channel_path_desc
));
869 case 0x0003: /* Invalid block. */
870 case 0x0007: /* Invalid format. */
871 case 0x0008: /* Other invalid block. */
872 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
875 case 0x0004: /* Command not provided in model. */
876 CIO_CRW_EVENT(2, "Model does not provide scpd\n");
880 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
881 scpd_area
->response
.code
);
885 free_page((unsigned long)scpd_area
);
/*
 * chsc_initialize_cmg_chars - copy the measurement characteristics
 * @chars into a freshly kmalloc'd chp->cmg_chars, keeping only the
 * values whose bit is set in @cmcv (mask 0x80 >> (i + 3)); the rest are
 * zeroed. With no allocation, the path has no cmg-dependent data.
 * NOTE(review): the kmalloc GFP flags and the branch keeping/zeroing
 * each value were partly dropped by the extraction.
 */
890 chsc_initialize_cmg_chars(struct channel_path
*chp
, u8 cmcv
,
891 struct cmg_chars
*chars
)
896 chp
->cmg_chars
= kmalloc(sizeof(struct cmg_chars
),
898 if (chp
->cmg_chars
) {
900 struct cmg_chars
*cmg_chars
;
902 cmg_chars
= chp
->cmg_chars
;
903 for (i
= 0; i
< NR_MEASUREMENT_CHARS
; i
++) {
904 mask
= 0x80 >> (i
+ 3);
906 cmg_chars
->values
[i
] = chars
->values
[i
];
908 cmg_chars
->values
[i
] = 0;
913 /* No cmg-dependent data. */
/*
 * chsc_get_channel_measurement_chars - fetch channel-measurement
 * characteristics for @chp via the CHSC store-channel-measurement-
 * characteristics (SCMC) command (code 0x0022). On success (and when the
 * data is valid), stores cmg and shared state on the channel path and
 * initializes the cmg characteristic values. Returns -ENODEV/-EBUSY on
 * a bad condition code.
 */
918 int chsc_get_channel_measurement_chars(struct channel_path
*chp
)
923 struct chsc_header request
;
929 struct chsc_header response
;
940 u32 data
[NR_MEASUREMENT_CHARS
];
941 } __attribute__ ((packed
)) *scmc_area
;
943 scmc_area
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
947 scmc_area
->request
.length
= 0x0010;
948 scmc_area
->request
.code
= 0x0022;
/* Single-chpid range for this channel path. */
950 scmc_area
->first_chpid
= chp
->chpid
.id
;
951 scmc_area
->last_chpid
= chp
->chpid
.id
;
953 ccode
= chsc(scmc_area
);
955 ret
= (ccode
== 3) ? -ENODEV
: -EBUSY
;
959 switch (scmc_area
->response
.code
) {
960 case 0x0001: /* Success. */
961 if (!scmc_area
->not_valid
) {
962 chp
->cmg
= scmc_area
->cmg
;
963 chp
->shared
= scmc_area
->shared
;
964 chsc_initialize_cmg_chars(chp
, scmc_area
->cmcv
,
973 case 0x0003: /* Invalid block. */
974 case 0x0007: /* Invalid format. */
975 case 0x0008: /* Invalid bit combination. */
976 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
979 case 0x0004: /* Command not provided. */
980 CIO_CRW_EVENT(2, "Model does not provide scmc\n");
984 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
985 scmc_area
->response
.code
);
989 free_page((unsigned long)scmc_area
);
/*
 * chsc_alloc_sei_area - allocate the DMA-capable page used as the CHSC
 * store-event-information area; registered via subsys_initcall below.
 * Returns 0 on success, -ENOMEM when the page could not be allocated.
 */
994 chsc_alloc_sei_area(void)
996 sei_page
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
998 printk(KERN_WARNING
"Can't allocate page for processing of " \
999 "chsc machine checks!\n");
1000 return (sei_page
? 0 : -ENOMEM
);
/*
 * chsc_enable_facility - issue the CHSC set-domain-attributes (SDA)
 * command (code 0x0031) with the given @operation_code to enable a
 * facility. Returns -ENODEV/-EBUSY on a bad condition code; specific
 * error values for the response codes were dropped by the extraction.
 */
1004 chsc_enable_facility(int operation_code
)
1008 struct chsc_header request
;
1015 u32 operation_data_area
[252];
1016 struct chsc_header response
;
1020 } __attribute__ ((packed
)) *sda_area
;
1022 sda_area
= (void *)get_zeroed_page(GFP_KERNEL
|GFP_DMA
);
1025 sda_area
->request
.length
= 0x0400;
1026 sda_area
->request
.code
= 0x0031;
1027 sda_area
->operation_code
= operation_code
;
1029 ret
= chsc(sda_area
);
1031 ret
= (ret
== 3) ? -ENODEV
: -EBUSY
;
1034 switch (sda_area
->response
.code
) {
1035 case 0x0001: /* everything ok */
1038 case 0x0003: /* invalid request block */
1042 case 0x0004: /* command not provided */
1043 case 0x0101: /* facility not provided */
1046 default: /* something went wrong */
1050 free_page((unsigned long)sda_area
);
/* Allocate the SEI page early during subsystem initialization. */
1054 subsys_initcall(chsc_alloc_sei_area
);
/* CSS characteristics filled in by chsc_determine_css_characteristics(). */
1056 struct css_general_char css_general_characteristics
;
1057 struct css_chsc_char css_chsc_characteristics
;
/*
 * chsc_determine_css_characteristics - issue the CHSC store-channel-
 * subsystem-characteristics (SCSC) command (code 0x0010) and copy the
 * general and chsc characteristic words into the module-level globals
 * css_general_characteristics / css_chsc_characteristics.
 */
1060 chsc_determine_css_characteristics(void)
1064 struct chsc_header request
;
1068 struct chsc_header response
;
1070 u32 general_char
[510];
1072 } __attribute__ ((packed
)) *scsc_area
;
1074 scsc_area
= (void *)get_zeroed_page(GFP_KERNEL
| GFP_DMA
);
1076 printk(KERN_WARNING
"cio: Was not able to determine available" \
1077 "CHSCs due to no memory.\n");
1081 scsc_area
->request
.length
= 0x0010;
1082 scsc_area
->request
.code
= 0x0010;
1084 result
= chsc(scsc_area
);
1086 printk(KERN_WARNING
"cio: Was not able to determine " \
1087 "available CHSCs, cc=%i.\n", result
);
/* Response code 1 indicates success. */
1092 if (scsc_area
->response
.code
!= 1) {
1093 printk(KERN_WARNING
"cio: Was not able to determine " \
1094 "available CHSCs.\n");
1098 memcpy(&css_general_characteristics
, scsc_area
->general_char
,
1099 sizeof(css_general_characteristics
));
1100 memcpy(&css_chsc_characteristics
, scsc_area
->chsc_char
,
1101 sizeof(css_chsc_characteristics
));
1103 free_page ((unsigned long) scsc_area
);
/* Export the characteristics blocks for GPL'd modules. */
1107 EXPORT_SYMBOL_GPL(css_general_characteristics
);
1108 EXPORT_SYMBOL_GPL(css_chsc_characteristics
);