/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cohuck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
static void *sei_page;

static int new_channel_path(int chpid);
static void
set_chp_logically_online(int chp, int onoff)
{
	css[0]->chps[chp]->state = onoff;
}
static int
get_chp_status(int chp)
{
	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}
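/*
 * Illustrative sketch, not part of the original file: get_chp_status()
 * is effectively tri-state -- -ENODEV when no channel_path structure
 * exists for the chpid yet, 0 when the chpid is logically offline, and
 * nonzero when it is online.  A hypothetical caller distinguishing all
 * three cases:
 *
 *	int state = get_chp_status(chpid);
 *
 *	if (state < 0)
 *		new_channel_path(chpid);	// chpid not known yet
 *	else if (state == 0)
 *		return;				// varied logically offline
 *	// else: online, the path may be used
 */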
void
chsc_validate_chpids(struct subchannel *sch)
{
	int mask, chp;

	for (chp = 0; chp <= 7; chp++) {
		mask = 0x80 >> chp;
		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
			/* disable using this path */
			sch->opm &= ~mask;
	}
}
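/*
 * Illustrative note, not part of the original file: the path masks used
 * throughout this file (pim/pam/pom, lpm, opm, lpum) are big-endian bit
 * masks, so installed path j (0-7) corresponds to bit (0x80 >> j).
 * Clearing path 2 from the operational path mask, for example:
 *
 *	sch->opm &= ~(0x80 >> 2);	// clears bit 0x20
 */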
void
chpid_is_actually_online(int chp)
{
	int state;

	state = get_chp_status(chp);
	if (state < 0) {
		need_rescan = 1;
		queue_work(slow_path_wq, &slow_path_work);
	} else
		WARN_ON(!state);
}
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;
	struct {
		struct chsc_header request;
		u16 reserved1a:10;
		u16 ssid:2;
		u16 reserved1b:4;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8 unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

	ssd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0004,
	};

	ssd_area->ssid = sch->schid.ssid;
	ssd_area->f_sch = sch->schid.sch_no;
	ssd_area->l_sch = sch->schid.sch_no;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}
	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:     All fields have meaning
	 * 1: CHSC subchannel:    Only sch_val, st and sch
	 *                        have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *                        have meaning
	 * 3: ADM subchannel:     Only sch_val, st and sch
	 *                        have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch 0.%x.%04x\n", ssd_area->st,
			      sch->schid.ssid, sch->schid.sch_no);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}
	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j] = ssd_area->fla[j];
		}
	}
	return 0;
}
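/*
 * Illustrative note, not part of the original file: every chsc call in
 * this file follows the same pattern -- fill a zeroed request block that
 * starts with a struct chsc_header giving its length and command code,
 * issue chsc(), map a nonzero condition code to an errno, then decode
 * response.code (0x0001 means success).  A minimal sketch:
 *
 *	area->request = (struct chsc_header) {
 *		.length = 0x0010,
 *		.code = 0x0004,		// e.g. store subchannel description
 *	};
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		return (ccode == 3) ? -ENODEV : -EBUSY;
 *	if (area->response.code != 0x0001)
 *		return -EIO;		// see the switch statements above
 */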
int
css_get_ssd_info(struct subchannel *sch)
{
	int ret;
	void *page;

	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	spin_lock_irq(&sch->lock);
	ret = chsc_get_sch_desc_irq(sch, page);
	if (ret) {
		static int cio_chsc_err_msg;

		if (!cio_chsc_err_msg) {
			printk(KERN_ERR
			       "chsc_get_sch_descriptions:"
			       " Error %d while doing chsc; "
			       "processing some machine checks may "
			       "not work\n", ret);
			cio_chsc_err_msg = 1;
		}
	}
	spin_unlock_irq(&sch->lock);
	free_page((unsigned long)page);
	if (!ret) {
		int j, chpid;
		/* Allocate channel path structures, if needed. */
		for (j = 0; j < 8; j++) {
			chpid = sch->ssd_info.chpid[j];
			if (chpid && (get_chp_status(chpid) < 0))
				new_channel_path(chpid);
		}
	}
	return ret;
}
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct channel_path *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == chpid->id)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock(&sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;
	if (sch->vpm == mask)
		goto out_unreg;

	if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
				     SCSW_ACTL_HALT_PEND |
				     SCSW_ACTL_START_PEND |
				     SCSW_ACTL_RESUME_PEND)) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc = cio_cancel(sch);

		if (cc == -ENODEV)
			goto out_unreg;
		if (cc == -EINVAL) {
			cc = cio_clear(sch);
			if (cc == -ENODEV)
				goto out_unreg;
			/* Call handler. */
			if (sch->driver && sch->driver->termination)
				sch->driver->termination(&sch->dev);
			goto out_unlock;
		}
	} else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
		   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
		   (sch->schib.pmcw.lpum == mask)) {
		int cc;

		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out_unlock:
	spin_unlock(&sch->lock);
	return 0;
out_unreg:
	spin_unlock(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}
static void
s390_set_chpid_offline(__u8 chpid)
{
	char dbf_txt[15];
	struct device *dev;

	sprintf(dbf_txt, "chpr%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (get_chp_status(chpid) <= 0)
		return;
	dev = get_device(&css[0]->chps[chpid]->dev);
	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
			 s390_subchannel_remove_chpid);

	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	put_device(dev);
}
struct res_acc_data {
	struct channel_path *chp;
	u32 fla_mask;
	u16 fla;
};
static int
s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
	int found;
	int chp;
	int ccode;

	found = 0;
	for (chp = 0; chp <= 7; chp++)
		/*
		 * check if chpid is in information updated by ssd
		 */
		if (sch->ssd_info.valid &&
		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
		    == res_data->fla) {
			found = 1;
			break;
		}

	if (found == 0)
		return 0;

	/*
	 * Do a stsch to update our subchannel structure with the
	 * new path information and eventually check for logically
	 * offline chpids.
	 */
	ccode = stsch(sch->schid, &sch->schib);
	if (ccode > 0)
		return 0;

	return 0x80 >> chp;
}
static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = (struct res_acc_data *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(&sch->lock);

	chp_mask = s390_process_res_acc_sch(res_data, sch);

	if (chp_mask == 0) {
		spin_unlock_irq(&sch->lock);
		return 0;
	}
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(&sch->lock);
	put_device(&sch->dev);
	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
}
static int
s390_process_res_acc(struct res_acc_data *res_data)
{
	int rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	rc = for_each_subchannel(__s390_process_res_acc, res_data);
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	else if (rc != -EAGAIN)
		rc = 0;
	return rc;
}
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} *lir;

	lir = (struct lir *) data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}
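/*
 * Illustrative sketch, not part of the original file: only the first
 * word of the incident-node descriptor is inspected -- at least one of
 * the two high-order validity bits (0xc0000000) must be set, and byte 3
 * then carries the chpid:
 *
 *	u32 word = lir->indesc[0];
 *	int chpid = word & 0x000000ff;	// e.g. 0xd0000047 -> chpid 0x47
 */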
static void
chsc_process_crw(void)
{
	int chpid, ret;
	struct res_acc_data res_data;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return;
	/*
	 * Build the chsc request block for store event information
	 * and do the call.
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		int ccode, status;
		struct device *dev;

		memset(sei_area, 0, sizeof(*sei_area));
		memset(&res_data, 0, sizeof(struct res_acc_data));
		sei_area->request = (struct chsc_header) {
			.length = 0x0010,
			.code   = 0x000e,
		};

		ccode = chsc(sei_area);
		if (ccode > 0)
			return;

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
				      "successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return;
		}

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				      "has been lost due to overflow!\n");
		if (sei_area->rs != 4) {
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibility */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei:\n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				break;
			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
			res_data.chp = to_channelpath(dev);
			pr_debug("chpid: %x", sei_area->rsid);
			if ((sei_area->vf & 0xc0) != 0) {
				res_data.fla = sei_area->fla;
				if ((sei_area->vf & 0xc0) == 0xc0) {
					pr_debug(" full link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xffff;
				} else {
					pr_debug(" link addr: %x",
						 sei_area->fla);
					res_data.fla_mask = 0xff00;
				}
			}
			ret = s390_process_res_acc(&res_data);
			pr_debug("\n\n");
			put_device(dev);
			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
}
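/*
 * Illustrative note, not part of the original file: the two high-order
 * bits of the validity flags decide how much of the full link address
 * can be trusted, and the code above turns that into a compare mask:
 *
 *	(vf & 0xc0) == 0x00  ->  no link address, fla_mask stays 0
 *	(vf & 0xc0) == 0xc0  ->  full link address, fla_mask = 0xffff
 *	otherwise            ->  link address only, fla_mask = 0xff00
 *
 * s390_process_res_acc_sch() then matches subchannels with
 * (ssd_info.fla[chp] & fla_mask) == fla, so a wider mask means a more
 * selective scan.
 */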
static int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	int ret;

	if (stsch(schid, &schib))
		/* We're through */
		return need_rescan ? -EAGAIN : -ENXIO;

	/* Put it on the slow path. */
	ret = css_enqueue_subchannel_slow(schid);
	if (ret) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i;
	struct channel_path *chp;
	struct subchannel *sch;

	chp = (struct channel_path *)data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock(&sch->lock);
	for (i = 0; i < 8; i++)
		if (sch->schib.pmcw.chpid[i] == chp->id) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock(&sch->lock);
				return -ENXIO;
			}
			break;
		}
	if (i == 8) {
		spin_unlock(&sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | 0x80 >> i) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock(&sch->lock);
	put_device(&sch->dev);
	return 0;
}
static int
chp_add(int chpid)
{
	int rc;
	char dbf_txt[15];
	struct device *dev;

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	dev = get_device(&css[0]->chps[chpid]->dev);
	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
	if (css_slow_subchannels_exist())
		rc = -EAGAIN;
	if (rc != -EAGAIN)
		rc = 0;
	put_device(dev);
	return rc;
}
/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (on == 0) {
		/* Path has gone. We use the link incident routine. */
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
{
	int cc;

	if (!device_is_online(sch))
		/* cio could be doing I/O. */
		return 0;
	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
		device_set_waiting(sch);
		return 1;
	}
	return 0;
}
static void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->schid)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}
static int
s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int
s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	__u8 *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	if (css_enqueue_subchannel_slow(schid)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
		return -EAGAIN;
	}
	return 0;
}
/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int
s390_vary_chpid(__u8 chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on ? "varyon%x" : "varyoff%x", chpid);
	CIO_TRACE_EVENT(2, dbf_text);

	status = get_chp_status(chpid);
	if (status < 0) {
		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
		return -EINVAL;
	}

	if (!on && !status) {
		printk(KERN_ERR "chpid %x is already offline\n", chpid);
		return -EINVAL;
	}

	set_chp_logically_online(chpid, on);

	/*
	 * Redo PathVerification on the devices the chpid connects to.
	 */
	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on varied on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	if (need_rescan || css_slow_subchannels_exist())
		queue_work(slow_path_wq, &slow_path_work);
	return 0;
}
/*
 * Files for the channel path entries.
 */
static ssize_t
chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
		sprintf(buf, "offline\n"));
}
static ssize_t
chp_status_write(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct channel_path *cp = container_of(dev, struct channel_path, dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2))
		error = s390_vary_chpid(cp->id, 1);
	else if (!strnicmp(cmd, "off", 3))
		error = s390_vary_chpid(cp->id, 0);
	else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
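/*
 * Illustrative usage, not part of the original file: once the attribute
 * group is registered in new_channel_path(), each chpid shows up as a
 * child of the css device using the "chp0.%x" bus_id set below, and
 * paths can be varied from user space (the device names here are
 * examples):
 *
 *	# cat /sys/devices/css0/chp0.4a/status
 *	online
 *	# echo off > /sys/devices/css0/chp0.4a/status
 */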
static ssize_t
chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = container_of(dev, struct channel_path, dev);

	if (!chp)
		return 0;
	return sprintf(buf, "%x\n", chp->desc.desc);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_type.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};
static void
chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = container_of(dev, struct channel_path, dev);
	kfree(cp);
}
static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0002,
	};

	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
/*
 * Entries for chpids on the system bus.
 * This replaces /proc/chpids.
 */
static int
new_channel_path(int chpid)
{
	struct channel_path *chp;
	int ret;

	chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;
	memset(chp, 0, sizeof(struct channel_path));

	/* fill in status, etc. */
	chp->id = chpid;
	chp->state = 1;
	chp->dev = (struct device) {
		.parent  = &css[0]->device,
		.release = chp_release,
	};
	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
	if (ret)
		goto out_free;

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		printk(KERN_WARNING "%s: could not register %02x\n",
		       __func__, chpid);
		goto out_free;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out_free;
	}
	css[0]->chps[chpid] = chp;
	return ret;
out_free:
	kfree(chp);
	return ret;
}
void *
chsc_get_chp_desc(struct subchannel *sch, int chp_no)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	return desc;
}
static int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		printk(KERN_WARNING "Can't allocate page for processing of "
		       "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}
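/*
 * Illustrative note, not part of the original file: all chsc request
 * blocks in this file are single zeroed 4K pages allocated with
 * GFP_KERNEL | GFP_DMA; on s390 GFP_DMA keeps the block in 31-bit
 * addressable storage, which the chsc instruction requires.  The
 * allocate/free pair every caller uses:
 *
 *	void *page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	free_page((unsigned long)page);
 */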
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request = (struct chsc_header) {
		.length = 0x0400,
		.code = 0x0031,
	};
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}
);
1135 struct css_general_char css_general_characteristics
;
1136 struct css_chsc_char css_chsc_characteristics
;
static int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0010,
	};

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING "cio: Was not able to determine "
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}

	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);