/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"
static void *sei_page;

/* Map a chsc response code to a Linux error code. */
static int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data. */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}
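/*
 * Example (illustrative sketch; the subchannel number is made up): how a
 * caller might use chsc_get_ssd_info() to look at the channel paths of one
 * subchannel.
 *
 *	struct subchannel_id schid;
 *	struct chsc_ssd_info ssd;
 *	int i, rc;
 *
 *	init_subchannel_id(&schid);
 *	schid.sch_no = 0x0001;
 *	rc = chsc_get_ssd_info(schid, &ssd);
 *	if (rc == 0)
 *		for (i = 0; i < 8; i++)
 *			if (ssd.path_mask & (0x80 >> i))
 *				;	// ssd.chpid[i] is an installed path
 */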
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
		return 1;
	return 0;
}
static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0)
			css_schedule_eval(sch->schid);
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(sch);
}
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	int j;
	int mask;
	struct chp_id *chpid = data;
	struct schib schib;

	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!css_sch_is_valid(&schib))
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if (check_for_io_on_path(sch, mask)) {
		if (device_is_online(sch))
			device_kill_io(sch);
		else {
			terminate_internal_io(sch);
			/* Re-start path verification. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(sch);
		}
	} else {
		/* trigger path verification. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(sch);
		else if (sch->lpm == mask)
			goto out_unreg;
	}

	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through. */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
struct res_acc_data {
	struct chp_id chpid;
	u32 fla_mask;
	u16 fla;
};
static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
			      struct res_acc_data *data)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & data->fla_mask) != data->fla))
			continue;
		return mask;
	}
	return 0;
}
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data = data;

	spin_lock_irq(sch->lock);
	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
	if (chp_mask == 0)
		goto out;
	if (stsch(sch->schid, &sch->schib))
		goto out;
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);
out:
	spin_unlock_irq(sch->lock);

	return 0;
}
static void s390_process_res_acc(struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, res_data);
}
static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
	return (u16) (lir->indesc[0] & 0x000000ff);
}
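/*
 * Worked example (hypothetical value): for indesc[0] = 0xd20000b3 the checks
 * above see a valid node descriptor (0xd20000b3 & 0xc0000000 != 0) that is
 * not a device-type node (0xd20000b3 & 0x10000000 != 0), so the function
 * returns the chpid from byte 3: 0xd20000b3 & 0x000000ff = 0xb3.
 */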
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}
static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};
static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}
void chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;

	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}
static int __chp_add_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through. */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
static int __chp_add(struct subchannel *sch, void *data)
{
	int i, mask;
	struct chp_id *chpid = data;

	spin_lock_irq(sch->lock);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chpid->id))
			break;
	}
	if (i == 8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	if (stsch(sch->schid, &sch->schib)) {
		spin_unlock_irq(sch->lock);
		css_schedule_eval(sch->schid);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);

	spin_unlock_irq(sch->lock);

	return 0;
}
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
					   &chpid);
	}
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	int mask;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (!(sch->ssd_info.path_mask & mask))
			continue;
		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
			continue;

		if (on) {
			sch->opm |= mask;
			sch->lpm |= mask;
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(sch);
			break;
		}
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (check_for_io_on_path(sch, mask)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else {
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
				/* Re-start path verification. */
				if (sch->driver && sch->driver->verify)
					sch->driver->verify(sch);
			}
		} else if (!sch->lpm) {
			if (device_trigger_verify(sch) != 0)
				css_schedule_eval(sch->schid);
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(sch);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}
static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}
static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through. */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to.
	 */
	if (on)
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}
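/*
 * Usage sketch (hypothetical chpid value): varying channel path 0.40
 * offline and back online from other kernel code could look like this.
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x40;
 *	chsc_chp_vary(chpid, 0);	// vary offline
 *	chsc_chp_vary(chpid, 1);	// vary online again
 */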
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
	return ret;
}
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}
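/*
 * Usage sketch (assumes the caller already holds a struct channel_subsystem
 * pointer, e.g. from a sysfs attribute of the css device):
 *
 *	int rc;
 *
 *	rc = chsc_secm(css, 1);	// enable measurement; allocates cub pages
 *	if (rc)
 *		return rc;
 *	...
 *	rc = chsc_secm(css, 0);	// disable again; cub pages are freed
 */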
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
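/*
 * Usage sketch (hypothetical chpid value): reading the descriptor of
 * channel path 0.40 into a caller-provided buffer.
 *
 *	struct channel_path_desc desc;
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x40;
 *	if (chsc_determine_channel_path_description(chpid, &desc) == 0)
 *		;	// desc now holds the channel-path descriptor
 */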
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}
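/*
 * Worked example for the cmcv masking above: for i = 0 the mask is
 * 0x80 >> 3 = 0x10, so measurement characteristic 0 is copied only when
 * bit 0x10 of the 5-bit cmcv field is set; for i = 4 the mask is 0x01.
 * Characteristics whose cmcv bit is clear are zeroed instead.
 */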
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}
int __init
chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}
void __init
chsc_free_sei_area(void)
{
	/* Matches the get_zeroed_page() allocation above. */
	free_page((unsigned long)sei_page);
}
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1 : 4;
		u8 format : 4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5 : 4;
		u32 format2 : 4;
		u32 reserved6 : 24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	free_page((unsigned long)sda_area);
	return ret;
}
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page((unsigned long)scsc_area);
	return result;
}
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);