/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
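
/*
 * Walk every possible subchannel ID in every enabled subchannel set
 * (ssid 0 through max_ssid), calling fn for each ID until fn returns
 * a non-zero value.
 */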
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
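
/*
 * Staged traversal helpers: cb.set starts out containing every
 * subchannel ID; call_fn_known_sch() removes each ID found on the css
 * bus, so the IDs still in the set afterwards are exactly the
 * unregistered ("unknown") subchannels.
 */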
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
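
/*
 * Bit i of the path installed mask (pim) in the PMCW indicates whether
 * channel path chpid[i] is installed; ssd_from_pmcw() below therefore
 * copies pim into path_mask and only initializes chpid[i] for installed
 * paths.
 */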
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}
static ssize_t
type_show(struct device *dev, struct device_attribute *attr,
	  char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);
static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr,
	      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
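
/*
 * The modalias exported above has the form "css:t<subchannel type>".
 * css_uevent() further down emits the same string as MODALIAS, which
 * lets udev load a matching subchannel driver module for subchannels
 * that come up without a driver.
 */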
static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);
	return ret;
}
static int
check_subchannel(struct device * dev, void * data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
			 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
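
/*
 * Slow path: subchannel IDs whose evaluation returned -EAGAIN are
 * collected in slow_subchannel_set and re-evaluated later from
 * workqueue context.
 */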
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}
/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	int ret;

	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
	case -EIO:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}
static void reprobe_after_idle(struct work_struct *unused)
{
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	if (need_reprobe)
		css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(4, "reprobe start\n");

	/* Make sure initial subchannel scan is done. */
	if (atomic_read(&ccw_device_init_count) != 0) {
		queue_work(ccw_device_work, &reprobe_idle_work);
		return;
	}
	need_reprobe = 0;
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(slow_path_wq, &css_reprobe_work);
}

EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	if (css_register_subchannel(sch)) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return 0;
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}
static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}
static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
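
/*
 * cm_enable takes a hex 0/1 and switches channel measurement off/on;
 * from userspace that is (sysfs path may vary by channel subsystem id):
 *
 *	echo 1 > /sys/devices/css0/cm_enable
 */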
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};
/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}
static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem (void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	kfree(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
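
/*
 * A css driver matches a subchannel when the subchannel's type (st) is
 * listed in the driver's css_device_id table; the table is terminated
 * by an entry with match_flags == 0.
 */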
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}
*dev
)
990 struct subchannel
*sch
;
993 sch
= to_subchannel(dev
);
994 sch
->driver
= to_cssdriver(dev
->driver
);
995 ret
= sch
->driver
->probe
? sch
->driver
->probe(sch
) : 0;
static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}
static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}
static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}
static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}
static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}
static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}
static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}
static struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};
struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
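
/*
 * Illustrative sketch only (all names below are made up): a subchannel
 * driver would typically register itself roughly like this:
 *
 *	static struct css_device_id my_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },	- terminator, match_flags == 0 -
 *	};
 *	static struct css_driver my_css_driver = {
 *		.owner = THIS_MODULE,
 *		.name = "my_sch",
 *		.subchannel_type = my_subchannel_ids,
 *	};
 *	...
 *	return css_driver_register(&my_css_driver);
 */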
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);