/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

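/*
 * Illustrative usage sketch (not part of this file): a callback passed
 * to for_each_subchannel() is called once per subchannel id, across all
 * subchannel sets up to max_ssid; a non-zero return value stops the scan
 * and is handed back to the caller:
 *
 *	static int my_cb(struct subchannel_id schid, void *data)
 *	{
 *		return 0;	// 0 means: keep scanning
 *	}
 *	...
 *	for_each_subchannel(my_cb, NULL);
 */
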
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}
	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

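/*
 * Note on the staged traversal above: the idset starts out full and each
 * registered subchannel visited through the bus removes its id from the
 * set, so the subsequent for_each_subchannel() pass calls fn_unknown only
 * for ids without a registered device. If the idset cannot be allocated,
 * the brute-force fallback visits every id and dispatches per id through
 * get_subchannel_by_schid() instead.
 */
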
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

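/*
 * Registration and unregistration are serialized on sch->reg_mutex so
 * that css_sch_device_register() cannot race with an unregister; the
 * device_is_registered() check additionally makes unregistration
 * idempotent.
 */
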
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

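/*
 * Example (illustrative): a pim of 0x80 has only the most significant
 * bit set, so just ssd->chpid[0] is initialized, with its id taken from
 * pmcw->chpid[0]. The path mask is an MSB-first bitmap over the eight
 * possible channel paths of a subchannel.
 */
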
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

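/*
 * The modalias has the form "css:tN", N being the subchannel type as one
 * hex digit - e.g. "css:t0" for an I/O subchannel, assuming the usual
 * s390 encoding where type 0 denotes I/O subchannels. The same string is
 * emitted as MODALIAS in the uevent (see css_uevent() below), allowing
 * udev to load a matching driver module.
 */
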
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

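/*
 * Typical pairing (sketch, mirroring css_evaluate_new_subchannel()
 * below): store the schib first, then validate it:
 *
 *	struct schib schib;
 *
 *	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib))
 *		return 0;	// gone or unusable
 */
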
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
			 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	int ret;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

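/*
 * A return value of -EAGAIN from either evaluation path means "try again
 * on the slow path": the schid is queued via css_schedule_eval() and is
 * picked up later by the slow-path work function with slow == 1.
 */
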
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

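/*
 * idset_fill() marks every possible subchannel id, so the queued work
 * re-evaluates the entire channel subsystem, whereas css_schedule_eval()
 * above marks a single id. Both update the set under slow_subchannel_lock
 * to stay consistent with slow-path work already in flight.
 */
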
void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	int ret;

	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
	case -EIO:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

static void reprobe_after_idle(struct work_struct *unused)
{
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	if (need_reprobe)
		css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(4, "reprobe start\n");

	/* Make sure initial subchannel scan is done. */
	if (atomic_read(&ccw_device_init_count) != 0) {
		queue_work(ccw_device_work, &reprobe_idle_work);
		return;
	}
	need_reprobe = 0;
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(slow_path_wq, &css_reprobe_work);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

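/*
 * Example (illustrative values): for a CRW pair with crw0->rsid == 0x4711
 * and crw1->rsid == 0x0100, the affected subchannel is 0.1.4711 - the
 * rsid of CRW0 carries the subchannel number, bits 8-9 of the rsid of
 * CRW1 carry the subchannel-set id.
 */
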
static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

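/*
 * From userspace, channel measurements are toggled through sysfs, e.g.
 *
 *	echo 1 > /sys/devices/css0/cm_enable
 *
 * The store function parses the value as hex and accepts only 0 and 1;
 * anything else yields -EINVAL.
 */
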
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int i, ret;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret)
			goto out_file;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	kfree(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

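/*
 * A css driver announces the subchannel types it handles through a
 * zero-terminated id table. A hypothetical driver for one subchannel
 * type might declare (sketch, values made up):
 *
 *	static struct css_device_id my_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = 0 },
 *		{ },	// zeroed terminator ends the match loop
 *	};
 */
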
static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

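/*
 * Registration sketch (illustrative, names made up):
 *
 *	static struct css_driver my_css_driver = {
 *		.owner = THIS_MODULE,
 *		.name = "my_sch_driver",
 *		.subchannel_type = my_subchannel_ids,
 *		.probe = my_probe,
 *	};
 *
 *	ret = css_driver_register(&my_css_driver);
 *
 * Only the css-specific members need to be set; name, bus and owner of
 * the embedded device_driver are filled in by css_driver_register().
 */
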
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);