// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static const struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}
struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        if (cb->set)
                idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                               void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        if (fn_known && !fn_unknown) {
                /* Skip idset allocation in case of known-only loop. */
                cb.set = NULL;
                return bus_for_each_dev(&css_bus_type, NULL, &cb,
                                        call_fn_known_sch);
        }

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}
static void css_sch_todo(struct work_struct *work);

static void css_sch_create_locks(struct subchannel *sch)
{
        spin_lock_init(&sch->lock);
        mutex_init(&sch->reg_mutex);
}
static void css_subchannel_release(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);

        sch->config.intparm = 0;
        cio_commit_config(sch);
        kfree(sch->driver_override);
        kfree(sch);
}
static int css_validate_subchannel(struct subchannel_id schid,
                                   struct schib *schib)
{
        int err;

        switch (schib->pmcw.st) {
        case SUBCHANNEL_TYPE_IO:
        case SUBCHANNEL_TYPE_MSG:
                if (!css_sch_is_valid(schib))
                        err = -ENODEV;
                else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
                        CIO_MSG_EVENT(6, "Blacklisted device detected "
                                      "at devno %04X, subchannel set %x\n",
                                      schib->pmcw.dev, schid.ssid);
                        err = -ENODEV;
                } else
                        err = 0;
                break;
        default:
                err = 0;
        }
        if (err)
                goto out;

        CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
                      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
        return err;
}
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
                                        struct schib *schib)
{
        struct subchannel *sch;
        int ret;

        ret = css_validate_subchannel(schid, schib);
        if (ret < 0)
                return ERR_PTR(ret);

        sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (!sch)
                return ERR_PTR(-ENOMEM);

        sch->schid = schid;
        sch->schib = *schib;
        sch->st = schib->pmcw.st;

        css_sch_create_locks(sch);

        INIT_WORK(&sch->todo_work, css_sch_todo);
        sch->dev.release = &css_subchannel_release;
        sch->dev.dma_mask = &sch->dma_mask;
        device_initialize(&sch->dev);
        /*
         * The physical addresses for some of the dma structures that can
         * belong to a subchannel need to fit 31 bit width (e.g. ccw).
         */
        ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
        if (ret)
                goto err;
        /*
         * But we don't have such restrictions imposed on the stuff that
         * is handled by the streaming API.
         */
        ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
        if (ret)
                goto err;

        return sch;

err:
        kfree(sch);
        return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                     sch->schid.sch_no);
        ret = device_add(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
        if (ret)
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

        ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sysfs_emit(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sysfs_emit(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = driver_set_override(dev, &sch->driver_override, buf, count);
        if (ret)
                return ret;

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct subchannel *sch = to_subchannel(dev);
        ssize_t len;

        device_lock(dev);
        len = sysfs_emit(buf, "%s\n", sch->driver_override);
        device_unlock(dev);
        return len;
}

static DEVICE_ATTR_RW(driver_override);
static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        &dev_attr_driver_override.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};
static ssize_t chpids_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct subchannel *sch = to_subchannel(dev);
        struct chsc_ssd_info *ssd = &sch->ssd_info;
        ssize_t ret = 0;
        int mask;
        int chp;

        for (chp = 0; chp < 8; chp++) {
                mask = 0x80 >> chp;
                if (ssd->path_mask & mask)
                        ret += sysfs_emit_at(buf, ret, "%02x ", ssd->chpid[chp].id);
                else
                        ret += sysfs_emit_at(buf, ret, "00 ");
        }
        ret += sysfs_emit_at(buf, ret, "\n");
        return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct subchannel *sch = to_subchannel(dev);
        struct pmcw *pmcw = &sch->schib.pmcw;

        return sysfs_emit(buf, "%02x %02x %02x\n",
                          pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct subchannel *sch = to_subchannel(dev);
        struct pmcw *pmcw = &sch->schib.pmcw;

        if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
            (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
                return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
                                  sch->schid.sch_no);
        else
                return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
        &dev_attr_chpids.attr,
        &dev_attr_pimpampom.attr,
        &dev_attr_dev_busid.attr,
        NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
        .groups = io_subchannel_type_groups,
};
int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.groups = default_subch_attr_groups;

        if (sch->st == SUBCHANNEL_TYPE_IO)
                sch->dev.type = &io_subchannel_type;

        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        return ret;
}
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
        struct subchannel *sch;
        int ret;

        sch = css_alloc_subchannel(schid, schib);
        if (IS_ERR(sch))
                return PTR_ERR(sch);

        ret = css_register_subchannel(sch);
        if (ret)
                put_device(&sch->dev);

        return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = (void *)data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;
        int ccode;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        /*
         * The first subchannel that is not-operational (ccode==3)
         * indicates that there aren't any more devices available.
         * If stsch gets an exception, it means the current subchannel set
         * is not valid.
         */
        ccode = stsch(schid, &schib);
        if (ccode)
                return (ccode == 3) ? -ENXIO : ccode;

        return css_probe_device(schid, &schib);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        if (ret != 0 && ret != -EAGAIN) {
                CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
        CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
                      sch->schid.ssid, sch->schid.sch_no, todo);
        if (sch->todo >= todo)
                return;
        /* Get workqueue ref. */
        if (!get_device(&sch->dev))
                return;
        sch->todo = todo;
        if (!queue_work(cio_work_q, &sch->todo_work)) {
                /* Already queued, release workqueue ref. */
                put_device(&sch->dev);
        }
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
static void css_sch_todo(struct work_struct *work)
{
        struct subchannel *sch;
        enum sch_todo todo;
        int ret;

        sch = container_of(work, struct subchannel, todo_work);
        /* Find out todo. */
        spin_lock_irq(&sch->lock);
        todo = sch->todo;
        CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
                      sch->schid.sch_no, todo);
        sch->todo = SCH_TODO_NOTHING;
        spin_unlock_irq(&sch->lock);
        /* Perform todo. */
        switch (todo) {
        case SCH_TODO_NOTHING:
                break;
        case SCH_TODO_EVAL:
                ret = css_evaluate_known_subchannel(sch, 1);
                if (ret == -EAGAIN) {
                        spin_lock_irq(&sch->lock);
                        css_sched_sch_todo(sch, todo);
                        spin_unlock_irq(&sch->lock);
                }
                break;
        case SCH_TODO_UNREG:
                css_sch_device_unregister(sch);
                break;
        }
        /* Release workqueue ref. */
        put_device(&sch->dev);
}
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
        atomic_set(&css_eval_scheduled, 0);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
                /*
                 * The loop might take long time for platforms with lots of
                 * known devices. Allow scheduling here.
                 */
                cond_resched();
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        spin_lock_irq(&slow_subchannel_lock);
                        idset_sch_del_subseq(slow_subchannel_set, schid);
                        spin_unlock_irq(&slow_subchannel_lock);
                        break;
                default:
                        rc = 0;
                }
                /* Allow scheduling here since the containing loop might
                 * take a while. */
                cond_resched();
        }
        return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
        unsigned long flags;

        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        if (idset_is_empty(slow_subchannel_set)) {
                atomic_set(&css_eval_scheduled, 0);
                wake_up(&css_eval_wq);
        }
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, 0);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, 0);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_validpath(struct device *dev, void *data)
{
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);
        struct pmcw *pmcw = &sch->schib.pmcw;

        /*
         * Consider only subchannels that do not have an operational device
         * attached. This is determined from the PAM and POM values of the
         * pmcw; the OPM identifies paths that are currently varied off and
         * therefore must not be taken into account.
         */
        if (sch->st == SUBCHANNEL_TYPE_IO &&
            (sch->opm & pmcw->pam & pmcw->pom))
                idset_sch_del(set, sch->schid);

        return 0;
}

static int __unset_online(struct device *dev, void *data)
{
        struct idset *set = data;
        struct subchannel *sch = to_subchannel(dev);

        if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
                idset_sch_del(set, sch->schid);

        return 0;
}
void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
        unsigned long flags;
        struct idset *set;

        /* Find unregistered subchannels. */
        set = idset_sch_new();
        if (!set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
        idset_fill(set);
        switch (cond) {
        case CSS_EVAL_NO_PATH:
                bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
                break;
        case CSS_EVAL_NOT_ONLINE:
                bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
                break;
        default:
                break;
        }

        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_add_set(slow_subchannel_set, set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, delay);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
        idset_free(set);
}
void css_wait_for_slow_path(void)
{
        flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
        /* Schedule with a delay to allow merging of subsequent calls. */
        css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;
        struct subchannel *sch;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 4) & 3;

        if (crw0->erc == CRW_ERC_PMOD) {
                sch = get_subchannel_by_schid(mchk_schid);
                if (sch) {
                        css_update_ssd_info(sch);
                        put_device(&sch->dev);
                }
        }
        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        struct cpuid cpu_id;

        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid =
                        css->id_valid ? css->cssid : 0;
        } else {
                css->global_pgid.pgid_high.cpu_addr = stap();
        }
        get_cpu_id(&cpu_id);
        css->global_pgid.cpu_id = cpu_id.ident;
        css->global_pgid.cpu_model = cpu_id.machine;
        css->global_pgid.tod_high = tod_high;
}
static void channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css = to_css(dev);

        mutex_destroy(&css->mutex);
        kfree(css);
}
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
                               char *buf)
{
        struct channel_subsystem *css = to_css(dev);

        if (!css->id_valid)
                return -EINVAL;

        return sysfs_emit(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
                            const char *buf, size_t count)
{
        CIO_TRACE_EVENT(4, "usr-rescan");

        css_schedule_eval_all();
        css_complete_work();

        return count;
}
static DEVICE_ATTR_WO(rescan);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
                              char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        mutex_lock(&css->mutex);
        ret = sysfs_emit(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
                               const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        unsigned long val;
        int ret;

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
                              int index)
{
        return css_chsc_characteristics.secm ? attr->mode : 0;
}
static struct attribute *cssdev_attrs[] = {
        &dev_attr_real_cssid.attr,
        &dev_attr_rescan.attr,
        NULL,
};

static struct attribute_group cssdev_attr_group = {
        .attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
        &dev_attr_cm_enable.attr,
        NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
        .attrs = cssdev_cm_attrs,
        .is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
        &cssdev_attr_group,
        &cssdev_cm_attr_group,
        NULL,
};
static int __init setup_css(int nr)
{
        struct channel_subsystem *css;
        int ret;

        css = kzalloc(sizeof(*css), GFP_KERNEL);
        if (!css)
                return -ENOMEM;

        channel_subsystems[nr] = css;
        dev_set_name(&css->device, "css%x", nr);
        css->device.groups = cssdev_attr_groups;
        css->device.release = channel_subsystem_release;
        /*
         * We currently allocate notifier bits with this (using
         * css->device as the device argument with the DMA API)
         * and are fine with 64 bit addresses.
         */
        ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
        if (ret) {
                kfree(css);
                goto out_err;
        }

        mutex_init(&css->mutex);
        ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
        if (!ret) {
                css->id_valid = true;
                pr_info("Partition identifier %01x.%01x\n", css->cssid,
                        css->iid);
        }
        css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

        ret = device_register(&css->device);
        if (ret) {
                put_device(&css->device);
                goto out_err;
        }

        css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
                                         GFP_KERNEL);
        if (!css->pseudo_subchannel) {
                device_unregister(&css->device);
                ret = -ENOMEM;
                goto out_err;
        }

        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        mutex_init(&css->pseudo_subchannel->reg_mutex);
        css_sch_create_locks(css->pseudo_subchannel);

        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        ret = device_register(&css->pseudo_subchannel->dev);
        if (ret) {
                put_device(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
                goto out_err;
        }

        return ret;
out_err:
        channel_subsystems[nr] = NULL;
        return ret;
}
static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        struct channel_subsystem *css;
        int ret;

        ret = NOTIFY_DONE;
        for_each_css(css) {
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};
#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
        return &channel_subsystems[0]->device;
}
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
        struct gen_pool *gp_dma;
        void *cpu_addr;
        dma_addr_t dma_addr;
        int i;

        gp_dma = gen_pool_create(3, -1);
        if (!gp_dma)
                return NULL;
        for (i = 0; i < nr_pages; ++i) {
                cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
                                              CIO_DMA_GFP);
                if (!cpu_addr)
                        return gp_dma;
                gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
                                  dma_addr, PAGE_SIZE, -1);
        }
        return gp_dma;
}
static void __gp_dma_free_dma(struct gen_pool *pool,
                              struct gen_pool_chunk *chunk, void *data)
{
        size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

        dma_free_coherent((struct device *) data, chunk_size,
                          (void *) chunk->start_addr,
                          (dma_addr_t) chunk->phys_addr);
}
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
        if (!gp_dma)
                return;
        /* this is quite ugly but no better idea */
        gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
        gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
        /* No need to free up the resources: compiled in */
        cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
        if (!cio_dma_pool)
                return -ENOMEM;
        return 0;
}
void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
                          size_t size, dma32_t *dma_handle)
{
        dma_addr_t dma_addr;
        size_t chunk_size;
        void *addr;

        if (!gp_dma)
                return NULL;
        addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr);
        if (!addr) {
                chunk_size = round_up(size, PAGE_SIZE);
                addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, CIO_DMA_GFP);
                if (!addr)
                        return NULL;
                gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1);
                addr = gen_pool_dma_alloc(gp_dma, size, dma_handle ? &dma_addr : NULL);
        }
        if (dma_handle)
                *dma_handle = (__force dma32_t)dma_addr;
        return addr;
}
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
                        size_t size)
{
        return __cio_gp_dma_zalloc(gp_dma, dma_dev, size, NULL);
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
        if (!cpu_addr)
                return;
        memset(cpu_addr, 0, size);
        gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}
/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
        return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
        cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
        int ret, i;

        ret = chsc_init();
        if (ret)
                return ret;

        chsc_determine_css_characteristics();
        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        if (ret)
                max_ssid = 0;
        else /* Success. */
                max_ssid = __MAX_SSID;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        if ((ret = bus_register(&css_bus_type)))
                goto out;

        /* Setup css structure. */
        for (i = 0; i <= MAX_CSS_IDX; i++) {
                ret = setup_css(i);
                if (ret)
                        goto out_unregister;
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = cio_dma_pool_init();
        if (ret)
                goto out_unregister_rn;
        airq_init();
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        return 0;
out_unregister_rn:
        unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
        while (i-- > 0) {
                struct channel_subsystem *css = channel_subsystems[i];

                device_unregister(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}
static void __init css_bus_cleanup(void)
{
        struct channel_subsystem *css;

        for_each_css(css) {
                device_unregister(&css->pseudo_subchannel->dev);
                device_unregister(&css->device);
        }
        bus_unregister(&css_bus_type);
        crw_unregister_handler(CRW_RSC_SCH);
        idset_free(slow_subchannel_set);
        chsc_init_cleanup();
        isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
        int ret;

        ret = css_bus_init();
        if (ret)
                return ret;
        cio_work_q = create_singlethread_workqueue("cio");
        if (!cio_work_q) {
                ret = -ENOMEM;
                goto out_bus;
        }
        ret = io_subchannel_init();
        if (ret)
                goto out_wq;

        /* Register subchannels which are already in use. */
        cio_register_early_subchannels();
        /* Start initial subchannel evaluation. */
        css_schedule_eval_all();

        return ret;
out_wq:
        destroy_workqueue(cio_work_q);
out_bus:
        css_bus_cleanup();
        return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
        struct css_driver *cssdrv = to_cssdriver(drv);

        if (cssdrv->settle)
                return cssdrv->settle();
        return 0;
}

int css_complete_work(void)
{
        int ret;

        /* Wait for the evaluation of subchannels to finish. */
        ret = wait_event_interruptible(css_eval_wq,
                                       atomic_read(&css_eval_scheduled) == 0);
        if (ret)
                return -EINTR;
        flush_workqueue(cio_work_q);
        /* Wait for the subchannel type specific initialization to finish */
        return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
        css_complete_work();
        return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        int ret;

        /* Handle pending CRW's. */
        crw_wait_for_channel_report();
        ret = css_complete_work();

        return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
        .proc_open = nonseekable_open,
        .proc_write = cio_settle_write,
};

static int __init cio_settle_init(void)
{
        struct proc_dir_entry *entry;

        entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
        if (!entry)
                return -ENOMEM;
        return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
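/*
 * Usage sketch: with CONFIG_PROC_FS enabled, user space can wait for pending
 * subchannel evaluation to finish by writing to the settle file, e.g.
 * (assuming procfs is mounted at /proc):
 *
 *      echo 1 > /proc/cio_settle
 *
 * The write returns once CRW processing and css_complete_work() are done.
 */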
int sch_is_pseudo_sch(struct subchannel *sch)
{
        if (!sch->dev.parent)
                return 0;
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, const struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        const struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        /* When driver_override is set, only bind to the matching driver */
        if (sch->driver_override && strcmp(sch->driver_override, drv->name))
                return 0;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}
*dev
)
1376 struct subchannel
*sch
;
1379 sch
= to_subchannel(dev
);
1380 sch
->driver
= to_cssdriver(dev
->driver
);
1381 ret
= sch
->driver
->probe
? sch
->driver
->probe(sch
) : 0;
1387 static void css_remove(struct device
*dev
)
1389 struct subchannel
*sch
;
1391 sch
= to_subchannel(dev
);
1392 if (sch
->driver
->remove
)
1393 sch
->driver
->remove(sch
);
1397 static void css_shutdown(struct device
*dev
)
1399 struct subchannel
*sch
;
1401 sch
= to_subchannel(dev
);
1402 if (sch
->driver
&& sch
->driver
->shutdown
)
1403 sch
->driver
->shutdown(sch
);
static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
        const struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

static const struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.bus = &css_bus_type;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);