/*
 * Copyright IBM Corp. 2007,2012
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>

#include "sclp.h"
#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	fac85;			/* 85 */
	u8	_reserved3[91 - 86];	/* 86-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));
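/*
 * Note: rnmax/rnsize give the number and size of storage increments; the
 * larger rnmax2/rnsize2 fields are only consulted when the small fields
 * are zero (see sclp_facilities_detect() below).
 */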
static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE);
static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;
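/*
 * Issue an SCLP command before the normal request machinery is up: set the
 * service-signal subclass mask in control register 0 (bit 9), start the
 * service call and wait in an enabled wait PSW until the service-signal
 * external interrupt arrives.
 */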
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}
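/*
 * Read SCP info: the forced variant of READ SCP INFO is tried first, with a
 * fall back to the plain command. Each attempt is repeated while the SCLP is
 * busy (-EBUSY); response code 0x0010 means the information block was
 * returned, while 0x01f0 appears to indicate an unsupported command, in
 * which case the next command word is tried.
 */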
static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}
static void __init sclp_event_mask_early(void)
{
	struct init_sccb *sccb = &early_event_mask_sccb;
	int rc;

	do {
		memset(sccb, 0, sizeof(*sccb));
		sccb->header.length = sizeof(*sccb);
		sccb->mask_length = sizeof(sccb_mask_t);
		rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
	} while (rc == -EBUSY);
}
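/*
 * Cache the data from the READ SCP INFO response: the facility bits, the
 * ESOP machine flag, the highest storage increment number (rnmax) and the
 * storage increment size in bytes (rzm).
 */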
void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	if (sccb->fac85 & 0x02)
		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;

	sclp_event_mask_early();
}
bool __init sclp_has_linemode(void)
{
	struct init_sccb *sccb = &early_event_mask_sccb;

	if (sccb->header.response_code != 0x20)
		return 0;
	if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
		return 0;
	if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
		return 0;
	return 1;
}
bool __init sclp_has_vt220(void)
{
	struct init_sccb *sccb = &early_event_mask_sccb;

	if (sccb->header.response_code != 0x20)
		return 0;
	if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
		return 1;
	return 0;
}
unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}
unsigned long long sclp_get_rzm(void)
{
	return rzm;
}
/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}
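/*
 * Execute an SCLP command synchronously: the request is queued with
 * sclp_add_request() and the caller sleeps on a completion that is signalled
 * from sclp_sync_callback() once the request has been processed. Callers
 * typically pass a zeroed, DMA-capable sccb with header.length filled in and
 * inspect header.response_code afterwards.
 */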
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	info->has_cpu_type = sclp_fac84 & 0x1;
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}
int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}
struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}
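/*
 * The CPU address is encoded in bits 8-15 of the command word, hence
 * "cmd | cpu << 8" below; the storage element and channel-path commands
 * further down use the same encoding for their identifiers.
 */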
int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}
int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;
struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};
struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;
int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(rzm);
}
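/*
 * Storage increments (RNs) are numbered starting with 1; increment rn covers
 * the rzm bytes starting at (rn - 1) * rzm.
 */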
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + rzm);
	return 0;
}
static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}
struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;
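/*
 * Attach a storage element and unassign every increment that the response
 * lists as assigned, presumably so that the increments are only assigned
 * again once the corresponding memory blocks are actually set online.
 */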
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = sclp_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
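/*
 * Assign (online != 0) or unassign the storage increments that overlap the
 * range [start, start + size). sclp_mem_list is kept sorted by increment
 * number, so the walk can stop at the first increment past the range.
 */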
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
	}
	return rc ? -EIO : 0;
}
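/*
 * Memory hotplug notifier: standby increments are assigned when a memory
 * block is about to go online and unassigned again when the block goes
 * offline or the online operation is cancelled. Storage elements that have
 * not been attached yet are attached first.
 */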
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
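/*
 * Merge runs of consecutive standby increments and register each run with
 * add_memory() as one region, clamped to VMEM_MAX_PHYS and to the memory_end
 * limit if one was set. A call with rn == 0 flushes the last pending run
 * (see sclp_add_standby_memory()).
 */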
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}
static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}
struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;
static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};
static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};
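/*
 * Scan all storage elements with the read-storage-information command,
 * record which increments are assigned or standby, add standby entries for
 * the remaining increments up to rnmax, and register the memory notifier
 * plus a "sclp_mem" platform device used for suspend handling.
 */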
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (OLDMEM_BASE) /* No standby memory in kdump mode */
		return 0;
	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = PTR_RET(sclp_pdev);
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * PCI I/O adapter configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_PCI			0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI		0x001b0001

#define SCLP_RECONFIG_PCI_ATPYE			2

struct pci_cfg_sccb {
	struct sccb_header header;
	u8 atype;		/* adapter type */
	u8 reserved1;
	u16 reserved2;
	u32 aid;		/* adapter identifier */
} __packed;

static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
	struct pci_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_PCI_RECONFIG)
		return -EOPNOTSUPP;

	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;

	sccb->header.length = PAGE_SIZE;
	sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
	sccb->aid = fid;
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

int sclp_pci_configure(u32 fid)
{
	return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);

int sclp_pci_deconfigure(u32 fid)
{
	return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warning("configure channel-path failed "
			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform configure channel-path command sclp command for specified chpid.
 * Return 0 after command successfully finished, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform deconfigure channel-path command sclp command for specified chpid
 * and wait for completion. On success return 0. Return non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("read channel-path info failed "
			   "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);