/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>

#include "sclp.h"
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
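
/*
 * Illustrative sketch (not part of the original driver): the typical calling
 * pattern for sclp_sync_request(). The SCCB is a zeroed, DMA-capable page
 * that carries its own length in the header; after the synchronous request
 * completes, the caller inspects the response code. The command word and
 * the expected success code (0x0020 here) are placeholders, since both
 * differ per SCLP command.
 */
static int __maybe_unused sclp_sync_request_example(sclp_cmdw_t cmd)
{
	struct sccb_header *sccb;
	int rc;

	sccb = (struct sccb_header *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->length = PAGE_SIZE;
	rc = sclp_sync_request(cmd, sccb);
	if (!rc && sccb->response_code != 0x0020)
		rc = -EIO;
	free_page((unsigned long) sccb);
	return rc;
}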
/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
static void sclp_fill_core_info(struct sclp_core_info *info,
				struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	memcpy(&info->core, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_core_entry));
}
int sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}
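
/*
 * Illustrative sketch (not part of the original driver): reading the CPU
 * core counts via sclp_get_core_info(). struct sclp_core_info comes from
 * <asm/sclp.h>; it is allocated here because it also holds the per-core
 * entries copied in by sclp_fill_core_info() above.
 */
static void __maybe_unused sclp_core_info_example(void)
{
	struct sclp_core_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;
	if (!sclp_get_core_info(info))
		pr_info("cores: %d configured, %d standby, %d combined\n",
			info->configured, info->standby, info->combined);
	kfree(info);
}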
struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}
int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
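
/*
 * Illustrative sketch (not part of the original driver): the core number is
 * carried in bits 8-15 of the command word, so deconfiguring core 2 issues
 * command word 0x00100201 (SCLP_CMDW_DECONFIGURE_CPU | 2 << 8). Core 2 is a
 * placeholder value.
 */
static int __maybe_unused sclp_core_reconfig_example(void)
{
	int rc;

	/* Take core 2 out of the configuration, then put it back. */
	rc = sclp_core_deconfigure(2);
	if (rc)
		return rc;
	return sclp_core_configure(2);
}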
#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
static int sclp_mem_state_changed;

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;
int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}
static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}
struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Do not allow memory blocks that contain standby memory to
		 * be set offline. This keeps the "memory online" case simple.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}
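
/*
 * Worked example for align_to_block_size() (illustrative, assuming a 256MB
 * memory block size): a 768MB standby area starting at 0x48000000 is trimmed
 * to the aligned range [0x50000000, 0x70000000), so 512MB of the 768MB stay
 * usable. The start is rounded up and the end rounded down to the alignment,
 * because memory can only be added in whole memory blocks.
 */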
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
skip_add:
	first_rn = rn;
	num = 1;
}
static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}
struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;
static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (OLDMEM_BASE) /* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * PCI I/O adapter configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_PCI		0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI	0x001b0001

#define SCLP_RECONFIG_PCI_ATYPE		2

struct pci_cfg_sccb {
	struct sccb_header header;
	u8 atype;		/* adapter type */
	u8 reserved1;
	u16 reserved2;
	u32 aid;		/* adapter identifier */
} __packed;
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
	struct pci_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_PCI_RECONFIG)
		return -EOPNOTSUPP;

	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;

	sccb->header.length = PAGE_SIZE;
	sccb->atype = SCLP_RECONFIG_PCI_ATYPE;
	sccb->aid = fid;
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
int sclp_pci_configure(u32 fid)
{
	return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);

int sclp_pci_deconfigure(u32 fid)
{
	return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
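
/*
 * Illustrative sketch (not part of the original driver): toggling a PCI
 * function via the exported helpers. The function id used here is a
 * placeholder; real callers pass the fid of the adapter they operate on.
 */
static int __maybe_unused sclp_pci_reconfig_example(void)
{
	u32 fid = 0;	/* placeholder function id */
	int rc;

	rc = sclp_pci_configure(fid);
	if (rc)
		return rc;
	return sclp_pci_deconfigure(fid);
}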
/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path SCLP command for the specified chpid.
 * Return 0 if the command finished successfully, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
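
/*
 * Illustrative sketch (not part of the original driver): configuring channel
 * path 0.40. struct chp_id and chp_id_init() come from <asm/chpid.h>; the
 * chpid value is a placeholder.
 */
static int __maybe_unused sclp_chp_configure_example(void)
{
	struct chp_id chpid;

	chp_id_init(&chpid);	/* css id 0 */
	chpid.id = 0x40;	/* placeholder channel-path id */
	return sclp_chp_configure(chpid);
}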
/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path SCLP command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform the read channel-path information SCLP command and wait for
 * completion. On success, store the channel-path information in @info and
 * return 0. Return non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}
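
/*
 * Illustrative sketch (not part of the original driver): reading the
 * channel-path masks. struct sclp_chp_info comes from <asm/sclp.h>; the
 * recognized/standby/configured arrays copied above each hold one bit per
 * channel-path ID.
 */
static int __maybe_unused sclp_chp_read_info_example(void)
{
	struct sclp_chp_info info;
	int rc;

	rc = sclp_chp_read_info(&info);
	if (rc)
		return rc;
	/* info.recognized, info.standby and info.configured now hold the masks. */
	return 0;
}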