/*
 * Copyright IBM Corp. 2007, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	_reserved3[91 - 85];	/* 85-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));
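
/*
 * The trailing comments give byte offsets within the 4 KB SCCB; the
 * _reservedN arrays are sized as "end - start" so those offsets stay
 * visible. Page alignment matters because an SCCB must not cross a
 * 4 KB boundary (see the comment in do_cpu_configure() below).
 */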

static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;

static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	/* Enable service-signal external interruption subclass. */
	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}
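
/*
 * sclp_cmd_sync_early() is used before the interrupt-driven SCLP core is
 * available: control register 0 bit 9 is the service-signal subclass mask,
 * and the enabled wait PSW parks the CPU until the service-signal external
 * interrupt arrives. barrier() makes the compiler re-read SCCB contents
 * that the machine may have modified in the meantime.
 */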

static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}

void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;
}
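
/*
 * The narrow rnmax (u16) and rnsize (u8) fields cannot describe large
 * configurations; when the machine reports them as zero, the wider rnmax2
 * and rnsize2 fields hold the values instead. rnsize is given in
 * megabytes, hence the shift by 20 to convert rzm to bytes.
 */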

unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return rzm;
}

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
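
/*
 * The completion lives on the caller's stack and is signalled from
 * sclp_sync_callback() when the request finishes. Callers allocate the
 * SCCB itself with GFP_DMA because SCCBs must reside below 2 GB on s390;
 * the request descriptor has no such restriction.
 */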

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	info->has_cpu_type = sclp_fac84 & 0x1;
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}

int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}
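
/*
 * Minimal usage sketch (illustration only, not part of this driver;
 * the function name is hypothetical): querying the CPU counts.
 */
#if 0
static void example_show_cpu_counts(void)
{
	struct sclp_cpu_info info;

	if (sclp_get_cpu_info(&info))
		return;
	pr_info("cpus: %d configured, %d standby\n",
		info.configured, info.standby);
}
#endif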

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
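
/*
 * "cmd | cpu << 8" shifts the CPU address into the modifier byte of the
 * 32-bit command word: deconfiguring CPU 5, for example, issues
 * 0x00100001 | 0x0500 = 0x00100501.
 */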

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
	int usecount;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __attribute__((packed));

static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}
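
/*
 * Increment numbers are 1-based while addresses start at 0: rn 1 maps to
 * address 0, rn 2 to rzm, and so on. With a 256 MB increment size, for
 * example, rn 3 starts at 512 MB.
 */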

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	return do_assign_storage(0x000d0001, rn);
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}
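
/*
 * 0x000d0001 and 0x000c0001 are the assign- and unassign-storage command
 * words; unlike the CPU commands above, the increment number travels in
 * the SCCB (sccb->rn) rather than in the command word's modifier byte.
 */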

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __attribute__((packed));

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = do_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++)
			sclp_unassign_storage(sccb->entries[i] >> 16);
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			if (incr->usecount++)
				continue;
			/*
			 * Don't break the loop if one assign fails. Loop may
			 * be walked again on CANCEL and we can't save
			 * information if state changed before or not.
			 * So continue and increase usecount for all increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}
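
/*
 * usecount tracks how many online memory ranges overlap an increment,
 * since memory section size and increment size need not match: storage is
 * assigned when the first overlapping range goes online and unassigned
 * only when the last one goes away.
 */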

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for (id = 0; id <= sclp_max_storage_id; id++)
		if (!test_bit(id, sclp_storage_ids))
			sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}
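
/*
 * add_memory_merged() batches contiguous increments into a single
 * add_memory() call: consecutive rns only bump the counter, and the
 * accumulated range is registered once a gap is seen. The trailing
 * add_memory_merged(0) call below flushes the last pending range.
 */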

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __attribute__((packed));

static struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};

static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warning("configure channel-path failed "
			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid.
 * Return 0 if the command finished successfully, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. On success return 0, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
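
/*
 * As with the CPU commands above, the channel-path ID is merged into the
 * modifier byte of the command word via "chpid.id << 8".
 */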

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform the read channel-path information sclp command and wait for
 * completion. On success, store channel-path information in @info and
 * return 0; return non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("read channel-path info failed "
			   "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}
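
/*
 * Minimal usage sketch (illustration only, not part of this driver; the
 * function name is hypothetical and the MSB-first bit order is assumed
 * from the usual s390 channel-path mask convention): checking whether a
 * given channel path is currently configured.
 */
#if 0
static bool example_chp_is_configured(struct chp_id chpid)
{
	struct sclp_chp_info info;

	if (sclp_chp_read_info(&info))
		return false;
	/* One bit per channel path, most significant bit first. */
	return info.configured[chpid.id / 8] & (0x80 >> (chpid.id % 8));
}
#endif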