// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/module.h>
#include <asm/ctlreg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/page-states.h>

#include "sclp.h"

#define SCLP_CMDW_ASSIGN_STORAGE	0x000d0001
#define SCLP_CMDW_UNASSIGN_STORAGE	0x000c0001
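
/*
 * Callback for synchronous SCLP requests: signals the completion that the
 * issuer of the request is waiting on.
 */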
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}
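
/*
 * Issue an SCLP command and wait for the request to complete. A non-zero
 * timeout bounds the time the request may spend on the SCLP request queue.
 */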
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
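
/*
 * Read CPU (core) information from the SCLP. The SCCB size depends on
 * whether the extended-length-SCCB facility (bit 140) is installed.
 */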
int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;

	sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = length;
	sccb->header.control_mask[2] = 0x80;
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_pages((unsigned long) sccb, get_order(length));
	return rc;
}
struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
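
/*
 * Issue a CPU configure/deconfigure command; the target core number is
 * encoded into bits 8-15 of the command word by the callers below.
 */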
static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}
int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;
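
/*
 * Map a pfn to its storage increment number for the memory hotplug
 * "phys_device" sysfs attribute.
 */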
int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}
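
/*
 * Assign or unassign a single storage increment (rn) via the SCLP.
 */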
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}
static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
}
struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[];
} __packed;
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
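
/*
 * Assign (online) or unassign (offline) all storage increments that
 * intersect the range [start, start + size).
 */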
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}
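
/*
 * Return true if the range [start, end) overlaps at least one standby
 * storage increment.
 */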
static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}
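
/*
 * Memory hotplug notifier: assigns the standby storage increments backing a
 * memory block when it is brought online and unassigns them again when the
 * block goes offline.
 */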
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * We do not allow to set memory blocks offline that contain
		 * standby memory. This is done to simplify the "memory online"
		 * case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_PREPARE_ONLINE:
		/*
		 * Access the altmap_start_pfn and altmap_nr_pages fields
		 * within the struct memory_notify specifically when dealing
		 * with only MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
		 *
		 * When altmap is in use, take the specified memory range
		 * online, which includes the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		rc = sclp_mem_change_state(start, size, 1);
		if (rc || !arg->altmap_nr_pages)
			break;
		/*
		 * Set CMMA state to nodat here, since the struct page memory
		 * at the beginning of the memory block will not go through the
		 * buddy allocator later.
		 */
		__arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
		break;
	case MEM_FINISH_OFFLINE:
		/*
		 * When altmap is in use, take the specified memory range
		 * offline, which includes the altmap.
		 */
		if (arg->altmap_nr_pages) {
			start = PFN_PHYS(arg->altmap_start_pfn);
			size += PFN_PHYS(arg->altmap_nr_pages);
		}
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}
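
/*
 * Collect consecutive standby increments into one region and register it
 * with the memory hotplug core via add_memory(). Calling with rn == 0
 * flushes the currently pending region.
 */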
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(0, addr, block_size,
			   MACHINE_HAS_EDAT1 ?
			   MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
skip_add:
	first_rn = rn;
	num = 1;
}
static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}
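
/*
 * Insert a storage increment into sclp_mem_list, keeping the list ordered
 * by increment number.
 */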
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
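
/*
 * Boot-time detection of standby memory: read the storage information for
 * each storage id, build sclp_mem_list, register the hotplug notifier and
 * add the standby memory regions.
 */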
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (oldmem_data.start)	/* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001
struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
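
/*
 * Issue a channel-path configure or deconfigure command and evaluate the
 * response code.
 */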
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path SCLP command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path SCLP command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform the read channel-path information SCLP command and wait for
 * completion. On success, store the channel-path information in @info and
 * return 0. Return non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}