/*
 * OPAL asynchronous Memory error handling support in PowerNV.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */
#undef DEBUG
#define pr_fmt(fmt) "opal-mem-err: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputable.h>
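/*
 * OPAL reports memory errors as OPAL_MSG_MEM_ERR messages. The notifier
 * below may be invoked in atomic context (hence GFP_ATOMIC), so it only
 * copies each message onto a locked list; a work item then walks the
 * list in process context and feeds the affected pages to
 * memory_failure().
 */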
static int opal_mem_err_nb_init;
static LIST_HEAD(opal_memory_err_list);
static DEFINE_SPINLOCK(opal_mem_err_lock);
/* List node carrying a private copy of one queued OPAL message. */
struct OpalMsgNode {
	struct list_head list;
	struct opal_msg msg;
};
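/*
 * Poison every page in the range reported by the firmware event. Both
 * event types carry a big-endian [start, end) physical address range.
 */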
static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
{
	uint64_t paddr_start, paddr_end;

	pr_debug("%s: Retrieved memory error event, type: 0x%x\n",
		 __func__, merr_evt->type);
	switch (merr_evt->type) {
	case OPAL_MEM_ERR_TYPE_RESILIENCE:
		paddr_start = be64_to_cpu(merr_evt->u.resilience.physical_address_start);
		paddr_end = be64_to_cpu(merr_evt->u.resilience.physical_address_end);
		break;
	case OPAL_MEM_ERR_TYPE_DYN_DALLOC:
		paddr_start = be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_start);
		paddr_end = be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_end);
		break;
	default:
		return;
	}
	for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
		memory_failure(paddr_start >> PAGE_SHIFT, 0, 0);
	}
}
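/*
 * Drain the queued messages. The lock is dropped around the call to
 * handle_memory_error_event() so that new events can be queued while a
 * (potentially slow) address range is being processed.
 */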
static void handle_memory_error(void)
{
	unsigned long flags;
	struct OpalMemoryErrorData *merr_evt;
	struct OpalMsgNode *msg_node;

	spin_lock_irqsave(&opal_mem_err_lock, flags);
	while (!list_empty(&opal_memory_err_list)) {
		msg_node = list_entry(opal_memory_err_list.next,
				      struct OpalMsgNode, list);
		list_del(&msg_node->list);
		spin_unlock_irqrestore(&opal_mem_err_lock, flags);

		merr_evt = (struct OpalMemoryErrorData *)
					&msg_node->msg.params[0];
		handle_memory_error_event(merr_evt);
		kfree(msg_node);
		spin_lock_irqsave(&opal_mem_err_lock, flags);
	}
	spin_unlock_irqrestore(&opal_mem_err_lock, flags);
}
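/*
 * memory_failure() can sleep, so the actual processing is deferred to a
 * work item rather than done from the message notifier itself.
 */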
static void mem_error_handler(struct work_struct *work)
{
	handle_memory_error();
}

static DECLARE_WORK(mem_error_work, mem_error_handler);
/*
 * opal_memory_err_event - notifier handler that queues up the opal message
 * to be processed later.
 */
static int opal_memory_err_event(struct notifier_block *nb,
				 unsigned long msg_type, void *msg)
{
	unsigned long flags;
	struct OpalMsgNode *msg_node;

	if (msg_type != OPAL_MSG_MEM_ERR)
		return 0;

	msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
	if (!msg_node) {
		pr_err("MEMORY_ERROR: out of memory, Opal message event not handled\n");
		return -ENOMEM;
	}
	memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));

	spin_lock_irqsave(&opal_mem_err_lock, flags);
	list_add(&msg_node->list, &opal_memory_err_list);
	spin_unlock_irqrestore(&opal_mem_err_lock, flags);

	schedule_work(&mem_error_work);
	return 0;
}
static struct notifier_block opal_mem_err_nb = {
	.notifier_call	= opal_memory_err_event,
	.next		= NULL,
	.priority	= 0,
};
static int __init opal_mem_err_init(void)
{
	int ret;

	if (!opal_mem_err_nb_init) {
		ret = opal_message_notifier_register(
					OPAL_MSG_MEM_ERR, &opal_mem_err_nb);
		if (ret) {
			pr_err("%s: Can't register OPAL event notifier (%d)\n",
			       __func__, ret);
			return ret;
		}
		opal_mem_err_nb_init = 1;
	}
	return 0;
}
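/* Register at subsys initcall time, and only on powernv machines. */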
machine_subsys_initcall(powernv, opal_mem_err_init);