/*
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) "persistent_ram: " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pstore_ram.h>
struct persistent_ram_buffer {
	uint32_t    sig;
	atomic_t    start;
	atomic_t    size;
	uint8_t     data[0];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}

static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}
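
/*
 * The buffer header above lives in the persistent region itself: "start"
 * is the next write offset into the circular data area (and, once the
 * buffer has wrapped, the position of the oldest byte), while "size" is
 * the number of valid bytes, so both survive a reboot with the data.
 */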
/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;

	do {
		old = atomic_read(&prz->buffer->start);
		new = old + a;
		while (unlikely(new >= prz->buffer_size))
			new -= prz->buffer_size;
	} while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);

	return old;
}
/* increase the size counter until it hits the max size */
static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;

	if (atomic_read(&prz->buffer->size) == prz->buffer_size)
		return;

	do {
		old = atomic_read(&prz->buffer->size);
		new = old + a;
		if (new > prz->buffer_size)
			new = prz->buffer_size;
	} while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
}
static DEFINE_RAW_SPINLOCK(buffer_lock);
/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags;

	raw_spin_lock_irqsave(&buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	raw_spin_unlock_irqrestore(&buffer_lock, flags);

	return old;
}
/* increase the size counter until it hits the max size */
static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags;

	raw_spin_lock_irqsave(&buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	raw_spin_unlock_irqrestore(&buffer_lock, flags);
}
static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
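
/*
 * Writes normally go through the lockless cmpxchg-based helpers above.
 * persistent_ram_iomap() below switches these pointers to the
 * spinlock-protected variants, since atomic read-modify-write operations
 * may not be usable on ioremapped memory on all architectures.
 */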
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;
	uint16_t par[prz->ecc_info.ecc_size];

	/* Initialize the parity buffer */
	memset(par, 0, sizeof(par));
	encode_rs8(prz->rs_decoder, data, len, par, 0);
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = par[i];
}
static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;
	uint16_t par[prz->ecc_info.ecc_size];

	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, par, len,
				NULL, 0, NULL, 0, NULL);
}
static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	if (!ecc_size)
		return;

	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}
static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}
static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;

		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}
static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ecc_blocks;
	size_t ecc_total;

	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}
ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	ssize_t ret;

	if (!prz->ecc_info.ecc_size)
		return 0;

	if (prz->corrected_bytes || prz->bad_blocks)
		ret = snprintf(str, len,
			"\n%d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);
	else
		ret = snprintf(str, len, "\nNo errors detected\n");

	return ret;
}
static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	memcpy(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	if (!size)
		return;

	if (!prz->old_log) {
		persistent_ram_ecc_old(prz);
		prz->old_log = kmalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	memcpy(prz->old_log, &buffer->data[start], size - start);
	memcpy(prz->old_log + size - start, &buffer->data[0], start);
}
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return count;
}
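
/*
 * Note that persistent_ram_write() never fails from the caller's point of
 * view: writes larger than the buffer keep only their trailing buffer_size
 * bytes, and once the buffer is full the oldest data is silently
 * overwritten as the start pointer wraps around.
 */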
size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}

void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}

void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}
void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}
static void *persistent_ram_vmap(phys_addr_t start, size_t size)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	prot = pgprot_noncached(PAGE_KERNEL);

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	vaddr = vmap(pages, page_count, VM_MAP, prot);
	kfree(pages);

	return vaddr;
}
static void *persistent_ram_iomap(phys_addr_t start, size_t size)
{
	if (!request_mem_region(start, size, "persistent_ram")) {
		pr_err("request mem region (0x%llx@0x%llx) failed\n",
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	buffer_start_add = buffer_start_add_locked;
	buffer_size_add = buffer_size_add_locked;

	return ioremap(start, size);
}
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size);
	else
		prz->vaddr = persistent_ram_iomap(start, size);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	prz->buffer = prz->vaddr + offset_in_page(start);
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}
static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	int ret;

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret)
		return ret;

	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz))
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
		else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
			return 0;
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
	}

	prz->buffer->sig = sig;
	persistent_ram_zap(prz);

	return 0;
}
void persistent_ram_free(struct persistent_ram_zone *prz)
{
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			vunmap(prz->vaddr);
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	persistent_ram_free_old(prz);
	kfree(prz);
}
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
			u32 sig, struct persistent_ram_ecc_info *ecc_info)
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	ret = persistent_ram_buffer_map(start, size, prz);
	if (ret)
		goto err;

	ret = persistent_ram_post_init(prz, sig, ecc_info);
	if (ret)
		goto err;

	return prz;
err:
	persistent_ram_free(prz);
	return ERR_PTR(ret);
}