soc/intel/xeon_sp/util: Enhance lock_pam0123
[coreboot2.git] / payloads / libpayload / liblz4 / lz4_wrapper.c
blob3d17fe67425a8edd3e0e7e1be67de905edec686b
1 /*
2 * Copyright 2015 Google Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * Alternatively, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") version 2 as published by the Free
17 * Software Foundation.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
#include <endian.h>
#include <libpayload.h>
#include <lz4.h>
/* LZ4 comes with its own supposedly portable memory access functions, but they
 * seem to be very inefficient in practice (at least on ARM64). Since libpayload
 * knows about endianness and allows some basic assumptions (such as unaligned
 * access support), we can easily write the ones we need ourselves. */
/* Read a 16-bit little-endian value from a possibly unaligned address.
 * Assembling the value byte-by-byte avoids the unaligned uint16_t load of
 * the old code (a strict-aliasing violation, and UB on targets without
 * unaligned access) and is endian-agnostic, so le16toh is no longer needed.
 * Compilers recognize this idiom and still emit a single load where legal. */
static uint16_t LZ4_readLE16(const void *src)
{
	const uint8_t *p = src;
	return (uint16_t)(p[0] | (p[1] << 8));
}
/* Copy exactly 8 bytes between possibly unaligned, non-overlapping buffers. */
static void LZ4_copy8(void *dst, const void *src)
{
/* ARM32 needs to be a special snowflake to prevent GCC from coalescing the
 * access into LDRD/STRD (which don't support unaligned accesses). */
#ifdef __arm__
	uint32_t x0, x1;
	asm volatile (
		"ldr %[x0], [%[src]]\n\t"
		"ldr %[x1], [%[src], #4]\n\t"
		"str %[x0], [%[dst]]\n\t"
		"str %[x1], [%[dst], #4]\n\t"
		: [x0]"=r"(x0), [x1]"=r"(x1)
		: [src]"r"(src), [dst]"r"(dst)
		: "memory" );
#else
	/* A constant-size memcpy compiles to the same single 8-byte load/store
	 * as the old *(uint64_t *) assignment, but without the strict-aliasing
	 * and alignment UB that the pointer casts invoked. */
	memcpy(dst, src, 8);
#endif
}
/* Type aliases and helper macros expected by the vendored LZ4 core below. */
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;

#define FORCE_INLINE static inline __attribute__((always_inline))
#define likely(expr) __builtin_expect((expr) != 0, 1)
#define unlikely(expr) __builtin_expect((expr) != 0, 0)

/* Unaltered (just removed unrelated code) from github.com/Cyan4973/lz4/dev. */
#include "lz4.c.inc"	/* #include for inlining, do not link! */
/* Little-endian magic word at the start of every LZ4 frame. */
#define LZ4F_MAGICNUMBER 0x184D2204

/* Fixed-size prefix of an LZ4 frame header (flags and block descriptor as
 * overlapping raw-byte/bitfield views). The optional trailing fields noted
 * below follow it in the stream but are not part of this struct. */
struct lz4_frame_header {
	uint32_t magic;
	union {
		uint8_t flags;
		struct {
			uint8_t reserved0		: 2;
			uint8_t has_content_checksum	: 1;
			uint8_t has_content_size	: 1;
			uint8_t has_block_checksum	: 1;
			uint8_t independent_blocks	: 1;
			uint8_t version			: 2;
		};
	};
	union {
		uint8_t block_descriptor;
		struct {
			uint8_t reserved1		: 4;
			uint8_t max_block_size		: 3;
			uint8_t reserved2		: 1;
		};
	};
	/* + uint64_t content_size iff has_content_size is set */
	/* + uint8_t header_checksum */
} __packed;
/* Per-block header: 31-bit payload size plus a "stored uncompressed" flag in
 * the top bit. The payload (and optional checksum) follow it in the stream. */
struct lz4_block_header {
	union {
		uint32_t raw;
		struct {
			uint32_t size		: 31;
			uint32_t not_compressed	: 1;
		};
	};
	/* + size bytes of data */
	/* + uint32_t block_checksum iff has_block_checksum is set */
} __packed;
115 size_t ulz4fn(const void *src, size_t srcn, void *dst, size_t dstn)
117 const void *in = src;
118 void *out = dst;
119 size_t out_size = 0;
120 int has_block_checksum;
122 { /* With in-place decompression the header may become invalid later. */
123 const struct lz4_frame_header *h = in;
125 if (srcn < sizeof(*h) + sizeof(uint64_t) + sizeof(uint8_t))
126 return 0; /* input overrun */
128 /* We assume there's always only a single, standard frame. */
129 if (le32toh(h->magic) != LZ4F_MAGICNUMBER || h->version != 1)
130 return 0; /* unknown format */
131 if (h->reserved0 || h->reserved1 || h->reserved2)
132 return 0; /* reserved must be zero */
133 if (!h->independent_blocks)
134 return 0; /* we don't support block dependency */
135 has_block_checksum = h->has_block_checksum;
137 in += sizeof(*h);
138 if (h->has_content_size)
139 in += sizeof(uint64_t);
140 in += sizeof(uint8_t);
143 while (1) {
144 if ((size_t)(in - src) + sizeof(struct lz4_block_header) > srcn)
145 break; /* input overrun */
147 struct lz4_block_header b = { .raw = le32toh(*(uint32_t *)in) };
148 in += sizeof(struct lz4_block_header);
150 if ((size_t)(in - src) + b.size > srcn)
151 break; /* input overrun */
153 if (!b.size) {
154 out_size = out - dst;
155 break; /* decompression successful */
158 if (b.not_compressed) {
159 size_t size = MIN((uint32_t)b.size, dst + dstn - out);
160 memcpy(out, in, size);
161 if (size < b.size)
162 break; /* output overrun */
163 else
164 out += size;
165 } else {
166 /* constant folding essential, do not touch params! */
167 int ret = LZ4_decompress_generic(in, out, b.size,
168 dst + dstn - out, endOnInputSize,
169 full, 0, noDict, out, NULL, 0);
170 if (ret < 0)
171 break; /* decompression error */
172 else
173 out += ret;
176 in += b.size;
177 if (has_block_checksum)
178 in += sizeof(uint32_t);
181 return out_size;
184 size_t ulz4f(const void *src, void *dst)
186 /* LZ4 uses signed size parameters, so can't just use ((u32)-1) here. */
187 return ulz4fn(src, 1*GiB, dst, 1*GiB);