/* reftable/block.c */
/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "block.h"

#include "blocksource.h"
#include "constants.h"
#include "record.h"
#include "reftable-error.h"
#include "system.h"
#include <zlib.h>
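
/*
 * Sizes, in bytes, of the fixed reftable header and footer for a given format
 * version. Version 2 is four bytes larger than version 1; per the reftable
 * format description, the extra field carries the hash function ID.
 */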
int header_size(int version)
{
        switch (version) {
        case 1:
                return 24;
        case 2:
                return 28;
        }
        abort();
}

int footer_size(int version)
{
        switch (version) {
        case 1:
                return 68;
        case 2:
                return 72;
        }
        abort();
}
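
/*
 * Track the restart point for a newly encoded record of `n` bytes and advance
 * the write cursor. The space check accounts for everything that
 * block_writer_finish() will append later: a two-byte restart count plus
 * three bytes per restart offset, in addition to the record itself. The
 * number of restart points is capped at MAX_RESTARTS. Returns -1 if the
 * record does not fit into the remaining block space.
 */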
static int block_writer_register_restart(struct block_writer *w, int n,
                                         int is_restart, struct reftable_buf *key)
{
        int rlen, err;

        rlen = w->restart_len;
        if (rlen >= MAX_RESTARTS) {
                is_restart = 0;
        }

        if (is_restart) {
                rlen++;
        }
        if (2 + 3 * rlen + n > w->block_size - w->next)
                return -1;
        if (is_restart) {
                REFTABLE_ALLOC_GROW(w->restarts, w->restart_len + 1, w->restart_cap);
                if (!w->restarts)
                        return REFTABLE_OUT_OF_MEMORY_ERROR;
                w->restarts[w->restart_len++] = w->next;
        }

        w->next += n;

        reftable_buf_reset(&w->last_key);
        err = reftable_buf_add(&w->last_key, key->buf, key->len);
        if (err < 0)
                return err;

        w->entries++;
        return 0;
}

int block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *buf,
                      uint32_t block_size, uint32_t header_off, int hash_size)
{
        bw->buf = buf;
        bw->hash_size = hash_size;
        bw->block_size = block_size;
        bw->header_off = header_off;
        bw->buf[header_off] = typ;
        bw->next = header_off + 4;
        bw->restart_interval = 16;
        bw->entries = 0;
        bw->restart_len = 0;
        bw->last_key.len = 0;
        if (!bw->zstream) {
                REFTABLE_CALLOC_ARRAY(bw->zstream, 1);
                if (!bw->zstream)
                        return REFTABLE_OUT_OF_MEMORY_ERROR;
                deflateInit(bw->zstream, 9);
        }

        return 0;
}
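
/*
 * Illustrative sketch of the writer API (hypothetical caller, condensed error
 * handling; the real call sites live in reftable/writer.c):
 *
 *	struct block_writer bw = { 0 };
 *	err = block_writer_init(&bw, BLOCK_TYPE_REF, buf, block_size,
 *				header_off, hash_size);
 *	err = block_writer_add(&bw, &rec);    negative once the record no longer fits
 *	err = block_writer_finish(&bw);       returns the number of bytes used
 *	block_writer_release(&bw);
 */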
uint8_t block_writer_type(struct block_writer *bw)
{
        return bw->buf[bw->header_off];
}

/* Adds the reftable_record to the block. Returns -1 if it does not fit, 0 on
   success. Returns REFTABLE_API_ERROR if attempting to write a record with an
   empty key. */
int block_writer_add(struct block_writer *w, struct reftable_record *rec)
{
        struct reftable_buf empty = REFTABLE_BUF_INIT;
        struct reftable_buf last =
                w->entries % w->restart_interval == 0 ? empty : w->last_key;
        struct string_view out = {
                .buf = w->buf + w->next,
                .len = w->block_size - w->next,
        };
        struct string_view start = out;
        int is_restart = 0;
        struct reftable_buf key = REFTABLE_BUF_INIT;
        int n = 0;
        int err;

        err = reftable_record_key(rec, &key);
        if (err < 0)
                goto done;

        if (!key.len) {
                err = REFTABLE_API_ERROR;
                goto done;
        }

        n = reftable_encode_key(&is_restart, out, last, key,
                                reftable_record_val_type(rec));
        if (n < 0) {
                err = -1;
                goto done;
        }
        string_view_consume(&out, n);

        n = reftable_record_encode(rec, out, w->hash_size);
        if (n < 0) {
                err = -1;
                goto done;
        }
        string_view_consume(&out, n);

        err = block_writer_register_restart(w, start.len - out.len, is_restart,
                                            &key);
done:
        reftable_buf_release(&key);
        return err;
}
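
/*
 * Each record added above is encoded with prefix compression relative to the
 * previous key: a varint prefix length, a varint storing
 * `(suffix_length << 3) | value_type`, the key suffix, and finally the
 * type-specific payload written by reftable_record_encode() (see
 * reftable/record.c). Records at restart points store their full key, i.e. a
 * prefix length of zero.
 */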
int block_writer_finish(struct block_writer *w)
{
        int i;

        for (i = 0; i < w->restart_len; i++) {
                put_be24(w->buf + w->next, w->restarts[i]);
                w->next += 3;
        }

        put_be16(w->buf + w->next, w->restart_len);
        w->next += 2;
        put_be24(w->buf + 1 + w->header_off, w->next);

        /*
         * Log records are stored zlib-compressed. Note that the compression
         * also spans over the restart points we have just written.
         */
        if (block_writer_type(w) == BLOCK_TYPE_LOG) {
                int block_header_skip = 4 + w->header_off;
                uLongf src_len = w->next - block_header_skip, compressed_len;
                int ret;

                ret = deflateReset(w->zstream);
                if (ret != Z_OK)
                        return REFTABLE_ZLIB_ERROR;

                /*
                 * Precompute the upper bound of how many bytes the compressed
                 * data may end up with. Combined with `Z_FINISH`, `deflate()`
                 * is guaranteed to return `Z_STREAM_END`.
                 */
                compressed_len = deflateBound(w->zstream, src_len);
                REFTABLE_ALLOC_GROW(w->compressed, compressed_len, w->compressed_cap);
                if (!w->compressed) {
                        ret = REFTABLE_OUT_OF_MEMORY_ERROR;
                        return ret;
                }

                w->zstream->next_out = w->compressed;
                w->zstream->avail_out = compressed_len;
                w->zstream->next_in = w->buf + block_header_skip;
                w->zstream->avail_in = src_len;

                /*
                 * We want to perform all compression in a single step, which
                 * is why we can pass Z_FINISH here. As we have precomputed the
                 * deflated buffer's size via `deflateBound()` this function is
                 * guaranteed to succeed according to the zlib documentation.
                 */
                ret = deflate(w->zstream, Z_FINISH);
                if (ret != Z_STREAM_END)
                        return REFTABLE_ZLIB_ERROR;

                /*
                 * Overwrite the uncompressed data we have already written and
                 * adjust the `next` pointer to point right after the
                 * compressed data.
                 */
                memcpy(w->buf + block_header_skip, w->compressed,
                       w->zstream->total_out);
                w->next = w->zstream->total_out + block_header_skip;
        }

        return w->next;
}
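
/*
 * A finished block, as assembled above, is laid out as:
 *
 *	block type      : 1 byte
 *	block length    : uint24, counting up to and including the restart count
 *	records         : prefix-compressed, as written by block_writer_add()
 *	restart offsets : uint24 each, restart_count entries
 *	restart count   : uint16
 *
 * For log blocks everything after the 4-byte block header is then
 * zlib-deflated in place, while the length field keeps the uncompressed size.
 */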
int block_reader_init(struct block_reader *br, struct reftable_block *block,
                      uint32_t header_off, uint32_t table_block_size,
                      int hash_size)
{
        uint32_t full_block_size = table_block_size;
        uint8_t typ = block->data[header_off];
        uint32_t sz = get_be24(block->data + header_off + 1);
        int err = 0;
        uint16_t restart_count = 0;
        uint32_t restart_start = 0;
        uint8_t *restart_bytes = NULL;

        reftable_block_done(&br->block);

        if (!reftable_is_block_type(typ)) {
                err = REFTABLE_FORMAT_ERROR;
                goto done;
        }

        if (typ == BLOCK_TYPE_LOG) {
                uint32_t block_header_skip = 4 + header_off;
                uLong dst_len = sz - block_header_skip;
                uLong src_len = block->len - block_header_skip;

                /* Log blocks specify the *uncompressed* size in their header. */
                REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
                                    br->uncompressed_cap);
                if (!br->uncompressed_data) {
                        err = REFTABLE_OUT_OF_MEMORY_ERROR;
                        goto done;
                }

                /* Copy over the block header verbatim. It's not compressed. */
                memcpy(br->uncompressed_data, block->data, block_header_skip);

                if (!br->zstream) {
                        REFTABLE_CALLOC_ARRAY(br->zstream, 1);
                        if (!br->zstream) {
                                err = REFTABLE_OUT_OF_MEMORY_ERROR;
                                goto done;
                        }

                        err = inflateInit(br->zstream);
                } else {
                        err = inflateReset(br->zstream);
                }
                if (err != Z_OK) {
                        err = REFTABLE_ZLIB_ERROR;
                        goto done;
                }

                br->zstream->next_in = block->data + block_header_skip;
                br->zstream->avail_in = src_len;
                br->zstream->next_out = br->uncompressed_data + block_header_skip;
                br->zstream->avail_out = dst_len;

                /*
                 * We know both input as well as output size, and we know that
                 * the sizes should never be bigger than `uInt_MAX` because
                 * blocks can at most be 16MB large. We can thus use `Z_FINISH`
                 * here to instruct zlib to inflate the data in one go, which
                 * is more efficient than using `Z_NO_FLUSH`.
                 */
                err = inflate(br->zstream, Z_FINISH);
                if (err != Z_STREAM_END) {
                        err = REFTABLE_ZLIB_ERROR;
                        goto done;
                }
                err = 0;

                if (br->zstream->total_out + block_header_skip != sz) {
                        err = REFTABLE_FORMAT_ERROR;
                        goto done;
                }

                /* We're done with the input data. */
                reftable_block_done(block);
                block->data = br->uncompressed_data;
                block->len = sz;
                full_block_size = src_len + block_header_skip - br->zstream->avail_in;
        } else if (full_block_size == 0) {
                full_block_size = sz;
        } else if (sz < full_block_size && sz < block->len &&
                   block->data[sz] != 0) {
                /* If the block is smaller than the full block size, it is
                   padded (data followed by '\0') or the next block is
                   unaligned. */
                full_block_size = sz;
        }

        restart_count = get_be16(block->data + sz - 2);
        restart_start = sz - 2 - 3 * restart_count;
        restart_bytes = block->data + restart_start;

        /* transfer ownership. */
        br->block = *block;
        block->data = NULL;
        block->len = 0;

        br->hash_size = hash_size;
        br->block_len = restart_start;
        br->full_block_size = full_block_size;
        br->header_off = header_off;
        br->restart_count = restart_count;
        br->restart_bytes = restart_bytes;

done:
        return err;
}
void block_reader_release(struct block_reader *br)
{
        inflateEnd(br->zstream);
        reftable_free(br->zstream);
        reftable_free(br->uncompressed_data);
        reftable_block_done(&br->block);
}

uint8_t block_reader_type(const struct block_reader *r)
{
        return r->block.data[r->header_off];
}

int block_reader_first_key(const struct block_reader *br, struct reftable_buf *key)
{
        int off = br->header_off + 4, n;
        struct string_view in = {
                .buf = br->block.data + off,
                .len = br->block_len - off,
        };
        uint8_t extra = 0;

        reftable_buf_reset(key);

        n = reftable_decode_key(key, &extra, in);
        if (n < 0)
                return n;
        if (!key->len)
                return REFTABLE_FORMAT_ERROR;

        return 0;
}

static uint32_t block_reader_restart_offset(const struct block_reader *br, size_t idx)
{
        return get_be24(br->restart_bytes + 3 * idx);
}

void block_iter_seek_start(struct block_iter *it, const struct block_reader *br)
{
        it->block = br->block.data;
        it->block_len = br->block_len;
        it->hash_size = br->hash_size;
        reftable_buf_reset(&it->last_key);
        it->next_off = br->header_off + 4;
}

struct restart_needle_less_args {
        int error;
        struct reftable_buf needle;
        const struct block_reader *reader;
};
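
/*
 * Predicate for binsearch(): returns whether the needle key sorts before the
 * key stored at restart point `idx`. Restart keys are stored with a prefix
 * length of zero, so only the key length header and suffix need to be
 * examined. On a malformed key, `args->error` is set.
 */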
static int restart_needle_less(size_t idx, void *_args)
{
        struct restart_needle_less_args *args = _args;
        uint32_t off = block_reader_restart_offset(args->reader, idx);
        struct string_view in = {
                .buf = args->reader->block.data + off,
                .len = args->reader->block_len - off,
        };
        uint64_t prefix_len, suffix_len;
        uint8_t extra;
        int n;

        /*
         * Records at restart points are stored without prefix compression, so
         * there is no need to fully decode the record key here. This removes
         * the need for allocating memory.
         */
        n = reftable_decode_keylen(in, &prefix_len, &suffix_len, &extra);
        if (n < 0 || prefix_len) {
                args->error = 1;
                return -1;
        }

        string_view_consume(&in, n);
        if (suffix_len > in.len) {
                args->error = 1;
                return -1;
        }

        n = memcmp(args->needle.buf, in.buf,
                   args->needle.len < suffix_len ? args->needle.len : suffix_len);
        if (n)
                return n < 0;
        return args->needle.len < suffix_len;
}

int block_iter_next(struct block_iter *it, struct reftable_record *rec)
{
        struct string_view in = {
                .buf = (unsigned char *) it->block + it->next_off,
                .len = it->block_len - it->next_off,
        };
        struct string_view start = in;
        uint8_t extra = 0;
        int n = 0;

        if (it->next_off >= it->block_len)
                return 1;

        n = reftable_decode_key(&it->last_key, &extra, in);
        if (n < 0)
                return -1;
        if (!it->last_key.len)
                return REFTABLE_FORMAT_ERROR;

        string_view_consume(&in, n);
        n = reftable_record_decode(rec, it->last_key, extra, in, it->hash_size,
                                   &it->scratch);
        if (n < 0)
                return -1;
        string_view_consume(&in, n);

        it->next_off += start.len - in.len;
        return 0;
}
void block_iter_reset(struct block_iter *it)
{
        reftable_buf_reset(&it->last_key);
        it->next_off = 0;
        it->block = NULL;
        it->block_len = 0;
        it->hash_size = 0;
}

void block_iter_close(struct block_iter *it)
{
        reftable_buf_release(&it->last_key);
        reftable_buf_release(&it->scratch);
}
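
/*
 * Illustrative sketch of reading a block (hypothetical caller, condensed
 * error handling; the real call sites live in the reftable table reader):
 *
 *	struct block_reader br = { 0 };
 *	struct block_iter it = { 0 };
 *	struct reftable_record rec;
 *
 *	err = block_reader_init(&br, &block, header_off, block_size, hash_size);
 *	reftable_record_init(&rec, block_reader_type(&br));
 *	block_iter_seek_start(&it, &br);
 *	while (!(err = block_iter_next(&it, &rec))) {
 *		use rec; a positive return value signals end of block
 *	}
 *	block_iter_close(&it);
 *	reftable_record_release(&rec);
 *	block_reader_release(&br);
 */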
int block_iter_seek_key(struct block_iter *it, const struct block_reader *br,
                        struct reftable_buf *want)
{
        struct restart_needle_less_args args = {
                .needle = *want,
                .reader = br,
        };
        struct reftable_record rec;
        int err = 0;
        size_t i;

        /*
         * Perform a binary search over the block's restart points, which
         * avoids doing a linear scan over the whole block. This way we
         * identify the section of the block that should contain our key.
         *
         * Note that we explicitly search for the first restart point _greater_
         * than the sought-after record, not _greater or equal_ to it. In case
         * the sought-after record is located directly at the restart point we
         * would otherwise start doing the linear search at the preceding
         * restart point. While that works alright, we would end up scanning
         * too many records.
         */
        i = binsearch(br->restart_count, &restart_needle_less, &args);
        if (args.error) {
                err = REFTABLE_FORMAT_ERROR;
                goto done;
        }

        /*
         * Now there are multiple cases:
         *
         * - `i == 0`: The wanted record is smaller than the record found at
         *   the first restart point. As the first restart point is the first
         *   record in the block, our wanted record cannot be located in this
         *   block at all. We still need to position the iterator so that the
         *   next call to `block_iter_next()` will yield an end-of-iterator
         *   signal.
         *
         * - `i == restart_count`: The wanted record was not found at any of
         *   the restart points. As sections are only delimited by their
         *   leading restart point, the record may thus be contained in the
         *   block's final section.
         *
         * - `i > 0`: The wanted record must be contained in the section
         *   before the found restart point. We thus do a linear search
         *   starting from the preceding restart point.
         */
        if (i > 0)
                it->next_off = block_reader_restart_offset(br, i - 1);
        else
                it->next_off = br->header_off + 4;
        it->block = br->block.data;
        it->block_len = br->block_len;
        it->hash_size = br->hash_size;

        reftable_record_init(&rec, block_reader_type(br));

        /*
         * We're looking for the last entry less than the wanted key so that
         * the next call to `block_iter_next()` would yield the wanted record.
         * We thus don't want to position our iterator at the sought-after
         * record, but one before it. To do so, we have to go one entry too
         * far and then back up.
         */
        while (1) {
                size_t prev_off = it->next_off;

                err = block_iter_next(it, &rec);
                if (err < 0)
                        goto done;
                if (err > 0) {
                        it->next_off = prev_off;
                        err = 0;
                        goto done;
                }

                err = reftable_record_key(&rec, &it->last_key);
                if (err < 0)
                        goto done;

                /*
                 * Check whether the current key is greater or equal to the
                 * sought-after key. In case it is greater we know that the
                 * record does not exist in the block and can thus abort early.
                 * In case it is equal to the sought-after key we have found
                 * the desired record.
                 *
                 * Note that we store the next record's key directly in
                 * `last_key` without restoring the key of the preceding record
                 * in case we need to go one record back. This is safe to do as
                 * `block_iter_next()` would return the record whose key is
                 * equal to `last_key` now, and naturally all keys share a
                 * prefix with themselves.
                 */
                if (reftable_buf_cmp(&it->last_key, want) >= 0) {
                        it->next_off = prev_off;
                        goto done;
                }
        }

done:
        reftable_record_release(&rec);
        return err;
}
void block_writer_release(struct block_writer *bw)
{
        deflateEnd(bw->zstream);
        REFTABLE_FREE_AND_NULL(bw->zstream);
        REFTABLE_FREE_AND_NULL(bw->restarts);
        REFTABLE_FREE_AND_NULL(bw->compressed);
        reftable_buf_release(&bw->last_key);
        /* the block is not owned. */
}
void reftable_block_done(struct reftable_block *blockp)
{
        struct reftable_block_source source;

        if (!blockp)
                return;

        source = blockp->source;
        if (source.ops)
                source.ops->return_block(source.arg, blockp);

        blockp->data = NULL;
        blockp->len = 0;
        blockp->source.ops = NULL;
        blockp->source.arg = NULL;
}