/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/
#include "block.h"

#include "blocksource.h"
#include "constants.h"
#include "record.h"
#include "reftable-error.h"
#include "system.h"
#include <zlib.h>
int header_size(int version)
{
	switch (version) {
	case 1:
		return 24;
	case 2:
		return 28;
	}
	abort();
}

int footer_size(int version)
{
	switch (version) {
	case 1:
		return 68;
	case 2:
		return 72;
	}
	abort();
}
static int block_writer_register_restart(struct block_writer *w, int n,
					 int is_restart, struct reftable_buf *key)
{
	uint32_t rlen;
	int err;

	rlen = w->restart_len;
	if (rlen >= MAX_RESTARTS) {
		is_restart = 0;
	}

	if (2 + 3 * rlen + n > w->block_size - w->next)
		return -1;
	if (is_restart) {
		REFTABLE_ALLOC_GROW(w->restarts, w->restart_len + 1, w->restart_cap);
		if (!w->restarts)
			return REFTABLE_OUT_OF_MEMORY_ERROR;
		w->restarts[w->restart_len++] = w->next;
	}

	w->next += n;

	reftable_buf_reset(&w->last_key);
	err = reftable_buf_add(&w->last_key, key->buf, key->len);
	if (err < 0)
		return err;

	w->entries++;
	return 0;
}
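
/*
 * Note on the space check above: besides the encoded record itself
 * (`n` bytes), every pending restart costs three bytes for its offset,
 * and the restart table is terminated by a two-byte restart count, all
 * of which block_writer_finish() appends at the end of the block.
 */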
int block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *buf,
		      uint32_t block_size, uint32_t header_off, int hash_size)
{
	bw->buf = buf;
	bw->hash_size = hash_size;
	bw->block_size = block_size;
	bw->header_off = header_off;
	bw->buf[header_off] = typ;
	bw->next = header_off + 4;
	bw->restart_interval = 16;
	bw->entries = 0;
	bw->restart_len = 0;
	bw->last_key.len = 0;
	if (!bw->zstream) {
		REFTABLE_CALLOC_ARRAY(bw->zstream, 1);
		if (!bw->zstream)
			return REFTABLE_OUT_OF_MEMORY_ERROR;
		deflateInit(bw->zstream, 9);
	}

	return 0;
}
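
/*
 * The `if (!bw->zstream)` guard above means the deflate stream is
 * allocated and initialized only once per writer; subsequent blocks
 * reuse it via deflateReset() in block_writer_finish().
 */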
uint8_t block_writer_type(struct block_writer *bw)
{
	return bw->buf[bw->header_off];
}
/* Adds the reftable_record to the block. Returns -1 if it does not fit, 0 on
   success. Returns REFTABLE_API_ERROR if attempting to write a record with
   empty key. */
int block_writer_add(struct block_writer *w, struct reftable_record *rec)
{
	struct reftable_buf empty = REFTABLE_BUF_INIT;
	struct reftable_buf last =
		w->entries % w->restart_interval == 0 ? empty : w->last_key;
	struct string_view out = {
		.buf = w->buf + w->next,
		.len = w->block_size - w->next,
	};
	struct string_view start = out;
	int is_restart = 0;
	int n = 0;
	int err;
	struct reftable_buf key = REFTABLE_BUF_INIT;

	err = reftable_record_key(rec, &key);
	if (err < 0)
		goto done;

	if (!key.len) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	n = reftable_encode_key(&is_restart, out, last, key,
				reftable_record_val_type(rec));
	if (n < 0) {
		err = -1;
		goto done;
	}
	string_view_consume(&out, n);

	n = reftable_record_encode(rec, out, w->hash_size);
	if (n < 0) {
		err = -1;
		goto done;
	}
	string_view_consume(&out, n);

	err = block_writer_register_restart(w, start.len - out.len, is_restart,
					    &key);
done:
	reftable_buf_release(&key);
	return err;
}
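
/*
 * Worked example (illustrative, not from the original sources): with
 * the default restart_interval of 16, every 16th record is encoded
 * against an empty `last` key, i.e. without prefix compression. For two
 * consecutive keys
 *
 *   "refs/heads/main"     restart:     prefix_len = 0, suffix = full key
 *   "refs/heads/master"   compressed:  prefix_len = 13 ("refs/heads/ma"),
 *                                      suffix = "ster"
 *
 * only the prefix length and the suffix are written for the second
 * record.
 */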
int block_writer_finish(struct block_writer *w)
{
	unsigned int i;
	for (i = 0; i < w->restart_len; i++) {
		put_be24(w->buf + w->next, w->restarts[i]);
		w->next += 3;
	}

	put_be16(w->buf + w->next, w->restart_len);
	w->next += 2;
	put_be24(w->buf + 1 + w->header_off, w->next);

	/*
	 * Log records are stored zlib-compressed. Note that the compression
	 * also spans over the restart points we have just written.
	 */
	if (block_writer_type(w) == BLOCK_TYPE_LOG) {
		int block_header_skip = 4 + w->header_off;
		uLongf src_len = w->next - block_header_skip, compressed_len;
		int ret;

		ret = deflateReset(w->zstream);
		if (ret != Z_OK)
			return REFTABLE_ZLIB_ERROR;

		/*
		 * Precompute the upper bound of how many bytes the compressed
		 * data may end up with. Combined with `Z_FINISH`, `deflate()`
		 * is guaranteed to return `Z_STREAM_END`.
		 */
		compressed_len = deflateBound(w->zstream, src_len);
		REFTABLE_ALLOC_GROW(w->compressed, compressed_len, w->compressed_cap);
		if (!w->compressed) {
			ret = REFTABLE_OUT_OF_MEMORY_ERROR;
			return ret;
		}

		w->zstream->next_out = w->compressed;
		w->zstream->avail_out = compressed_len;
		w->zstream->next_in = w->buf + block_header_skip;
		w->zstream->avail_in = src_len;

		/*
		 * We want to perform all compression in a single step, which
		 * is why we can pass Z_FINISH here. As we have precomputed the
		 * deflated buffer's size via `deflateBound()` this function is
		 * guaranteed to succeed according to the zlib documentation.
		 */
		ret = deflate(w->zstream, Z_FINISH);
		if (ret != Z_STREAM_END)
			return REFTABLE_ZLIB_ERROR;

		/*
		 * Overwrite the uncompressed data we have already written and
		 * adjust the `next` pointer to point right after the
		 * compressed data.
		 */
		memcpy(w->buf + block_header_skip, w->compressed,
		       w->zstream->total_out);
		w->next = w->zstream->total_out + block_header_skip;
	}

	return w->next;
}
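
/*
 * Sketch of the finished block layout (derived from the code above):
 *
 *   [header_off bytes] [typ] [be24: block length] [records ...]
 *   [be24: restart offset] * restart_len [be16: restart_len]
 *
 * For log blocks, everything following the 4-byte block header is then
 * replaced by its zlib-deflated equivalent.
 */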
int block_reader_init(struct block_reader *br, struct reftable_block *block,
		      uint32_t header_off, uint32_t table_block_size,
		      int hash_size)
{
	uint32_t full_block_size = table_block_size;
	uint8_t typ = block->data[header_off];
	uint32_t sz = get_be24(block->data + header_off + 1);
	int err = 0;
	uint16_t restart_count = 0;
	uint32_t restart_start = 0;
	uint8_t *restart_bytes = NULL;

	reftable_block_done(&br->block);

	if (!reftable_is_block_type(typ)) {
		err = REFTABLE_FORMAT_ERROR;
		goto done;
	}

	if (typ == BLOCK_TYPE_LOG) {
		uint32_t block_header_skip = 4 + header_off;
		uLong dst_len = sz - block_header_skip;
		uLong src_len = block->len - block_header_skip;

		/* Log blocks specify the *uncompressed* size in their header. */
		REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
				    br->uncompressed_cap);
		if (!br->uncompressed_data) {
			err = REFTABLE_OUT_OF_MEMORY_ERROR;
			goto done;
		}

		/* Copy over the block header verbatim. It's not compressed. */
		memcpy(br->uncompressed_data, block->data, block_header_skip);

		if (!br->zstream) {
			REFTABLE_CALLOC_ARRAY(br->zstream, 1);
			if (!br->zstream) {
				err = REFTABLE_OUT_OF_MEMORY_ERROR;
				goto done;
			}

			err = inflateInit(br->zstream);
		} else {
			err = inflateReset(br->zstream);
		}
		if (err != Z_OK) {
			err = REFTABLE_ZLIB_ERROR;
			goto done;
		}

		br->zstream->next_in = block->data + block_header_skip;
		br->zstream->avail_in = src_len;
		br->zstream->next_out = br->uncompressed_data + block_header_skip;
		br->zstream->avail_out = dst_len;

		/*
		 * We know both input as well as output size, and we know that
		 * the sizes should never be bigger than `uInt_MAX` because
		 * blocks can at most be 16MB large. We can thus use `Z_FINISH`
		 * here to instruct zlib to inflate the data in one go, which
		 * is more efficient than using `Z_NO_FLUSH`.
		 */
		err = inflate(br->zstream, Z_FINISH);
		if (err != Z_STREAM_END) {
			err = REFTABLE_ZLIB_ERROR;
			goto done;
		}
		err = 0;

		if (br->zstream->total_out + block_header_skip != sz) {
			err = REFTABLE_FORMAT_ERROR;
			goto done;
		}

		/* We're done with the input data. */
		reftable_block_done(block);
		block->data = br->uncompressed_data;
		block->len = sz;
		full_block_size = src_len + block_header_skip - br->zstream->avail_in;
	} else if (full_block_size == 0) {
		full_block_size = sz;
	} else if (sz < full_block_size && sz < block->len &&
		   block->data[sz] != 0) {
		/* If the block is smaller than the full block size, it is
		   padded (data followed by '\0') or the next block is
		   unaligned. */
		full_block_size = sz;
	}

	restart_count = get_be16(block->data + sz - 2);
	restart_start = sz - 2 - 3 * restart_count;
	restart_bytes = block->data + restart_start;

	/* transfer ownership. */
	br->block = *block;
	block->data = NULL;
	block->len = 0;

	br->hash_size = hash_size;
	br->block_len = restart_start;
	br->full_block_size = full_block_size;
	br->header_off = header_off;
	br->restart_count = restart_count;
	br->restart_bytes = restart_bytes;

done:
	return err;
}
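
/*
 * The be24 length field read by block_reader_init() is what limits a
 * block to 2^24 bytes (16MB), which is why the zlib sizes above are
 * known to fit into `uInt`.
 */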
void block_reader_release(struct block_reader *br)
{
	inflateEnd(br->zstream);
	reftable_free(br->zstream);
	reftable_free(br->uncompressed_data);
	reftable_block_done(&br->block);
}
uint8_t block_reader_type(const struct block_reader *r)
{
	return r->block.data[r->header_off];
}
int block_reader_first_key(const struct block_reader *br, struct reftable_buf *key)
{
	int off = br->header_off + 4, n;
	struct string_view in = {
		.buf = br->block.data + off,
		.len = br->block_len - off,
	};
	uint8_t extra = 0;

	reftable_buf_reset(key);

	n = reftable_decode_key(key, &extra, in);
	if (n < 0)
		return n;
	if (!key->len)
		return REFTABLE_FORMAT_ERROR;

	return 0;
}
static uint32_t block_reader_restart_offset(const struct block_reader *br, size_t idx)
{
	return get_be24(br->restart_bytes + 3 * idx);
}
void block_iter_seek_start(struct block_iter *it, const struct block_reader *br)
{
	it->block = br->block.data;
	it->block_len = br->block_len;
	it->hash_size = br->hash_size;
	reftable_buf_reset(&it->last_key);
	it->next_off = br->header_off + 4;
}
struct restart_needle_less_args {
	int error;
	struct reftable_buf needle;
	const struct block_reader *reader;
};
static int restart_needle_less(size_t idx, void *_args)
{
	struct restart_needle_less_args *args = _args;
	uint32_t off = block_reader_restart_offset(args->reader, idx);
	struct string_view in = {
		.buf = args->reader->block.data + off,
		.len = args->reader->block_len - off,
	};
	uint64_t prefix_len, suffix_len;
	uint8_t extra;
	int n;

	/*
	 * Records at restart points are stored without prefix compression, so
	 * there is no need to fully decode the record key here. This removes
	 * the need for allocating memory.
	 */
	n = reftable_decode_keylen(in, &prefix_len, &suffix_len, &extra);
	if (n < 0 || prefix_len) {
		args->error = 1;
		return -1;
	}

	string_view_consume(&in, n);
	if (suffix_len > in.len) {
		args->error = 1;
		return -1;
	}

	n = memcmp(args->needle.buf, in.buf,
		   args->needle.len < suffix_len ? args->needle.len : suffix_len);
	if (n)
		return n < 0;
	return args->needle.len < suffix_len;
}
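
/*
 * Example (illustrative, not from the original sources): for restart
 * keys ["b", "d", "f"] and needle "d", this predicate yields 0, 0, 1,
 * so binsearch() returns 2, the first restart point whose key is
 * strictly greater than the needle. The caller in block_iter_seek_key()
 * then starts its linear scan from restart point 1.
 */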
int block_iter_next(struct block_iter *it, struct reftable_record *rec)
{
	struct string_view in = {
		.buf = (unsigned char *) it->block + it->next_off,
		.len = it->block_len - it->next_off,
	};
	struct string_view start = in;
	uint8_t extra = 0;
	int n = 0;

	if (it->next_off >= it->block_len)
		return 1;

	n = reftable_decode_key(&it->last_key, &extra, in);
	if (n < 0)
		return -1;
	if (!it->last_key.len)
		return REFTABLE_FORMAT_ERROR;

	string_view_consume(&in, n);
	n = reftable_record_decode(rec, it->last_key, extra, in, it->hash_size,
				   &it->scratch);
	if (n < 0)
		return -1;
	string_view_consume(&in, n);

	it->next_off += start.len - in.len;
	return 0;
}
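
/*
 * Return convention above: 0 means a record was decoded, a positive
 * value signals the end of the block, and a negative value is an
 * error. Callers are expected to loop until the return value is
 * nonzero.
 */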
void block_iter_reset(struct block_iter *it)
{
	reftable_buf_reset(&it->last_key);
	it->next_off = 0;
	it->block = NULL;
	it->block_len = 0;
	it->hash_size = 0;
}
void block_iter_close(struct block_iter *it)
{
	reftable_buf_release(&it->last_key);
	reftable_buf_release(&it->scratch);
}
int block_iter_seek_key(struct block_iter *it, const struct block_reader *br,
			struct reftable_buf *want)
{
	struct restart_needle_less_args args = {
		.needle = *want,
		.reader = br,
	};
	struct reftable_record rec;
	int err = 0;
	size_t i;

	/*
	 * Perform a binary search over the block's restart points, which
	 * avoids doing a linear scan over the whole block. Like this, we
	 * identify the section of the block that should contain our key.
	 *
	 * Note that we explicitly search for the first restart point _greater_
	 * than the sought-after record, not _greater or equal_ to it. In case
	 * the sought-after record is located directly at the restart point we
	 * would otherwise start doing the linear search at the preceding
	 * restart point. While that works alright, we would end up scanning
	 * too many records.
	 */
	i = binsearch(br->restart_count, &restart_needle_less, &args);
	if (args.error) {
		err = REFTABLE_FORMAT_ERROR;
		goto done;
	}

	/*
	 * Now there are multiple cases:
	 *
	 * - `i == 0`: The wanted record is smaller than the record found at
	 *   the first restart point. As the first restart point is the first
	 *   record in the block, our wanted record cannot be located in this
	 *   block at all. We still need to position the iterator so that the
	 *   next call to `block_iter_next()` will yield an end-of-iterator
	 *   signal.
	 *
	 * - `i == restart_count`: The wanted record was not found at any of
	 *   the restart points. As there is no restart point at the end of
	 *   the section the record may thus be contained in the last block.
	 *
	 * - `i > 0`: The wanted record must be contained in the section
	 *   before the found restart point. We thus do a linear search
	 *   starting from the preceding restart point.
	 */
	if (i > 0)
		it->next_off = block_reader_restart_offset(br, i - 1);
	else
		it->next_off = br->header_off + 4;
	it->block = br->block.data;
	it->block_len = br->block_len;
	it->hash_size = br->hash_size;

	reftable_record_init(&rec, block_reader_type(br));

	/*
	 * We're looking for the last entry less than the wanted key so that
	 * the next call to `block_iter_next()` would yield the wanted
	 * record. We thus don't want to position our reader at the sought
	 * after record, but one before. To do so, we have to go one entry too
	 * far and then back up.
	 */
	while (1) {
		size_t prev_off = it->next_off;

		err = block_iter_next(it, &rec);
		if (err < 0)
			goto done;
		if (err > 0) {
			it->next_off = prev_off;
			err = 0;
			goto done;
		}

		err = reftable_record_key(&rec, &it->last_key);
		if (err < 0)
			goto done;

		/*
		 * Check whether the current key is greater or equal to the
		 * sought-after key. In case it is greater we know that the
		 * record does not exist in the block and can thus abort early.
		 * In case it is equal to the sought-after key we have found
		 * the desired record.
		 *
		 * Note that we store the next record's key directly in
		 * `last_key` without restoring the key of the preceding record
		 * in case we need to go one record back. This is safe to do as
		 * `block_iter_next()` would return the record whose key is
		 * equal to `last_key` now, and naturally all keys share a
		 * prefix with themselves.
		 */
		if (reftable_buf_cmp(&it->last_key, want) >= 0) {
			it->next_off = prev_off;
			goto done;
		}
	}

done:
	reftable_record_release(&rec);
	return err;
}
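
/*
 * Illustration only, not part of the original file: a minimal lookup
 * sketch combining the iterator functions above, assuming the
 * `BLOCK_ITER_INIT` initializer from block.h.
 */
#if 0
static int example_lookup(struct block_reader *br, struct reftable_buf *want,
			  struct reftable_record *rec)
{
	struct block_iter it = BLOCK_ITER_INIT;
	int err = block_iter_seek_key(&it, br, want);
	if (err < 0)
		goto done;
	/* 0 yields the first record at or after `want`; > 0 is end-of-block. */
	err = block_iter_next(&it, rec);
done:
	block_iter_close(&it);
	return err;
}
#endif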
void block_writer_release(struct block_writer *bw)
{
	deflateEnd(bw->zstream);
	REFTABLE_FREE_AND_NULL(bw->zstream);
	REFTABLE_FREE_AND_NULL(bw->restarts);
	REFTABLE_FREE_AND_NULL(bw->compressed);
	reftable_buf_release(&bw->last_key);
	/* the block is not owned. */
}
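
/*
 * Illustration only, not part of the original file: a minimal sketch of
 * the writer lifecycle defined in this file, assuming a caller-provided
 * `buf` of `block_size` bytes and 20-byte (SHA-1) hashes.
 */
#if 0
static int example_write_block(struct reftable_record *recs, size_t nrecs,
			       uint8_t *buf, uint32_t block_size)
{
	struct block_writer bw = { 0 };
	size_t i;
	int err;

	err = block_writer_init(&bw, BLOCK_TYPE_REF, buf, block_size,
				0 /* header_off */, 20 /* hash_size */);
	if (err < 0)
		return err;

	for (i = 0; i < nrecs; i++) {
		err = block_writer_add(&bw, &recs[i]);
		if (err < 0)
			break; /* -1: record does not fit into this block */
	}

	if (err >= 0)
		err = block_writer_finish(&bw); /* returns bytes used */

	block_writer_release(&bw);
	return err;
}
#endif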
void reftable_block_done(struct reftable_block *blockp)
{
	struct reftable_block_source source = blockp->source;
	if (source.ops)
		source.ops->return_block(source.arg, blockp);
	blockp->data = NULL;
	blockp->len = 0;
	blockp->source.ops = NULL;
	blockp->source.arg = NULL;
}