/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "block.h"

#include "blocksource.h"
#include "constants.h"
#include "record.h"
#include "reftable-error.h"
#include "system.h"
#include <zlib.h>

int header_size(int version)
{
	switch (version) {
	case 1:
		return 24;
	case 2:
		/* The v2 header additionally carries a 4-byte hash ID. */
		return 28;
	}
	abort();
}

int footer_size(int version)
{
	switch (version) {
	case 1:
		return 68;
	case 2:
		return 72;
	}
	abort();
}

static int block_writer_register_restart(struct block_writer *w, int n,
					 int is_restart, struct strbuf *key)
{
	int rlen = w->restart_len;
	if (rlen >= MAX_RESTARTS) {
		is_restart = 0;
	}

	if (is_restart) {
		rlen++;
	}
	/*
	 * The trailing restart table needs 3 bytes per restart point plus a
	 * 2-byte restart count, so the record only fits if that trailer
	 * still fits, too.
	 */
	if (2 + 3 * rlen + n > w->block_size - w->next)
		return -1;
	if (is_restart) {
		REFTABLE_ALLOC_GROW(w->restarts, w->restart_len + 1, w->restart_cap);
		w->restarts[w->restart_len++] = w->next;
	}

	w->next += n;

	strbuf_reset(&w->last_key);
	strbuf_addbuf(&w->last_key, key);
	w->entries++;
	return 0;
}
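
/*
 * For illustration of the space check in block_writer_register_restart()
 * (values hypothetical, not from the sources): with block_size = 1024,
 * next = 1000 and two prior restart points, a restarting record of
 * n = 10 bytes needs 2 + 3 * 3 + 10 = 21 bytes of trailer-plus-record
 * space while 1024 - 1000 = 24 remain, so it fits; an n = 15 record
 * would need 26 bytes and be rejected with -1, telling the caller to
 * flush this block and retry in a fresh one.
 */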

void block_writer_init(struct block_writer *bw, uint8_t typ, uint8_t *buf,
		       uint32_t block_size, uint32_t header_off, int hash_size)
{
	bw->buf = buf;
	bw->hash_size = hash_size;
	bw->block_size = block_size;
	bw->header_off = header_off;
	bw->buf[header_off] = typ;
	bw->next = header_off + 4;
	bw->restart_interval = 16;
	bw->entries = 0;
	bw->restart_len = 0;
	bw->last_key.len = 0;
	if (!bw->zstream) {
		REFTABLE_CALLOC_ARRAY(bw->zstream, 1);
		deflateInit(bw->zstream, 9);
	}
}

uint8_t block_writer_type(struct block_writer *bw)
{
	return bw->buf[bw->header_off];
}

/* Adds the reftable_record to the block. Returns -1 if it does not fit, 0 on
   success. Returns REFTABLE_API_ERROR if attempting to write a record with
   an empty key. */
int block_writer_add(struct block_writer *w, struct reftable_record *rec)
{
	struct strbuf empty = STRBUF_INIT;
	struct strbuf last =
		w->entries % w->restart_interval == 0 ? empty : w->last_key;
	struct string_view out = {
		.buf = w->buf + w->next,
		.len = w->block_size - w->next,
	};

	struct string_view start = out;

	int is_restart = 0;
	struct strbuf key = STRBUF_INIT;
	int n = 0;
	int err = -1;

	reftable_record_key(rec, &key);
	if (!key.len) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	n = reftable_encode_key(&is_restart, out, last, key,
				reftable_record_val_type(rec));
	if (n < 0)
		goto done;
	string_view_consume(&out, n);

	n = reftable_record_encode(rec, out, w->hash_size);
	if (n < 0)
		goto done;
	string_view_consume(&out, n);

	err = block_writer_register_restart(w, start.len - out.len, is_restart,
					    &key);
done:
	strbuf_release(&key);
	return err;
}
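
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * records are appended until the block reports that it is full, at which
 * point the caller finishes the block and starts a fresh one:
 *
 *	while (have_records()) {		// have_records() is a placeholder
 *		if (block_writer_add(&bw, rec) == -1) {
 *			block_writer_finish(&bw);
 *			block_writer_init(&bw, BLOCK_TYPE_REF, buf,
 *					  block_size, 0, hash_size);
 *			block_writer_add(&bw, rec);
 *		}
 *	}
 */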

int block_writer_finish(struct block_writer *w)
{
	int i;
	for (i = 0; i < w->restart_len; i++) {
		put_be24(w->buf + w->next, w->restarts[i]);
		w->next += 3;
	}

	put_be16(w->buf + w->next, w->restart_len);
	w->next += 2;
	put_be24(w->buf + 1 + w->header_off, w->next);

	/*
	 * Log records are stored zlib-compressed. Note that the compression
	 * also spans over the restart points we have just written.
	 */
	if (block_writer_type(w) == BLOCK_TYPE_LOG) {
		int block_header_skip = 4 + w->header_off;
		uLongf src_len = w->next - block_header_skip, compressed_len;
		int ret;

		ret = deflateReset(w->zstream);
		if (ret != Z_OK)
			return REFTABLE_ZLIB_ERROR;

		/*
		 * Precompute the upper bound of how many bytes the compressed
		 * data may end up with. Combined with `Z_FINISH`, `deflate()`
		 * is guaranteed to return `Z_STREAM_END`.
		 */
		compressed_len = deflateBound(w->zstream, src_len);
		REFTABLE_ALLOC_GROW(w->compressed, compressed_len, w->compressed_cap);

		w->zstream->next_out = w->compressed;
		w->zstream->avail_out = compressed_len;
		w->zstream->next_in = w->buf + block_header_skip;
		w->zstream->avail_in = src_len;

		/*
		 * We want to perform all compression in a single step, which
		 * is why we can pass Z_FINISH here. As we have precomputed the
		 * deflated buffer's size via `deflateBound()` this function is
		 * guaranteed to succeed according to the zlib documentation.
		 */
		ret = deflate(w->zstream, Z_FINISH);
		if (ret != Z_STREAM_END)
			return REFTABLE_ZLIB_ERROR;

		/*
		 * Overwrite the uncompressed data we have already written and
		 * adjust the `next` pointer to point right after the
		 * compressed data.
		 */
		memcpy(w->buf + block_header_skip, w->compressed,
		       w->zstream->total_out);
		w->next = w->zstream->total_out + block_header_skip;
	}

	return w->next;
}
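
/*
 * The on-disk layout this produces, as a sketch (field widths follow the
 * code above; record contents omitted):
 *
 *	header_off bytes	optional file header (first block only)
 *	1 byte			block type ('r', 'o', 'i' or 'g')
 *	3 bytes			big-endian block length
 *	...			prefix-compressed records
 *	3 bytes each		big-endian restart offsets
 *	2 bytes			big-endian restart count
 *
 * For log ('g') blocks, everything after the 4-byte block header is
 * deflated in place, restart table included.
 */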

int block_reader_init(struct block_reader *br, struct reftable_block *block,
		      uint32_t header_off, uint32_t table_block_size,
		      int hash_size)
{
	uint32_t full_block_size = table_block_size;
	uint8_t typ = block->data[header_off];
	uint32_t sz = get_be24(block->data + header_off + 1);
	int err = 0;
	uint16_t restart_count = 0;
	uint32_t restart_start = 0;
	uint8_t *restart_bytes = NULL;

	reftable_block_done(&br->block);

	if (!reftable_is_block_type(typ)) {
		err = REFTABLE_FORMAT_ERROR;
		goto done;
	}

	if (typ == BLOCK_TYPE_LOG) {
		uint32_t block_header_skip = 4 + header_off;
		uLong dst_len = sz - block_header_skip;
		uLong src_len = block->len - block_header_skip;

		/* Log blocks specify the *uncompressed* size in their header. */
		REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
				    br->uncompressed_cap);

		/* Copy over the block header verbatim. It's not compressed. */
		memcpy(br->uncompressed_data, block->data, block_header_skip);

		if (!br->zstream) {
			REFTABLE_CALLOC_ARRAY(br->zstream, 1);
			err = inflateInit(br->zstream);
		} else {
			err = inflateReset(br->zstream);
		}
		if (err != Z_OK) {
			err = REFTABLE_ZLIB_ERROR;
			goto done;
		}

		br->zstream->next_in = block->data + block_header_skip;
		br->zstream->avail_in = src_len;
		br->zstream->next_out = br->uncompressed_data + block_header_skip;
		br->zstream->avail_out = dst_len;

		/*
		 * We know both input as well as output size, and we know that
		 * the sizes should never be bigger than `uInt_MAX` because
		 * blocks can at most be 16MB large. We can thus use `Z_FINISH`
		 * here to instruct zlib to inflate the data in one go, which
		 * is more efficient than using `Z_NO_FLUSH`.
		 */
		err = inflate(br->zstream, Z_FINISH);
		if (err != Z_STREAM_END) {
			err = REFTABLE_ZLIB_ERROR;
			goto done;
		}
		err = 0;

		if (br->zstream->total_out + block_header_skip != sz) {
			err = REFTABLE_FORMAT_ERROR;
			goto done;
		}

		/* We're done with the input data. */
		reftable_block_done(block);
		block->data = br->uncompressed_data;
		block->len = sz;
		full_block_size = src_len + block_header_skip - br->zstream->avail_in;
	} else if (full_block_size == 0) {
		full_block_size = sz;
	} else if (sz < full_block_size && sz < block->len &&
		   block->data[sz] != 0) {
		/* If the block is smaller than the full block size, it is
		   padded (data followed by '\0') or the next block is
		   unaligned. */
		full_block_size = sz;
	}

	restart_count = get_be16(block->data + sz - 2);
	restart_start = sz - 2 - 3 * restart_count;
	restart_bytes = block->data + restart_start;

	/* transfer ownership. */
	br->block = *block;
	block->data = NULL;
	block->len = 0;

	br->hash_size = hash_size;
	br->block_len = restart_start;
	br->full_block_size = full_block_size;
	br->header_off = header_off;
	br->restart_count = restart_count;
	br->restart_bytes = restart_bytes;

done:
	return err;
}
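
/*
 * For illustration (numbers hypothetical): a log block at header_off = 0
 * whose header declares sz = 1000 has block_header_skip = 4, so
 * dst_len = 996 payload bytes are inflated after the verbatim 4-byte
 * header; the format check above then requires total_out + 4 == 1000
 * exactly, i.e. the stream must decompress to precisely 996 bytes.
 */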

void block_reader_release(struct block_reader *br)
{
	inflateEnd(br->zstream);
	reftable_free(br->zstream);
	reftable_free(br->uncompressed_data);
	reftable_block_done(&br->block);
}

uint8_t block_reader_type(const struct block_reader *r)
{
	return r->block.data[r->header_off];
}

int block_reader_first_key(const struct block_reader *br, struct strbuf *key)
{
	int off = br->header_off + 4, n;
	struct string_view in = {
		.buf = br->block.data + off,
		.len = br->block_len - off,
	};
	uint8_t extra = 0;

	strbuf_reset(key);

	n = reftable_decode_key(key, &extra, in);
	if (n < 0)
		return n;
	if (!key->len)
		return REFTABLE_FORMAT_ERROR;

	return 0;
}

static uint32_t block_reader_restart_offset(const struct block_reader *br, size_t idx)
{
	return get_be24(br->restart_bytes + 3 * idx);
}
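
/*
 * The restart table is a flat array of 3-byte big-endian offsets, so
 * idx = 2 reads bytes 6..8 of restart_bytes. With (hypothetical) restart
 * offsets 4, 180 and 412 stored, block_reader_restart_offset(br, 1)
 * returns 180, the byte offset of the second restart record.
 */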

void block_iter_seek_start(struct block_iter *it, const struct block_reader *br)
{
	it->block = br->block.data;
	it->block_len = br->block_len;
	it->hash_size = br->hash_size;
	strbuf_reset(&it->last_key);
	it->next_off = br->header_off + 4;
}

struct restart_needle_less_args {
	int error;
	struct strbuf needle;
	const struct block_reader *reader;
};

static int restart_needle_less(size_t idx, void *_args)
{
	struct restart_needle_less_args *args = _args;
	uint32_t off = block_reader_restart_offset(args->reader, idx);
	struct string_view in = {
		.buf = args->reader->block.data + off,
		.len = args->reader->block_len - off,
	};
	uint64_t prefix_len, suffix_len;
	uint8_t extra;
	int n;

	/*
	 * Records at restart points are stored without prefix compression, so
	 * there is no need to fully decode the record key here. This removes
	 * the need for allocating memory.
	 */
	n = reftable_decode_keylen(in, &prefix_len, &suffix_len, &extra);
	if (n < 0 || prefix_len) {
		args->error = 1;
		return -1;
	}

	string_view_consume(&in, n);
	if (suffix_len > in.len) {
		args->error = 1;
		return -1;
	}

	n = memcmp(args->needle.buf, in.buf,
		   args->needle.len < suffix_len ? args->needle.len : suffix_len);
	if (n)
		return n < 0;
	return args->needle.len < suffix_len;
}
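
/*
 * The return value answers "is the needle less than the restart key?",
 * which is the predicate shape binsearch() in block_iter_seek_key()
 * expects. For illustration with (hypothetical) restart keys "b", "f"
 * and "m": a needle "g" is not less than "b" or "f" but is less than
 * "m", so the search yields the index of "m" and the linear scan starts
 * at the preceding restart point "f".
 */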

int block_iter_next(struct block_iter *it, struct reftable_record *rec)
{
	struct string_view in = {
		.buf = (unsigned char *) it->block + it->next_off,
		.len = it->block_len - it->next_off,
	};
	struct string_view start = in;
	uint8_t extra = 0;
	int n = 0;

	if (it->next_off >= it->block_len)
		return 1;

	n = reftable_decode_key(&it->last_key, &extra, in);
	if (n < 0)
		return -1;
	if (!it->last_key.len)
		return REFTABLE_FORMAT_ERROR;

	string_view_consume(&in, n);
	n = reftable_record_decode(rec, it->last_key, extra, in, it->hash_size,
				   &it->scratch);
	if (n < 0)
		return -1;
	string_view_consume(&in, n);

	it->next_off += start.len - in.len;
	return 0;
}
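
/*
 * Typical iteration (sketch; error handling abbreviated): position the
 * iterator, then pull records until block_iter_next() returns 1 at the
 * end of the block:
 *
 *	block_iter_seek_start(&it, br);
 *	while ((err = block_iter_next(&it, &rec)) == 0)
 *		handle(&rec);	// handle() is a placeholder
 */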

void block_iter_reset(struct block_iter *it)
{
	strbuf_reset(&it->last_key);
	it->next_off = 0;
	it->block = NULL;
	it->block_len = 0;
	it->hash_size = 0;
}

void block_iter_close(struct block_iter *it)
{
	strbuf_release(&it->last_key);
	strbuf_release(&it->scratch);
}

int block_iter_seek_key(struct block_iter *it, const struct block_reader *br,
			struct strbuf *want)
{
	struct restart_needle_less_args args = {
		.needle = *want,
		.reader = br,
	};
	struct reftable_record rec;
	int err = 0;
	size_t i;

	/*
	 * Perform a binary search over the block's restart points, which
	 * avoids doing a linear scan over the whole block. Like this, we
	 * identify the section of the block that should contain our key.
	 *
	 * Note that we explicitly search for the first restart point _greater_
	 * than the sought-after record, not _greater or equal_ to it. In case
	 * the sought-after record is located directly at the restart point we
	 * would otherwise start doing the linear search at the preceding
	 * restart point. While that works alright, we would end up scanning
	 * too many records.
	 */
	i = binsearch(br->restart_count, &restart_needle_less, &args);
	if (args.error) {
		err = REFTABLE_FORMAT_ERROR;
		goto done;
	}

	/*
	 * Now there are multiple cases:
	 *
	 * - `i == 0`: The wanted record is smaller than the record found at
	 *   the first restart point. As the first restart point is the first
	 *   record in the block, our wanted record cannot be located in this
	 *   block at all. We still need to position the iterator so that the
	 *   next call to `block_iter_next()` will yield an end-of-iterator
	 *   signal.
	 *
	 * - `i == restart_count`: The wanted record was not found at any of
	 *   the restart points. As there is no restart point at the end of
	 *   the section, the record may thus be contained in the last
	 *   section.
	 *
	 * - `i > 0`: The wanted record must be contained in the section
	 *   before the found restart point. We thus do a linear search
	 *   starting from the preceding restart point.
	 */
	if (i > 0)
		it->next_off = block_reader_restart_offset(br, i - 1);
	else
		it->next_off = br->header_off + 4;
	it->block = br->block.data;
	it->block_len = br->block_len;
	it->hash_size = br->hash_size;

	reftable_record_init(&rec, block_reader_type(br));

	/*
	 * We're looking for the last entry less than the wanted key so that
	 * the next call to `block_iter_next()` would yield the wanted
	 * record. We thus don't want to position our iterator at the
	 * sought-after record, but one before. To do so, we have to go one
	 * entry too far and then back up.
	 */
	while (1) {
		size_t prev_off = it->next_off;

		err = block_iter_next(it, &rec);
		if (err < 0)
			goto done;
		if (err > 0) {
			it->next_off = prev_off;
			err = 0;
			goto done;
		}

		/*
		 * Check whether the current key is greater than or equal to
		 * the sought-after key. In case it is greater we know that
		 * the record does not exist in the block and can thus abort
		 * early. In case it is equal to the sought-after key we have
		 * found the desired record.
		 *
		 * Note that we store the next record's key directly in
		 * `last_key` without restoring the key of the preceding record
		 * in case we need to go one record back. This is safe to do as
		 * `block_iter_next()` would return the record whose key is
		 * equal to `last_key` now, and naturally all keys share a
		 * prefix with themselves.
		 */
		reftable_record_key(&rec, &it->last_key);
		if (strbuf_cmp(&it->last_key, want) >= 0) {
			it->next_off = prev_off;
			goto done;
		}
	}

done:
	reftable_record_release(&rec);
	return err;
}
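
/*
 * Worked example of the seek (keys hypothetical): with restart keys "b",
 * "f" and "m" and want = "g", binsearch() returns the index of "m", so
 * the linear scan starts at "f" and stops once it decodes the first key
 * >= "g". The iterator is then rewound by one record, so the next
 * block_iter_next() call yields exactly that key.
 */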

void block_writer_release(struct block_writer *bw)
{
	deflateEnd(bw->zstream);
	FREE_AND_NULL(bw->zstream);
	FREE_AND_NULL(bw->restarts);
	FREE_AND_NULL(bw->compressed);
	strbuf_release(&bw->last_key);
	/* the block is not owned. */
}

void reftable_block_done(struct reftable_block *blockp)
{
	struct reftable_block_source source;

	/* Check for NULL before dereferencing to read the source. */
	if (!blockp)
		return;

	source = blockp->source;
	if (source.ops)
		source.ops->return_block(source.arg, blockp);
	blockp->data = NULL;
	blockp->len = 0;
	blockp->source.ops = NULL;
	blockp->source.arg = NULL;
}