/*
 * Copyright (c) 2011, Google Inc.
 */
#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "bulk-checkin.h"
#include "environment.h"
#include "lockfile.h"
#include "repository.h"
#include "csum-file.h"
#include "pack.h"
#include "strbuf.h"
#include "string-list.h"
#include "tmp-objdir.h"
#include "packfile.h"
#include "object-file.h"
#include "object-store-ll.h"
22 static int odb_transaction_nesting
;
24 static struct tmp_objdir
*bulk_fsync_objdir
;
26 static struct bulk_checkin_packfile
{
30 struct pack_idx_option pack_idx_opts
;
32 struct pack_idx_entry
**written
;
33 uint32_t alloc_written
;
35 } bulk_checkin_packfile
;
37 static void finish_tmp_packfile(struct strbuf
*basename
,
38 const char *pack_tmp_name
,
39 struct pack_idx_entry
**written_list
,
41 struct pack_idx_option
*pack_idx_opts
,
44 char *idx_tmp_name
= NULL
;
46 stage_tmp_packfiles(basename
, pack_tmp_name
, written_list
, nr_written
,
47 NULL
, pack_idx_opts
, hash
, &idx_tmp_name
);
48 rename_tmp_packfile_idx(basename
, &idx_tmp_name
);
53 static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile
*state
)
55 unsigned char hash
[GIT_MAX_RAWSZ
];
56 struct strbuf packname
= STRBUF_INIT
;
62 if (state
->nr_written
== 0) {
64 free_hashfile(state
->f
);
65 unlink(state
->pack_tmp_name
);
67 } else if (state
->nr_written
== 1) {
68 finalize_hashfile(state
->f
, hash
, FSYNC_COMPONENT_PACK
,
69 CSUM_HASH_IN_STREAM
| CSUM_FSYNC
| CSUM_CLOSE
);
71 int fd
= finalize_hashfile(state
->f
, hash
, FSYNC_COMPONENT_PACK
, 0);
72 fixup_pack_header_footer(fd
, hash
, state
->pack_tmp_name
,
73 state
->nr_written
, hash
,
78 strbuf_addf(&packname
, "%s/pack/pack-%s.", repo_get_object_directory(the_repository
),
80 finish_tmp_packfile(&packname
, state
->pack_tmp_name
,
81 state
->written
, state
->nr_written
,
82 &state
->pack_idx_opts
, hash
);
83 for (i
= 0; i
< state
->nr_written
; i
++)
84 free(state
->written
[i
]);
87 free(state
->pack_tmp_name
);
89 memset(state
, 0, sizeof(*state
));
91 strbuf_release(&packname
);
92 /* Make objects we just wrote available to ourselves */
93 reprepare_packed_git(the_repository
);
97 * Cleanup after batch-mode fsync_object_files.
99 static void flush_batch_fsync(void)
101 struct strbuf temp_path
= STRBUF_INIT
;
102 struct tempfile
*temp
;
104 if (!bulk_fsync_objdir
)
108 * Issue a full hardware flush against a temporary file to ensure
109 * that all objects are durable before any renames occur. The code in
110 * fsync_loose_object_bulk_checkin has already issued a writeout
111 * request, but it has not flushed any writeback cache in the storage
112 * hardware or any filesystem logs. This fsync call acts as a barrier
113 * to ensure that the data in each new object file is durable before
114 * the final name is visible.
116 strbuf_addf(&temp_path
, "%s/bulk_fsync_XXXXXX", repo_get_object_directory(the_repository
));
117 temp
= xmks_tempfile(temp_path
.buf
);
118 fsync_or_die(get_tempfile_fd(temp
), get_tempfile_path(temp
));
119 delete_tempfile(&temp
);
120 strbuf_release(&temp_path
);
123 * Make the object files visible in the primary ODB after their data is
126 tmp_objdir_migrate(bulk_fsync_objdir
);
127 bulk_fsync_objdir
= NULL
;
130 static int already_written(struct bulk_checkin_packfile
*state
, struct object_id
*oid
)
134 /* The object may already exist in the repository */
135 if (repo_has_object_file(the_repository
, oid
))
138 /* Might want to keep the list sorted */
139 for (i
= 0; i
< state
->nr_written
; i
++)
140 if (oideq(&state
->written
[i
]->oid
, oid
))
143 /* This is a new object we need to keep */
148 * Read the contents from fd for size bytes, streaming it to the
149 * packfile in state while updating the hash in ctx. Signal a failure
150 * by returning a negative value when the resulting pack would exceed
151 * the pack size limit and this is not the first object in the pack,
152 * so that the caller can discard what we wrote from the current pack
153 * by truncating it and opening a new one. The caller will then call
154 * us again after rewinding the input fd.
156 * The already_hashed_to pointer is kept untouched by the caller to
157 * make sure we do not hash the same byte when we are called
158 * again. This way, the caller does not have to checkpoint its hash
159 * status before calling us just in case we ask it to call us again
162 static int stream_blob_to_pack(struct bulk_checkin_packfile
*state
,
163 git_hash_ctx
*ctx
, off_t
*already_hashed_to
,
164 int fd
, size_t size
, const char *path
,
168 unsigned char ibuf
[16384];
169 unsigned char obuf
[16384];
172 int write_object
= (flags
& HASH_WRITE_OBJECT
);
175 git_deflate_init(&s
, pack_compression_level
);
177 hdrlen
= encode_in_pack_object_header(obuf
, sizeof(obuf
), OBJ_BLOB
, size
);
178 s
.next_out
= obuf
+ hdrlen
;
179 s
.avail_out
= sizeof(obuf
) - hdrlen
;
181 while (status
!= Z_STREAM_END
) {
182 if (size
&& !s
.avail_in
) {
183 ssize_t rsize
= size
< sizeof(ibuf
) ? size
: sizeof(ibuf
);
184 ssize_t read_result
= read_in_full(fd
, ibuf
, rsize
);
186 die_errno("failed to read from '%s'", path
);
187 if (read_result
!= rsize
)
188 die("failed to read %d bytes from '%s'",
191 if (*already_hashed_to
< offset
) {
192 size_t hsize
= offset
- *already_hashed_to
;
196 the_hash_algo
->update_fn(ctx
, ibuf
, hsize
);
197 *already_hashed_to
= offset
;
204 status
= git_deflate(&s
, size
? 0 : Z_FINISH
);
206 if (!s
.avail_out
|| status
== Z_STREAM_END
) {
208 size_t written
= s
.next_out
- obuf
;
210 /* would we bust the size limit? */
211 if (state
->nr_written
&&
212 pack_size_limit_cfg
&&
213 pack_size_limit_cfg
< state
->offset
+ written
) {
214 git_deflate_abort(&s
);
218 hashwrite(state
->f
, obuf
, written
);
219 state
->offset
+= written
;
222 s
.avail_out
= sizeof(obuf
);
231 die("unexpected deflate failure: %d", status
);
238 /* Lazily create backing packfile for the state */
239 static void prepare_to_stream(struct bulk_checkin_packfile
*state
,
242 if (!(flags
& HASH_WRITE_OBJECT
) || state
->f
)
245 state
->f
= create_tmp_packfile(&state
->pack_tmp_name
);
246 reset_pack_idx_option(&state
->pack_idx_opts
);
248 /* Pretend we are going to write only one object */
249 state
->offset
= write_pack_header(state
->f
, 1);
251 die_errno("unable to write pack header");
254 static int deflate_blob_to_pack(struct bulk_checkin_packfile
*state
,
255 struct object_id
*result_oid
,
257 const char *path
, unsigned flags
)
259 off_t seekback
, already_hashed_to
;
261 unsigned char obuf
[16384];
263 struct hashfile_checkpoint checkpoint
= {0};
264 struct pack_idx_entry
*idx
= NULL
;
266 seekback
= lseek(fd
, 0, SEEK_CUR
);
267 if (seekback
== (off_t
) -1)
268 return error("cannot find the current offset");
270 header_len
= format_object_header((char *)obuf
, sizeof(obuf
),
272 the_hash_algo
->init_fn(&ctx
);
273 the_hash_algo
->update_fn(&ctx
, obuf
, header_len
);
274 the_hash_algo
->init_fn(&checkpoint
.ctx
);
276 /* Note: idx is non-NULL when we are writing */
277 if ((flags
& HASH_WRITE_OBJECT
) != 0)
278 CALLOC_ARRAY(idx
, 1);
280 already_hashed_to
= 0;
283 prepare_to_stream(state
, flags
);
285 hashfile_checkpoint(state
->f
, &checkpoint
);
286 idx
->offset
= state
->offset
;
287 crc32_begin(state
->f
);
289 if (!stream_blob_to_pack(state
, &ctx
, &already_hashed_to
,
290 fd
, size
, path
, flags
))
293 * Writing this object to the current pack will make
294 * it too big; we need to truncate it, start a new
295 * pack, and write into it.
298 BUG("should not happen");
299 hashfile_truncate(state
->f
, &checkpoint
);
300 state
->offset
= checkpoint
.offset
;
301 flush_bulk_checkin_packfile(state
);
302 if (lseek(fd
, seekback
, SEEK_SET
) == (off_t
) -1)
303 return error("cannot seek back");
305 the_hash_algo
->final_oid_fn(result_oid
, &ctx
);
309 idx
->crc32
= crc32_end(state
->f
);
310 if (already_written(state
, result_oid
)) {
311 hashfile_truncate(state
->f
, &checkpoint
);
312 state
->offset
= checkpoint
.offset
;
315 oidcpy(&idx
->oid
, result_oid
);
316 ALLOC_GROW(state
->written
,
317 state
->nr_written
+ 1,
318 state
->alloc_written
);
319 state
->written
[state
->nr_written
++] = idx
;
324 void prepare_loose_object_bulk_checkin(void)
327 * We lazily create the temporary object directory
328 * the first time an object might be added, since
329 * callers may not know whether any objects will be
330 * added at the time they call begin_odb_transaction.
332 if (!odb_transaction_nesting
|| bulk_fsync_objdir
)
335 bulk_fsync_objdir
= tmp_objdir_create("bulk-fsync");
336 if (bulk_fsync_objdir
)
337 tmp_objdir_replace_primary_odb(bulk_fsync_objdir
, 0);
340 void fsync_loose_object_bulk_checkin(int fd
, const char *filename
)
343 * If we have an active ODB transaction, we issue a call that
344 * cleans the filesystem page cache but avoids a hardware flush
345 * command. Later on we will issue a single hardware flush
346 * before renaming the objects to their final names as part of
349 if (!bulk_fsync_objdir
||
350 git_fsync(fd
, FSYNC_WRITEOUT_ONLY
) < 0) {
352 warning(_("core.fsyncMethod = batch is unsupported on this platform"));
353 fsync_or_die(fd
, filename
);
357 int index_blob_bulk_checkin(struct object_id
*oid
,
359 const char *path
, unsigned flags
)
361 int status
= deflate_blob_to_pack(&bulk_checkin_packfile
, oid
, fd
, size
,
363 if (!odb_transaction_nesting
)
364 flush_bulk_checkin_packfile(&bulk_checkin_packfile
);
368 void begin_odb_transaction(void)
370 odb_transaction_nesting
+= 1;
373 void flush_odb_transaction(void)
376 flush_bulk_checkin_packfile(&bulk_checkin_packfile
);
379 void end_odb_transaction(void)
381 odb_transaction_nesting
-= 1;
382 if (odb_transaction_nesting
< 0)
383 BUG("Unbalanced ODB transaction nesting");
385 if (odb_transaction_nesting
)
388 flush_odb_transaction();