]>
Commit | Line | Data |
---|---|---|
568508e7 JH |
1 | /* |
2 | * Copyright (c) 2011, Google Inc. | |
3 | */ | |
b6fdc44c | 4 | #include "git-compat-util.h" |
36bf1958 | 5 | #include "alloc.h" |
568508e7 | 6 | #include "bulk-checkin.h" |
32a8f510 | 7 | #include "environment.h" |
f394e093 | 8 | #include "gettext.h" |
41771fa4 | 9 | #include "hex.h" |
c0f4752e | 10 | #include "lockfile.h" |
a49d2834 | 11 | #include "repository.h" |
568508e7 JH |
12 | #include "csum-file.h" |
13 | #include "pack.h" | |
58892711 | 14 | #include "strbuf.h" |
c0f4752e NS |
15 | #include "string-list.h" |
16 | #include "tmp-objdir.h" | |
0abe14f6 | 17 | #include "packfile.h" |
87bed179 | 18 | #include "object-file.h" |
cbd53a21 | 19 | #include "object-store.h" |
65156bb7 | 20 | #include "wrapper.h" |
568508e7 | 21 | |
/*
 * Depth of ODB transaction nesting; deferred flushing happens only
 * when the outermost transaction ends.
 */
static int odb_transaction_nesting;

/*
 * Temporary object directory collecting loose objects while a
 * batch-fsync ODB transaction is active; NULL when not in use.
 */
static struct tmp_objdir *bulk_fsync_objdir;
25 | ||
897c9e25 | 26 | static struct bulk_checkin_packfile { |
568508e7 | 27 | char *pack_tmp_name; |
98a3beab | 28 | struct hashfile *f; |
568508e7 JH |
29 | off_t offset; |
30 | struct pack_idx_option pack_idx_opts; | |
31 | ||
32 | struct pack_idx_entry **written; | |
33 | uint32_t alloc_written; | |
34 | uint32_t nr_written; | |
897c9e25 | 35 | } bulk_checkin_packfile; |
568508e7 | 36 | |
/*
 * Install a finished temporary packfile under its final, hash-based
 * name: stage the pack index alongside the pack data, then rename the
 * index into place.
 */
static void finish_tmp_packfile(struct strbuf *basename,
				const char *pack_tmp_name,
				struct pack_idx_entry **written_list,
				uint32_t nr_written,
				struct pack_idx_option *pack_idx_opts,
				unsigned char hash[])
{
	char *staged_idx_name = NULL;

	stage_tmp_packfiles(basename, pack_tmp_name, written_list, nr_written,
			    NULL, pack_idx_opts, hash, &staged_idx_name);
	rename_tmp_packfile_idx(basename, &staged_idx_name);

	free(staged_idx_name);
}
52 | ||
897c9e25 | 53 | static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile *state) |
568508e7 | 54 | { |
ae44b5a4 | 55 | unsigned char hash[GIT_MAX_RAWSZ]; |
58892711 | 56 | struct strbuf packname = STRBUF_INIT; |
568508e7 JH |
57 | int i; |
58 | ||
59 | if (!state->f) | |
60 | return; | |
61 | ||
62 | if (state->nr_written == 0) { | |
63 | close(state->f->fd); | |
64 | unlink(state->pack_tmp_name); | |
65 | goto clear_exit; | |
66 | } else if (state->nr_written == 1) { | |
020406ea NS |
67 | finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, |
68 | CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); | |
568508e7 | 69 | } else { |
020406ea | 70 | int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0); |
ae44b5a4 TB |
71 | fixup_pack_header_footer(fd, hash, state->pack_tmp_name, |
72 | state->nr_written, hash, | |
568508e7 JH |
73 | state->offset); |
74 | close(fd); | |
75 | } | |
76 | ||
66833f0e ÆAB |
77 | strbuf_addf(&packname, "%s/pack/pack-%s.", get_object_directory(), |
78 | hash_to_hex(hash)); | |
58892711 | 79 | finish_tmp_packfile(&packname, state->pack_tmp_name, |
568508e7 | 80 | state->written, state->nr_written, |
ae44b5a4 | 81 | &state->pack_idx_opts, hash); |
568508e7 JH |
82 | for (i = 0; i < state->nr_written; i++) |
83 | free(state->written[i]); | |
84 | ||
85 | clear_exit: | |
86 | free(state->written); | |
87 | memset(state, 0, sizeof(*state)); | |
88 | ||
58892711 | 89 | strbuf_release(&packname); |
568508e7 | 90 | /* Make objects we just wrote available to ourselves */ |
a49d2834 | 91 | reprepare_packed_git(the_repository); |
568508e7 JH |
92 | } |
93 | ||
c0f4752e NS |
94 | /* |
95 | * Cleanup after batch-mode fsync_object_files. | |
96 | */ | |
97 | static void flush_batch_fsync(void) | |
98 | { | |
99 | struct strbuf temp_path = STRBUF_INIT; | |
100 | struct tempfile *temp; | |
101 | ||
102 | if (!bulk_fsync_objdir) | |
103 | return; | |
104 | ||
105 | /* | |
106 | * Issue a full hardware flush against a temporary file to ensure | |
107 | * that all objects are durable before any renames occur. The code in | |
108 | * fsync_loose_object_bulk_checkin has already issued a writeout | |
109 | * request, but it has not flushed any writeback cache in the storage | |
110 | * hardware or any filesystem logs. This fsync call acts as a barrier | |
111 | * to ensure that the data in each new object file is durable before | |
112 | * the final name is visible. | |
113 | */ | |
114 | strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX", get_object_directory()); | |
115 | temp = xmks_tempfile(temp_path.buf); | |
116 | fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp)); | |
117 | delete_tempfile(&temp); | |
118 | strbuf_release(&temp_path); | |
119 | ||
120 | /* | |
121 | * Make the object files visible in the primary ODB after their data is | |
122 | * fully durable. | |
123 | */ | |
124 | tmp_objdir_migrate(bulk_fsync_objdir); | |
125 | bulk_fsync_objdir = NULL; | |
126 | } | |
127 | ||
897c9e25 | 128 | static int already_written(struct bulk_checkin_packfile *state, struct object_id *oid) |
568508e7 JH |
129 | { |
130 | int i; | |
131 | ||
132 | /* The object may already exist in the repository */ | |
bc726bd0 | 133 | if (repo_has_object_file(the_repository, oid)) |
568508e7 JH |
134 | return 1; |
135 | ||
136 | /* Might want to keep the list sorted */ | |
137 | for (i = 0; i < state->nr_written; i++) | |
4a7e27e9 | 138 | if (oideq(&state->written[i]->oid, oid)) |
568508e7 JH |
139 | return 1; |
140 | ||
141 | /* This is a new object we need to keep */ | |
142 | return 0; | |
143 | } | |
144 | ||
145 | /* | |
146 | * Read the contents from fd for size bytes, streaming it to the | |
147 | * packfile in state while updating the hash in ctx. Signal a failure | |
148 | * by returning a negative value when the resulting pack would exceed | |
149 | * the pack size limit and this is not the first object in the pack, | |
150 | * so that the caller can discard what we wrote from the current pack | |
151 | * by truncating it and opening a new one. The caller will then call | |
152 | * us again after rewinding the input fd. | |
153 | * | |
154 | * The already_hashed_to pointer is kept untouched by the caller to | |
155 | * make sure we do not hash the same byte when we are called | |
156 | * again. This way, the caller does not have to checkpoint its hash | |
157 | * status before calling us just in case we ask it to call us again | |
158 | * with a new pack. | |
159 | */ | |
897c9e25 | 160 | static int stream_to_pack(struct bulk_checkin_packfile *state, |
f87e8137 | 161 | git_hash_ctx *ctx, off_t *already_hashed_to, |
568508e7 JH |
162 | int fd, size_t size, enum object_type type, |
163 | const char *path, unsigned flags) | |
164 | { | |
165 | git_zstream s; | |
f96178c5 | 166 | unsigned char ibuf[16384]; |
568508e7 JH |
167 | unsigned char obuf[16384]; |
168 | unsigned hdrlen; | |
169 | int status = Z_OK; | |
170 | int write_object = (flags & HASH_WRITE_OBJECT); | |
171 | off_t offset = 0; | |
172 | ||
568508e7 JH |
173 | git_deflate_init(&s, pack_compression_level); |
174 | ||
7202a6fa | 175 | hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size); |
568508e7 JH |
176 | s.next_out = obuf + hdrlen; |
177 | s.avail_out = sizeof(obuf) - hdrlen; | |
178 | ||
179 | while (status != Z_STREAM_END) { | |
568508e7 JH |
180 | if (size && !s.avail_in) { |
181 | ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf); | |
41dcc4dc JK |
182 | ssize_t read_result = read_in_full(fd, ibuf, rsize); |
183 | if (read_result < 0) | |
184 | die_errno("failed to read from '%s'", path); | |
185 | if (read_result != rsize) | |
568508e7 JH |
186 | die("failed to read %d bytes from '%s'", |
187 | (int)rsize, path); | |
188 | offset += rsize; | |
189 | if (*already_hashed_to < offset) { | |
190 | size_t hsize = offset - *already_hashed_to; | |
191 | if (rsize < hsize) | |
192 | hsize = rsize; | |
193 | if (hsize) | |
f87e8137 | 194 | the_hash_algo->update_fn(ctx, ibuf, hsize); |
568508e7 JH |
195 | *already_hashed_to = offset; |
196 | } | |
197 | s.next_in = ibuf; | |
198 | s.avail_in = rsize; | |
199 | size -= rsize; | |
200 | } | |
201 | ||
202 | status = git_deflate(&s, size ? 0 : Z_FINISH); | |
203 | ||
204 | if (!s.avail_out || status == Z_STREAM_END) { | |
205 | if (write_object) { | |
206 | size_t written = s.next_out - obuf; | |
207 | ||
208 | /* would we bust the size limit? */ | |
209 | if (state->nr_written && | |
210 | pack_size_limit_cfg && | |
211 | pack_size_limit_cfg < state->offset + written) { | |
212 | git_deflate_abort(&s); | |
213 | return -1; | |
214 | } | |
215 | ||
98a3beab | 216 | hashwrite(state->f, obuf, written); |
568508e7 JH |
217 | state->offset += written; |
218 | } | |
219 | s.next_out = obuf; | |
220 | s.avail_out = sizeof(obuf); | |
221 | } | |
222 | ||
223 | switch (status) { | |
224 | case Z_OK: | |
225 | case Z_BUF_ERROR: | |
226 | case Z_STREAM_END: | |
227 | continue; | |
228 | default: | |
229 | die("unexpected deflate failure: %d", status); | |
230 | } | |
231 | } | |
232 | git_deflate_end(&s); | |
233 | return 0; | |
234 | } | |
235 | ||
236 | /* Lazily create backing packfile for the state */ | |
897c9e25 | 237 | static void prepare_to_stream(struct bulk_checkin_packfile *state, |
568508e7 JH |
238 | unsigned flags) |
239 | { | |
240 | if (!(flags & HASH_WRITE_OBJECT) || state->f) | |
241 | return; | |
242 | ||
243 | state->f = create_tmp_packfile(&state->pack_tmp_name); | |
244 | reset_pack_idx_option(&state->pack_idx_opts); | |
245 | ||
246 | /* Pretend we are going to write only one object */ | |
247 | state->offset = write_pack_header(state->f, 1); | |
248 | if (!state->offset) | |
249 | die_errno("unable to write pack header"); | |
250 | } | |
251 | ||
897c9e25 | 252 | static int deflate_to_pack(struct bulk_checkin_packfile *state, |
68ee6dfc | 253 | struct object_id *result_oid, |
568508e7 JH |
254 | int fd, size_t size, |
255 | enum object_type type, const char *path, | |
256 | unsigned flags) | |
257 | { | |
258 | off_t seekback, already_hashed_to; | |
f87e8137 | 259 | git_hash_ctx ctx; |
568508e7 JH |
260 | unsigned char obuf[16384]; |
261 | unsigned header_len; | |
7140414d | 262 | struct hashfile_checkpoint checkpoint = {0}; |
568508e7 JH |
263 | struct pack_idx_entry *idx = NULL; |
264 | ||
265 | seekback = lseek(fd, 0, SEEK_CUR); | |
266 | if (seekback == (off_t) -1) | |
267 | return error("cannot find the current offset"); | |
268 | ||
b04cdea4 ÆAB |
269 | header_len = format_object_header((char *)obuf, sizeof(obuf), |
270 | type, size); | |
f87e8137 | 271 | the_hash_algo->init_fn(&ctx); |
272 | the_hash_algo->update_fn(&ctx, obuf, header_len); | |
568508e7 JH |
273 | |
274 | /* Note: idx is non-NULL when we are writing */ | |
275 | if ((flags & HASH_WRITE_OBJECT) != 0) | |
ca56dadb | 276 | CALLOC_ARRAY(idx, 1); |
568508e7 JH |
277 | |
278 | already_hashed_to = 0; | |
279 | ||
280 | while (1) { | |
281 | prepare_to_stream(state, flags); | |
282 | if (idx) { | |
98a3beab | 283 | hashfile_checkpoint(state->f, &checkpoint); |
568508e7 JH |
284 | idx->offset = state->offset; |
285 | crc32_begin(state->f); | |
286 | } | |
287 | if (!stream_to_pack(state, &ctx, &already_hashed_to, | |
288 | fd, size, type, path, flags)) | |
289 | break; | |
290 | /* | |
291 | * Writing this object to the current pack will make | |
292 | * it too big; we need to truncate it, start a new | |
293 | * pack, and write into it. | |
294 | */ | |
295 | if (!idx) | |
033abf97 | 296 | BUG("should not happen"); |
98a3beab | 297 | hashfile_truncate(state->f, &checkpoint); |
568508e7 | 298 | state->offset = checkpoint.offset; |
897c9e25 | 299 | flush_bulk_checkin_packfile(state); |
568508e7 JH |
300 | if (lseek(fd, seekback, SEEK_SET) == (off_t) -1) |
301 | return error("cannot seek back"); | |
302 | } | |
5951bf46 | 303 | the_hash_algo->final_oid_fn(result_oid, &ctx); |
568508e7 JH |
304 | if (!idx) |
305 | return 0; | |
306 | ||
307 | idx->crc32 = crc32_end(state->f); | |
68ee6dfc | 308 | if (already_written(state, result_oid)) { |
98a3beab | 309 | hashfile_truncate(state->f, &checkpoint); |
568508e7 JH |
310 | state->offset = checkpoint.offset; |
311 | free(idx); | |
312 | } else { | |
68ee6dfc | 313 | oidcpy(&idx->oid, result_oid); |
568508e7 JH |
314 | ALLOC_GROW(state->written, |
315 | state->nr_written + 1, | |
316 | state->alloc_written); | |
317 | state->written[state->nr_written++] = idx; | |
318 | } | |
319 | return 0; | |
320 | } | |
321 | ||
c0f4752e NS |
322 | void prepare_loose_object_bulk_checkin(void) |
323 | { | |
324 | /* | |
325 | * We lazily create the temporary object directory | |
326 | * the first time an object might be added, since | |
327 | * callers may not know whether any objects will be | |
328 | * added at the time they call begin_odb_transaction. | |
329 | */ | |
330 | if (!odb_transaction_nesting || bulk_fsync_objdir) | |
331 | return; | |
332 | ||
333 | bulk_fsync_objdir = tmp_objdir_create("bulk-fsync"); | |
334 | if (bulk_fsync_objdir) | |
335 | tmp_objdir_replace_primary_odb(bulk_fsync_objdir, 0); | |
336 | } | |
337 | ||
338 | void fsync_loose_object_bulk_checkin(int fd, const char *filename) | |
339 | { | |
340 | /* | |
341 | * If we have an active ODB transaction, we issue a call that | |
342 | * cleans the filesystem page cache but avoids a hardware flush | |
343 | * command. Later on we will issue a single hardware flush | |
344 | * before renaming the objects to their final names as part of | |
345 | * flush_batch_fsync. | |
346 | */ | |
347 | if (!bulk_fsync_objdir || | |
348 | git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) { | |
ce50f1f3 JS |
349 | if (errno == ENOSYS) |
350 | warning(_("core.fsyncMethod = batch is unsupported on this platform")); | |
c0f4752e NS |
351 | fsync_or_die(fd, filename); |
352 | } | |
353 | } | |
354 | ||
68ee6dfc | 355 | int index_bulk_checkin(struct object_id *oid, |
568508e7 JH |
356 | int fd, size_t size, enum object_type type, |
357 | const char *path, unsigned flags) | |
358 | { | |
897c9e25 | 359 | int status = deflate_to_pack(&bulk_checkin_packfile, oid, fd, size, type, |
568508e7 | 360 | path, flags); |
2c23d1b4 | 361 | if (!odb_transaction_nesting) |
897c9e25 | 362 | flush_bulk_checkin_packfile(&bulk_checkin_packfile); |
568508e7 JH |
363 | return status; |
364 | } | |
365 | ||
2c23d1b4 | 366 | void begin_odb_transaction(void) |
568508e7 | 367 | { |
2c23d1b4 | 368 | odb_transaction_nesting += 1; |
568508e7 JH |
369 | } |
370 | ||
2c23d1b4 | 371 | void flush_odb_transaction(void) |
568508e7 | 372 | { |
c0f4752e | 373 | flush_batch_fsync(); |
897c9e25 | 374 | flush_bulk_checkin_packfile(&bulk_checkin_packfile); |
568508e7 | 375 | } |
2c23d1b4 NS |
376 | |
377 | void end_odb_transaction(void) | |
378 | { | |
379 | odb_transaction_nesting -= 1; | |
380 | if (odb_transaction_nesting < 0) | |
381 | BUG("Unbalanced ODB transaction nesting"); | |
382 | ||
383 | if (odb_transaction_nesting) | |
384 | return; | |
385 | ||
386 | flush_odb_transaction(); | |
568508e7 | 387 | } |