/*
 * Copyright (c) 2011, Google Inc.
 */
b6fdc44c | 4 | #include "git-compat-util.h" |
568508e7 | 5 | #include "bulk-checkin.h" |
32a8f510 | 6 | #include "environment.h" |
f394e093 | 7 | #include "gettext.h" |
41771fa4 | 8 | #include "hex.h" |
c0f4752e | 9 | #include "lockfile.h" |
a49d2834 | 10 | #include "repository.h" |
568508e7 JH |
11 | #include "csum-file.h" |
12 | #include "pack.h" | |
58892711 | 13 | #include "strbuf.h" |
c0f4752e NS |
14 | #include "string-list.h" |
15 | #include "tmp-objdir.h" | |
0abe14f6 | 16 | #include "packfile.h" |
87bed179 | 17 | #include "object-file.h" |
a034e910 | 18 | #include "object-store-ll.h" |
568508e7 | 19 | |
/* Depth of begin_odb_transaction() nesting; 0 means no active transaction. */
static int odb_transaction_nesting;

/*
 * Staging object directory used by batch-mode fsync; NULL when batch
 * mode is not active.  See prepare_loose_object_bulk_checkin().
 */
static struct tmp_objdir *bulk_fsync_objdir;

/*
 * State of the single temporary packfile that bulk check-in streams
 * large objects into (one file-scope instance, reset after each flush).
 */
static struct bulk_checkin_packfile {
	char *pack_tmp_name;	/* path of the temporary packfile */
	struct hashfile *f;	/* open pack stream; NULL until first write */

	off_t offset;		/* number of pack bytes written so far */
	struct pack_idx_option pack_idx_opts;

	struct pack_idx_entry **written;	/* objects written to this pack */
	uint32_t alloc_written;			/* allocated slots in "written" */
	uint32_t nr_written;			/* used slots in "written" */
} bulk_checkin_packfile;
2ec02dd5 ÆAB |
35 | static void finish_tmp_packfile(struct strbuf *basename, |
36 | const char *pack_tmp_name, | |
37 | struct pack_idx_entry **written_list, | |
38 | uint32_t nr_written, | |
39 | struct pack_idx_option *pack_idx_opts, | |
40 | unsigned char hash[]) | |
41 | { | |
42 | char *idx_tmp_name = NULL; | |
43 | ||
44 | stage_tmp_packfiles(basename, pack_tmp_name, written_list, nr_written, | |
1c573cdd | 45 | NULL, pack_idx_opts, hash, &idx_tmp_name); |
2ec02dd5 ÆAB |
46 | rename_tmp_packfile_idx(basename, &idx_tmp_name); |
47 | ||
48 | free(idx_tmp_name); | |
49 | } | |
50 | ||
897c9e25 | 51 | static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile *state) |
568508e7 | 52 | { |
ae44b5a4 | 53 | unsigned char hash[GIT_MAX_RAWSZ]; |
58892711 | 54 | struct strbuf packname = STRBUF_INIT; |
568508e7 JH |
55 | int i; |
56 | ||
57 | if (!state->f) | |
58 | return; | |
59 | ||
60 | if (state->nr_written == 0) { | |
61 | close(state->f->fd); | |
62 | unlink(state->pack_tmp_name); | |
63 | goto clear_exit; | |
64 | } else if (state->nr_written == 1) { | |
020406ea NS |
65 | finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, |
66 | CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); | |
568508e7 | 67 | } else { |
020406ea | 68 | int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0); |
ae44b5a4 TB |
69 | fixup_pack_header_footer(fd, hash, state->pack_tmp_name, |
70 | state->nr_written, hash, | |
568508e7 JH |
71 | state->offset); |
72 | close(fd); | |
73 | } | |
74 | ||
66833f0e ÆAB |
75 | strbuf_addf(&packname, "%s/pack/pack-%s.", get_object_directory(), |
76 | hash_to_hex(hash)); | |
58892711 | 77 | finish_tmp_packfile(&packname, state->pack_tmp_name, |
568508e7 | 78 | state->written, state->nr_written, |
ae44b5a4 | 79 | &state->pack_idx_opts, hash); |
568508e7 JH |
80 | for (i = 0; i < state->nr_written; i++) |
81 | free(state->written[i]); | |
82 | ||
83 | clear_exit: | |
84 | free(state->written); | |
85 | memset(state, 0, sizeof(*state)); | |
86 | ||
58892711 | 87 | strbuf_release(&packname); |
568508e7 | 88 | /* Make objects we just wrote available to ourselves */ |
a49d2834 | 89 | reprepare_packed_git(the_repository); |
568508e7 JH |
90 | } |
91 | ||
c0f4752e NS |
92 | /* |
93 | * Cleanup after batch-mode fsync_object_files. | |
94 | */ | |
95 | static void flush_batch_fsync(void) | |
96 | { | |
97 | struct strbuf temp_path = STRBUF_INIT; | |
98 | struct tempfile *temp; | |
99 | ||
100 | if (!bulk_fsync_objdir) | |
101 | return; | |
102 | ||
103 | /* | |
104 | * Issue a full hardware flush against a temporary file to ensure | |
105 | * that all objects are durable before any renames occur. The code in | |
106 | * fsync_loose_object_bulk_checkin has already issued a writeout | |
107 | * request, but it has not flushed any writeback cache in the storage | |
108 | * hardware or any filesystem logs. This fsync call acts as a barrier | |
109 | * to ensure that the data in each new object file is durable before | |
110 | * the final name is visible. | |
111 | */ | |
112 | strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX", get_object_directory()); | |
113 | temp = xmks_tempfile(temp_path.buf); | |
114 | fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp)); | |
115 | delete_tempfile(&temp); | |
116 | strbuf_release(&temp_path); | |
117 | ||
118 | /* | |
119 | * Make the object files visible in the primary ODB after their data is | |
120 | * fully durable. | |
121 | */ | |
122 | tmp_objdir_migrate(bulk_fsync_objdir); | |
123 | bulk_fsync_objdir = NULL; | |
124 | } | |
125 | ||
897c9e25 | 126 | static int already_written(struct bulk_checkin_packfile *state, struct object_id *oid) |
568508e7 JH |
127 | { |
128 | int i; | |
129 | ||
130 | /* The object may already exist in the repository */ | |
bc726bd0 | 131 | if (repo_has_object_file(the_repository, oid)) |
568508e7 JH |
132 | return 1; |
133 | ||
134 | /* Might want to keep the list sorted */ | |
135 | for (i = 0; i < state->nr_written; i++) | |
4a7e27e9 | 136 | if (oideq(&state->written[i]->oid, oid)) |
568508e7 JH |
137 | return 1; |
138 | ||
139 | /* This is a new object we need to keep */ | |
140 | return 0; | |
141 | } | |
142 | ||
/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_to_pack(struct bulk_checkin_packfile *state,
			  git_hash_ctx *ctx, off_t *already_hashed_to,
			  int fd, size_t size, enum object_type type,
			  const char *path, unsigned flags)
{
	git_zstream s;
	unsigned char ibuf[16384];	/* raw input from fd */
	unsigned char obuf[16384];	/* deflated output for the pack */
	unsigned hdrlen;
	int status = Z_OK;
	int write_object = (flags & HASH_WRITE_OBJECT);
	off_t offset = 0;	/* bytes consumed from fd in this call */

	git_deflate_init(&s, pack_compression_level);

	/* The in-pack object header is emitted uncompressed, up front. */
	hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
	s.next_out = obuf + hdrlen;
	s.avail_out = sizeof(obuf) - hdrlen;

	while (status != Z_STREAM_END) {
		/* Refill the input buffer whenever deflate has drained it. */
		if (size && !s.avail_in) {
			ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
			ssize_t read_result = read_in_full(fd, ibuf, rsize);
			if (read_result < 0)
				die_errno("failed to read from '%s'", path);
			if (read_result != rsize)
				die("failed to read %d bytes from '%s'",
				    (int)rsize, path);
			offset += rsize;
			/*
			 * Only hash bytes we have not hashed on a previous
			 * attempt (the caller may re-invoke us after a
			 * pack-limit rollback; see the comment above).
			 */
			if (*already_hashed_to < offset) {
				size_t hsize = offset - *already_hashed_to;
				if (rsize < hsize)
					hsize = rsize;
				if (hsize)
					the_hash_algo->update_fn(ctx, ibuf, hsize);
				*already_hashed_to = offset;
			}
			s.next_in = ibuf;
			s.avail_in = rsize;
			size -= rsize;
		}

		/* Finish the zlib stream once all input has been consumed. */
		status = git_deflate(&s, size ? 0 : Z_FINISH);

		if (!s.avail_out || status == Z_STREAM_END) {
			if (write_object) {
				size_t written = s.next_out - obuf;

				/* would we bust the size limit? */
				if (state->nr_written &&
				    pack_size_limit_cfg &&
				    pack_size_limit_cfg < state->offset + written) {
					git_deflate_abort(&s);
					return -1;
				}

				hashwrite(state->f, obuf, written);
				state->offset += written;
			}
			s.next_out = obuf;
			s.avail_out = sizeof(obuf);
		}

		switch (status) {
		case Z_OK:
		case Z_BUF_ERROR:
		case Z_STREAM_END:
			continue;
		default:
			die("unexpected deflate failure: %d", status);
		}
	}
	git_deflate_end(&s);
	return 0;
}
233 | ||
234 | /* Lazily create backing packfile for the state */ | |
897c9e25 | 235 | static void prepare_to_stream(struct bulk_checkin_packfile *state, |
568508e7 JH |
236 | unsigned flags) |
237 | { | |
238 | if (!(flags & HASH_WRITE_OBJECT) || state->f) | |
239 | return; | |
240 | ||
241 | state->f = create_tmp_packfile(&state->pack_tmp_name); | |
242 | reset_pack_idx_option(&state->pack_idx_opts); | |
243 | ||
244 | /* Pretend we are going to write only one object */ | |
245 | state->offset = write_pack_header(state->f, 1); | |
246 | if (!state->offset) | |
247 | die_errno("unable to write pack header"); | |
248 | } | |
249 | ||
/*
 * Hash and (when HASH_WRITE_OBJECT is set) stream the object read
 * from "fd" into the pack tracked by "state", storing the resulting
 * object name in "result_oid".
 *
 * If streaming would push the current pack past the configured size
 * limit, the partial write is rolled back to a checkpoint, the pack
 * is flushed, fd is rewound, and the object is streamed again into a
 * fresh pack.  Duplicate objects are truncated away rather than kept.
 *
 * Returns 0 on success, or the result of error() on lseek failure.
 */
static int deflate_to_pack(struct bulk_checkin_packfile *state,
			   struct object_id *result_oid,
			   int fd, size_t size,
			   enum object_type type, const char *path,
			   unsigned flags)
{
	off_t seekback, already_hashed_to;
	git_hash_ctx ctx;
	unsigned char obuf[16384];
	unsigned header_len;
	struct hashfile_checkpoint checkpoint = {0};
	struct pack_idx_entry *idx = NULL;

	/* Remember where fd is, so we can rewind for a retry. */
	seekback = lseek(fd, 0, SEEK_CUR);
	if (seekback == (off_t) -1)
		return error("cannot find the current offset");

	/*
	 * The object name hashes the loose-object header, which is not
	 * what goes into the pack, so hash it separately up front.
	 */
	header_len = format_object_header((char *)obuf, sizeof(obuf),
					  type, size);
	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, obuf, header_len);

	/* Note: idx is non-NULL when we are writing */
	if ((flags & HASH_WRITE_OBJECT) != 0)
		CALLOC_ARRAY(idx, 1);

	already_hashed_to = 0;

	while (1) {
		prepare_to_stream(state, flags);
		if (idx) {
			/* Checkpoint so a failed attempt can be undone. */
			hashfile_checkpoint(state->f, &checkpoint);
			idx->offset = state->offset;
			crc32_begin(state->f);
		}
		if (!stream_to_pack(state, &ctx, &already_hashed_to,
				    fd, size, type, path, flags))
			break;
		/*
		 * Writing this object to the current pack will make
		 * it too big; we need to truncate it, start a new
		 * pack, and write into it.
		 */
		if (!idx)
			BUG("should not happen");
		hashfile_truncate(state->f, &checkpoint);
		state->offset = checkpoint.offset;
		flush_bulk_checkin_packfile(state);
		if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
			return error("cannot seek back");
	}
	the_hash_algo->final_oid_fn(result_oid, &ctx);
	if (!idx)
		return 0;

	idx->crc32 = crc32_end(state->f);
	if (already_written(state, result_oid)) {
		/* Duplicate: undo the bytes we just streamed. */
		hashfile_truncate(state->f, &checkpoint);
		state->offset = checkpoint.offset;
		free(idx);
	} else {
		/* New object: record it for the pack index. */
		oidcpy(&idx->oid, result_oid);
		ALLOC_GROW(state->written,
			   state->nr_written + 1,
			   state->alloc_written);
		state->written[state->nr_written++] = idx;
	}
	return 0;
}
319 | ||
c0f4752e NS |
320 | void prepare_loose_object_bulk_checkin(void) |
321 | { | |
322 | /* | |
323 | * We lazily create the temporary object directory | |
324 | * the first time an object might be added, since | |
325 | * callers may not know whether any objects will be | |
326 | * added at the time they call begin_odb_transaction. | |
327 | */ | |
328 | if (!odb_transaction_nesting || bulk_fsync_objdir) | |
329 | return; | |
330 | ||
331 | bulk_fsync_objdir = tmp_objdir_create("bulk-fsync"); | |
332 | if (bulk_fsync_objdir) | |
333 | tmp_objdir_replace_primary_odb(bulk_fsync_objdir, 0); | |
334 | } | |
335 | ||
/*
 * Make the loose-object data on "fd" durable: inside an ODB
 * transaction a cheap writeout request suffices (the real flush is
 * deferred to flush_batch_fsync); otherwise, or if the cheap request
 * fails, fall back to a full fsync.
 */
void fsync_loose_object_bulk_checkin(int fd, const char *filename)
{
	/*
	 * If we have an active ODB transaction, we issue a call that
	 * cleans the filesystem page cache but avoids a hardware flush
	 * command. Later on we will issue a single hardware flush
	 * before renaming the objects to their final names as part of
	 * flush_batch_fsync.
	 */
	if (!bulk_fsync_objdir ||
	    git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
		/*
		 * NOTE(review): when bulk_fsync_objdir is NULL we get
		 * here without calling git_fsync(), so errno is stale
		 * and the ENOSYS check below could fire spuriously —
		 * confirm the warning should only be tied to a
		 * git_fsync() failure.
		 */
		if (errno == ENOSYS)
			warning(_("core.fsyncMethod = batch is unsupported on this platform"));
		fsync_or_die(fd, filename);
	}
}
352 | ||
68ee6dfc | 353 | int index_bulk_checkin(struct object_id *oid, |
568508e7 JH |
354 | int fd, size_t size, enum object_type type, |
355 | const char *path, unsigned flags) | |
356 | { | |
897c9e25 | 357 | int status = deflate_to_pack(&bulk_checkin_packfile, oid, fd, size, type, |
568508e7 | 358 | path, flags); |
2c23d1b4 | 359 | if (!odb_transaction_nesting) |
897c9e25 | 360 | flush_bulk_checkin_packfile(&bulk_checkin_packfile); |
568508e7 JH |
361 | return status; |
362 | } | |
363 | ||
2c23d1b4 | 364 | void begin_odb_transaction(void) |
568508e7 | 365 | { |
2c23d1b4 | 366 | odb_transaction_nesting += 1; |
568508e7 JH |
367 | } |
368 | ||
/*
 * Push all pending bulk-checkin state to the repository without ending
 * the transaction: first make staged loose objects durable and
 * visible, then finalize the in-flight packfile.
 */
void flush_odb_transaction(void)
{
	flush_batch_fsync();
	flush_bulk_checkin_packfile(&bulk_checkin_packfile);
}
2c23d1b4 NS |
374 | |
375 | void end_odb_transaction(void) | |
376 | { | |
377 | odb_transaction_nesting -= 1; | |
378 | if (odb_transaction_nesting < 0) | |
379 | BUG("Unbalanced ODB transaction nesting"); | |
380 | ||
381 | if (odb_transaction_nesting) | |
382 | return; | |
383 | ||
384 | flush_odb_transaction(); | |
568508e7 | 385 | } |