/*
 * csum-file.c
 *
 * Copyright (C) 2005 Linus Torvalds
 *
 * Simple file write infrastructure for writing SHA1-summed
 * files. Useful when you write a file that you want to be
 * able to verify hasn't been messed with afterwards.
 */
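/*
 * Typical use (illustrative sketch; "fd", "name", "buf", "len",
 * "hash" and "component" stand in for the caller's values):
 *
 *	struct hashfile *f = hashfd(fd, name);
 *	hashwrite(f, buf, len);
 *	finalize_hashfile(f, hash, component,
 *			  CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
 *
 * which appends the checksum of everything written to the end of the
 * stream, fsyncs the file according to the fsync configuration for
 * "component", and closes the descriptor.
 */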
#include "git-compat-util.h"
#include "progress.h"
#include "csum-file.h"
#include "hash.h"
#include "wrapper.h"

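/*
 * Compare the bytes we are about to write with the next bytes of the
 * check file (see hashfd_check()), dying on read errors, truncation,
 * or any mismatch.
 */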
static void verify_buffer_or_die(struct hashfile *f,
				 const void *buf,
				 unsigned int count)
{
	ssize_t ret = read_in_full(f->check_fd, f->check_buffer, count);

	if (ret < 0)
		die_errno("%s: sha1 file read error", f->name);
	if (ret != count)
		die("%s: sha1 file truncated", f->name);
	if (memcmp(buf, f->check_buffer, count))
		die("sha1 file '%s' validation error", f->name);
}

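/*
 * Write a block of data to the descriptor, verifying it against the
 * check file first if one is attached.  Short writes and ENOSPC are
 * fatal; on success the running total and throughput display are
 * updated.
 */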
static void flush(struct hashfile *f, const void *buf, unsigned int count)
{
	if (0 <= f->check_fd && count)
		verify_buffer_or_die(f, buf, count);

	if (write_in_full(f->fd, buf, count) < 0) {
		if (errno == ENOSPC)
			die("sha1 file '%s' write error. Out of diskspace", f->name);
		die_errno("sha1 file '%s' write error", f->name);
	}

	f->total += count;
	display_throughput(f->tp, f->total);
}

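/*
 * Drain the internal buffer: hash the buffered bytes (unless
 * skip_hash is set) and write them out, leaving the buffer empty.
 */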
void hashflush(struct hashfile *f)
{
	unsigned offset = f->offset;

	if (offset) {
		if (!f->skip_hash)
			the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
		flush(f, f->buffer, offset);
		f->offset = 0;
	}
}

static void free_hashfile(struct hashfile *f)
{
	free(f->buffer);
	free(f->check_buffer);
	free(f);
}

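/*
 * Finish writing: flush buffered data, compute the final hash (all
 * zeroes when skip_hash is set) and copy it to "result" if non-NULL,
 * then honor the CSUM_* flags: append the hash to the stream, fsync
 * according to "component", and/or close the descriptor.  Any check
 * file must have been fully consumed.  Returns the descriptor, or 0
 * if it was closed.
 */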
int finalize_hashfile(struct hashfile *f, unsigned char *result,
		      enum fsync_component component, unsigned int flags)
{
	int fd;

	hashflush(f);

	if (f->skip_hash)
		hashclr(f->buffer);
	else
		the_hash_algo->final_fn(f->buffer, &f->ctx);

	if (result)
		hashcpy(result, f->buffer);
	if (flags & CSUM_HASH_IN_STREAM)
		flush(f, f->buffer, the_hash_algo->rawsz);
	if (flags & CSUM_FSYNC)
		fsync_component_or_die(component, f->fd, f->name);
	if (flags & CSUM_CLOSE) {
		if (close(f->fd))
			die_errno("%s: sha1 file error on close", f->name);
		fd = 0;
	} else
		fd = f->fd;
	if (0 <= f->check_fd) {
		char discard;
		int cnt = read_in_full(f->check_fd, &discard, 1);
		if (cnt < 0)
			die_errno("%s: error when reading the tail of sha1 file",
				  f->name);
		if (cnt)
			die("%s: sha1 file has trailing garbage", f->name);
		if (close(f->check_fd))
			die_errno("%s: sha1 file error on close", f->name);
	}
	free_hashfile(f);
	return fd;
}

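/*
 * Append "count" bytes to the stream.  Buffer-sized chunks are hashed
 * and written directly from the caller's memory; smaller pieces are
 * accumulated in the internal buffer and flushed once it fills.  The
 * bytes are also folded into the CRC32 while crc32_begin() is active.
 */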
void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
{
	while (count) {
		unsigned left = f->buffer_len - f->offset;
		unsigned nr = count > left ? left : count;

		if (f->do_crc)
			f->crc32 = crc32(f->crc32, buf, nr);

		if (nr == f->buffer_len) {
			/*
			 * Flush a full batch worth of data directly
			 * from the input, skipping the memcpy() to
			 * the hashfile's buffer. In this block,
			 * f->offset is necessarily zero.
			 */
			if (!f->skip_hash)
				the_hash_algo->update_fn(&f->ctx, buf, nr);
			flush(f, buf, nr);
		} else {
			/*
			 * Copy to the hashfile's buffer, flushing only
			 * if it became full.
			 */
			memcpy(f->buffer + f->offset, buf, nr);
			f->offset += nr;
			left -= nr;
			if (!left)
				hashflush(f);
		}

		count -= nr;
		buf = (char *) buf + nr;
	}
}

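/*
 * Create a hashfile that discards its output (writing to /dev/null)
 * while comparing every outgoing byte against the existing file
 * "name", so a caller regenerating a file can verify it matches what
 * is already on disk.
 */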
struct hashfile *hashfd_check(const char *name)
{
	int sink, check;
	struct hashfile *f;

	sink = xopen("/dev/null", O_WRONLY);
	check = xopen(name, O_RDONLY);
	f = hashfd(sink, name);
	f->check_fd = check;
	f->check_buffer = xmalloc(f->buffer_len);

	return f;
}

static struct hashfile *hashfd_internal(int fd, const char *name,
					struct progress *tp,
					size_t buffer_len)
{
	struct hashfile *f = xmalloc(sizeof(*f));
	f->fd = fd;
	f->check_fd = -1;
	f->offset = 0;
	f->total = 0;
	f->tp = tp;
	f->name = name;
	f->do_crc = 0;
	f->skip_hash = 0;
	the_hash_algo->init_fn(&f->ctx);

	f->buffer_len = buffer_len;
	f->buffer = xmalloc(buffer_len);
	f->check_buffer = NULL;

	return f;
}

struct hashfile *hashfd(int fd, const char *name)
{
	/*
	 * Since we are not going to use a progress meter to
	 * measure the rate of data passing through this hashfile,
	 * use a larger buffer size to reduce the number of flushes
	 * (and hence write() calls).
	 */
	return hashfd_internal(fd, name, NULL, 128 * 1024);
}

struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp)
{
	/*
	 * Since we are expecting to report progress of the
	 * write into this hashfile, use a smaller buffer
	 * size so the progress indicators arrive at a more
	 * frequent rate.
	 */
	return hashfd_internal(fd, name, tp, 8 * 1024);
}

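/*
 * Record the current length of the stream and a copy of the hash
 * context so the caller can later rewind to this point with
 * hashfile_truncate().
 */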
void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
	hashflush(f);
	checkpoint->offset = f->total;
	the_hash_algo->clone_fn(&checkpoint->ctx, &f->ctx);
}

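/*
 * Rewind the underlying file and the hash context to a previously
 * recorded checkpoint.  Returns 0 on success, -1 if truncating or
 * seeking fails.
 */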
int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
	off_t offset = checkpoint->offset;

	if (ftruncate(f->fd, offset) ||
	    lseek(f->fd, offset, SEEK_SET) != offset)
		return -1;
	f->total = offset;
	f->ctx = checkpoint->ctx;
	f->offset = 0; /* hashflush() was called in checkpoint */
	return 0;
}

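/*
 * Start accumulating a CRC32 over the bytes passed to hashwrite();
 * crc32_end() stops the accumulation and returns the resulting value.
 */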
void crc32_begin(struct hashfile *f)
{
	f->crc32 = crc32(0, NULL, 0);
	f->do_crc = 1;
}

uint32_t crc32_end(struct hashfile *f)
{
	f->do_crc = 0;
	return f->crc32;
}

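/*
 * Verify an in-memory file of "total_len" bytes whose trailing rawsz
 * bytes are expected to be the hash of everything before them.
 * Returns 1 if the trailing checksum matches, 0 otherwise (including
 * when the buffer is too short to contain a checksum at all).
 */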
int hashfile_checksum_valid(const unsigned char *data, size_t total_len)
{
	unsigned char got[GIT_MAX_RAWSZ];
	git_hash_ctx ctx;
	size_t data_len = total_len - the_hash_algo->rawsz;

	if (total_len < the_hash_algo->rawsz)
		return 0; /* say "too short"? */

	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, data, data_len);
	the_hash_algo->final_fn(got, &ctx);

	return hasheq(got, data + data_len);
}