/* src/import/pull-job.c — systemd import/pull job implementation */
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <fcntl.h>
4 #include <sys/stat.h>
5 #include <sys/xattr.h>
6
7 #include "alloc-util.h"
8 #include "fd-util.h"
9 #include "gcrypt-util.h"
10 #include "hexdecoct.h"
11 #include "import-util.h"
12 #include "io-util.h"
13 #include "machine-pool.h"
14 #include "parse-util.h"
15 #include "pull-common.h"
16 #include "pull-job.h"
17 #include "string-util.h"
18 #include "strv.h"
19 #include "xattr-util.h"
20
21 PullJob* pull_job_unref(PullJob *j) {
22 if (!j)
23 return NULL;
24
25 curl_glue_remove_and_free(j->glue, j->curl);
26 curl_slist_free_all(j->request_header);
27
28 safe_close(j->disk_fd);
29
30 import_compress_free(&j->compress);
31
32 if (j->checksum_context)
33 gcry_md_close(j->checksum_context);
34
35 free(j->url);
36 free(j->etag);
37 strv_free(j->old_etags);
38 free(j->payload);
39 free(j->checksum);
40
41 return mfree(j);
42 }
43
44 static void pull_job_finish(PullJob *j, int ret) {
45 assert(j);
46
47 if (IN_SET(j->state, PULL_JOB_DONE, PULL_JOB_FAILED))
48 return;
49
50 if (ret == 0) {
51 j->state = PULL_JOB_DONE;
52 j->progress_percent = 100;
53 log_info("Download of %s complete.", j->url);
54 } else {
55 j->state = PULL_JOB_FAILED;
56 j->error = ret;
57 }
58
59 if (j->on_finished)
60 j->on_finished(j);
61 }
62
63 static int pull_job_restart(PullJob *j) {
64 int r;
65 char *chksum_url = NULL;
66
67 r = import_url_change_last_component(j->url, "SHA256SUMS", &chksum_url);
68 if (r < 0)
69 return r;
70
71 free(j->url);
72 j->url = chksum_url;
73 j->state = PULL_JOB_INIT;
74 j->payload = mfree(j->payload);
75 j->payload_size = 0;
76 j->payload_allocated = 0;
77 j->written_compressed = 0;
78 j->written_uncompressed = 0;
79
80 r = pull_job_begin(j);
81 if (r < 0)
82 return r;
83
84 return 0;
85 }
86
/* Completion callback invoked by the curl glue when a transfer terminates,
 * successfully or not. Maps the curl/HTTP result onto the job state,
 * finalizes the checksum and on-disk metadata, and ends by calling
 * pull_job_finish() (except on the SHA256SUMS-restart path, which returns
 * early with the job running again). */
void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
        PullJob *j = NULL;
        CURLcode code;
        long status;
        int r;

        /* Recover the PullJob stashed in the easy handle's private pointer. */
        if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, (char **)&j) != CURLE_OK)
                return;

        if (!j || IN_SET(j->state, PULL_JOB_DONE, PULL_JOB_FAILED))
                return;

        if (result != CURLE_OK) {
                log_error("Transfer failed: %s", curl_easy_strerror(result));
                r = -EIO;
                goto finish;
        }

        code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
        if (code != CURLE_OK) {
                log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
                r = -EIO;
                goto finish;
        } else if (status == 304) {
                /* 304 Not Modified: one of our If-None-Match ETags still matches. */
                log_info("Image already downloaded. Skipping download.");
                j->etag_exists = true;
                r = 0;
                goto finish;
        } else if (status >= 300) {
                if (status == 404 && j->style == VERIFICATION_PER_FILE) {

                        /* retry pull job with SHA256SUMS file */
                        r = pull_job_restart(j);
                        if (r < 0)
                                goto finish;

                        /* NOTE(review): this queries the restarted job's fresh easy
                         * handle; presumably status 0 means "no response yet", i.e.
                         * the retry is in flight — confirm against curl_glue_make(). */
                        code = curl_easy_getinfo(j->curl, CURLINFO_RESPONSE_CODE, &status);
                        if (code != CURLE_OK) {
                                log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
                                r = -EIO;
                                goto finish;
                        }

                        if (status == 0) {
                                j->style = VERIFICATION_PER_DIRECTORY;
                                return;
                        }
                }

                log_error("HTTP request to %s failed with code %li.", j->url, status);
                r = -EIO;
                goto finish;
        } else if (status < 200) {
                log_error("HTTP request to %s finished with unexpected code %li.", j->url, status);
                r = -EIO;
                goto finish;
        }

        if (j->state != PULL_JOB_RUNNING) {
                log_error("Premature connection termination.");
                r = -EIO;
                goto finish;
        }

        /* If the server announced a Content-Length, the bytes received must match. */
        if (j->content_length != (uint64_t) -1 &&
            j->content_length != j->written_compressed) {
                log_error("Download truncated.");
                r = -EIO;
                goto finish;
        }

        if (j->checksum_context) {
                uint8_t *k;

                /* Finalize the SHA256 over the compressed stream and keep it
                 * around as a hex string for later verification. */
                k = gcry_md_read(j->checksum_context, GCRY_MD_SHA256);
                if (!k) {
                        log_error("Failed to get checksum.");
                        r = -EIO;
                        goto finish;
                }

                j->checksum = hexmem(k, gcry_md_get_algo_dlen(GCRY_MD_SHA256));
                if (!j->checksum) {
                        r = log_oom();
                        goto finish;
                }

                log_debug("SHA256 of %s is %s.", j->url, j->checksum);
        }

        if (j->disk_fd >= 0 && j->allow_sparse) {
                /* Make sure the file size is right, in case the file was
                 * sparse and we just seeked for the last part */

                if (ftruncate(j->disk_fd, j->written_uncompressed) < 0) {
                        r = log_error_errno(errno, "Failed to truncate file: %m");
                        goto finish;
                }

                /* Best effort: record ETag and source URL as xattrs so a
                 * future pull can issue a conditional request. */
                if (j->etag)
                        (void) fsetxattr(j->disk_fd, "user.source_etag", j->etag, strlen(j->etag), 0);
                if (j->url)
                        (void) fsetxattr(j->disk_fd, "user.source_url", j->url, strlen(j->url), 0);

                if (j->mtime != 0) {
                        struct timespec ut[2];

                        /* Propagate the server-reported Last-Modified time
                         * to the file's atime/mtime (and crtime). */
                        timespec_store(&ut[0], j->mtime);
                        ut[1] = ut[0];
                        (void) futimens(j->disk_fd, ut);

                        (void) fd_setcrtime(j->disk_fd, j->mtime);
                }
        }

        r = 0;

finish:
        pull_job_finish(j, r);
}
207
208 static int pull_job_write_uncompressed(const void *p, size_t sz, void *userdata) {
209 PullJob *j = userdata;
210 ssize_t n;
211
212 assert(j);
213 assert(p);
214
215 if (sz <= 0)
216 return 0;
217
218 if (j->written_uncompressed + sz < j->written_uncompressed)
219 return log_error_errno(SYNTHETIC_ERRNO(EOVERFLOW),
220 "File too large, overflow");
221
222 if (j->written_uncompressed + sz > j->uncompressed_max)
223 return log_error_errno(SYNTHETIC_ERRNO(EFBIG),
224 "File overly large, refusing");
225
226 if (j->disk_fd >= 0) {
227
228 if (j->allow_sparse)
229 n = sparse_write(j->disk_fd, p, sz, 64);
230 else {
231 n = write(j->disk_fd, p, sz);
232 if (n < 0)
233 n = -errno;
234 }
235 if (n < 0)
236 return log_error_errno((int) n, "Failed to write file: %m");
237 if ((size_t) n < sz)
238 return log_error_errno(SYNTHETIC_ERRNO(EIO), "Short write");
239 } else {
240
241 if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz))
242 return log_oom();
243
244 memcpy(j->payload + j->payload_size, p, sz);
245 j->payload_size += sz;
246 }
247
248 j->written_uncompressed += sz;
249
250 return 0;
251 }
252
253 static int pull_job_write_compressed(PullJob *j, void *p, size_t sz) {
254 int r;
255
256 assert(j);
257 assert(p);
258
259 if (sz <= 0)
260 return 0;
261
262 if (j->written_compressed + sz < j->written_compressed)
263 return log_error_errno(SYNTHETIC_ERRNO(EOVERFLOW), "File too large, overflow");
264
265 if (j->written_compressed + sz > j->compressed_max)
266 return log_error_errno(SYNTHETIC_ERRNO(EFBIG), "File overly large, refusing.");
267
268 if (j->content_length != (uint64_t) -1 &&
269 j->written_compressed + sz > j->content_length)
270 return log_error_errno(SYNTHETIC_ERRNO(EFBIG),
271 "Content length incorrect.");
272
273 if (j->checksum_context)
274 gcry_md_write(j->checksum_context, p, sz);
275
276 r = import_uncompress(&j->compress, p, sz, pull_job_write_uncompressed, j);
277 if (r < 0)
278 return r;
279
280 j->written_compressed += sz;
281
282 return 0;
283 }
284
285 static int pull_job_open_disk(PullJob *j) {
286 int r;
287
288 assert(j);
289
290 if (j->on_open_disk) {
291 r = j->on_open_disk(j);
292 if (r < 0)
293 return r;
294 }
295
296 if (j->disk_fd >= 0) {
297 /* Check if we can do sparse files */
298
299 if (lseek(j->disk_fd, SEEK_SET, 0) == 0)
300 j->allow_sparse = true;
301 else {
302 if (errno != ESPIPE)
303 return log_error_errno(errno, "Failed to seek on file descriptor: %m");
304
305 j->allow_sparse = false;
306 }
307 }
308
309 if (j->calc_checksum) {
310 initialize_libgcrypt(false);
311
312 if (gcry_md_open(&j->checksum_context, GCRY_MD_SHA256, 0) != 0)
313 return log_error_errno(SYNTHETIC_ERRNO(EIO),
314 "Failed to initialize hash context.");
315 }
316
317 return 0;
318 }
319
320 static int pull_job_detect_compression(PullJob *j) {
321 _cleanup_free_ uint8_t *stub = NULL;
322 size_t stub_size;
323
324 int r;
325
326 assert(j);
327
328 r = import_uncompress_detect(&j->compress, j->payload, j->payload_size);
329 if (r < 0)
330 return log_error_errno(r, "Failed to initialize compressor: %m");
331 if (r == 0)
332 return 0;
333
334 log_debug("Stream is compressed: %s", import_compress_type_to_string(j->compress.type));
335
336 r = pull_job_open_disk(j);
337 if (r < 0)
338 return r;
339
340 /* Now, take the payload we read so far, and decompress it */
341 stub = j->payload;
342 stub_size = j->payload_size;
343
344 j->payload = NULL;
345 j->payload_size = 0;
346 j->payload_allocated = 0;
347
348 j->state = PULL_JOB_RUNNING;
349
350 r = pull_job_write_compressed(j, stub, stub_size);
351 if (r < 0)
352 return r;
353
354 return 0;
355 }
356
357 static size_t pull_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
358 PullJob *j = userdata;
359 size_t sz = size * nmemb;
360 int r;
361
362 assert(contents);
363 assert(j);
364
365 switch (j->state) {
366
367 case PULL_JOB_ANALYZING:
368 /* Let's first check what it actually is */
369
370 if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) {
371 r = log_oom();
372 goto fail;
373 }
374
375 memcpy(j->payload + j->payload_size, contents, sz);
376 j->payload_size += sz;
377
378 r = pull_job_detect_compression(j);
379 if (r < 0)
380 goto fail;
381
382 break;
383
384 case PULL_JOB_RUNNING:
385
386 r = pull_job_write_compressed(j, contents, sz);
387 if (r < 0)
388 goto fail;
389
390 break;
391
392 case PULL_JOB_DONE:
393 case PULL_JOB_FAILED:
394 r = -ESTALE;
395 goto fail;
396
397 default:
398 assert_not_reached("Impossible state.");
399 }
400
401 return sz;
402
403 fail:
404 pull_job_finish(j, r);
405 return 0;
406 }
407
/* curl header callback, invoked once per response header line. Extracts the
 * headers we care about (ETag, Content-Length, Last-Modified) into the job,
 * and forwards any other header to the optional on_header hook. Returns sz
 * to continue; returning 0 makes curl abort the transfer. */
static size_t pull_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
        PullJob *j = userdata;
        size_t sz = size * nmemb;
        _cleanup_free_ char *length = NULL, *last_modified = NULL;
        char *etag;
        int r;

        assert(contents);
        assert(j);

        /* Headers after the job already finished mean the transfer is stale. */
        if (IN_SET(j->state, PULL_JOB_DONE, PULL_JOB_FAILED)) {
                r = -ESTALE;
                goto fail;
        }

        assert(j->state == PULL_JOB_ANALYZING);

        r = curl_header_strdup(contents, sz, "ETag:", &etag);
        if (r < 0) {
                log_oom();
                goto fail;
        }
        if (r > 0) {
                /* Ownership of the freshly allocated ETag string moves to the job. */
                free(j->etag);
                j->etag = etag;

                /* If the server's ETag matches one we already have, the image
                 * on disk is current and the download can stop right here. */
                if (strv_contains(j->old_etags, j->etag)) {
                        log_info("Image already downloaded. Skipping download.");
                        j->etag_exists = true;
                        pull_job_finish(j, 0);
                        return sz;
                }

                return sz;
        }

        r = curl_header_strdup(contents, sz, "Content-Length:", &length);
        if (r < 0) {
                log_oom();
                goto fail;
        }
        if (r > 0) {
                /* Parse failure is tolerated: content_length then stays (uint64_t) -1. */
                (void) safe_atou64(length, &j->content_length);

                if (j->content_length != (uint64_t) -1) {
                        char bytes[FORMAT_BYTES_MAX];

                        /* Refuse up front if the announced size already exceeds the limit. */
                        if (j->content_length > j->compressed_max) {
                                log_error("Content too large.");
                                r = -EFBIG;
                                goto fail;
                        }

                        log_info("Downloading %s for %s.", format_bytes(bytes, sizeof(bytes), j->content_length), j->url);
                }

                return sz;
        }

        r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified);
        if (r < 0) {
                log_oom();
                goto fail;
        }
        if (r > 0) {
                /* Best effort: a malformed date simply leaves mtime at 0. */
                (void) curl_parse_http_time(last_modified, &j->mtime);
                return sz;
        }

        if (j->on_header) {
                r = j->on_header(j, contents, sz);
                if (r < 0)
                        goto fail;
        }

        return sz;

fail:
        pull_job_finish(j, r);
        return 0;
}
489
490 static int pull_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) {
491 PullJob *j = userdata;
492 unsigned percent;
493 usec_t n;
494
495 assert(j);
496
497 if (dltotal <= 0)
498 return 0;
499
500 percent = ((100 * dlnow) / dltotal);
501 n = now(CLOCK_MONOTONIC);
502
503 if (n > j->last_status_usec + USEC_PER_SEC &&
504 percent != j->progress_percent &&
505 dlnow < dltotal) {
506 char buf[FORMAT_TIMESPAN_MAX];
507
508 if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) {
509 char y[FORMAT_BYTES_MAX];
510 usec_t left, done;
511
512 done = n - j->start_usec;
513 left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done;
514
515 log_info("Got %u%% of %s. %s left at %s/s.",
516 percent,
517 j->url,
518 format_timespan(buf, sizeof(buf), left, USEC_PER_SEC),
519 format_bytes(y, sizeof(y), (uint64_t) ((double) dlnow / ((double) done / (double) USEC_PER_SEC))));
520 } else
521 log_info("Got %u%% of %s.", percent, j->url);
522
523 j->progress_percent = percent;
524 j->last_status_usec = n;
525
526 if (j->on_progress)
527 j->on_progress(j);
528 }
529
530 return 0;
531 }
532
533 int pull_job_new(PullJob **ret, const char *url, CurlGlue *glue, void *userdata) {
534 _cleanup_(pull_job_unrefp) PullJob *j = NULL;
535 _cleanup_free_ char *u = NULL;
536
537 assert(url);
538 assert(glue);
539 assert(ret);
540
541 u = strdup(url);
542 if (!u)
543 return -ENOMEM;
544
545 j = new(PullJob, 1);
546 if (!j)
547 return -ENOMEM;
548
549 *j = (PullJob) {
550 .state = PULL_JOB_INIT,
551 .disk_fd = -1,
552 .userdata = userdata,
553 .glue = glue,
554 .content_length = (uint64_t) -1,
555 .start_usec = now(CLOCK_MONOTONIC),
556 .compressed_max = 64LLU * 1024LLU * 1024LLU * 1024LLU, /* 64GB safety limit */
557 .uncompressed_max = 64LLU * 1024LLU * 1024LLU * 1024LLU, /* 64GB safety limit */
558 .style = VERIFICATION_STYLE_UNSET,
559 .url = TAKE_PTR(u),
560 };
561
562 *ret = TAKE_PTR(j);
563
564 return 0;
565 }
566
/* Starts the HTTP transfer for an initialized job: creates the curl easy
 * handle, attaches an If-None-Match header built from the known old ETags,
 * wires up the write/header/progress callbacks, and registers the handle
 * with the glue's event loop. Returns 0 on success, -EBUSY if the job was
 * already started, or another negative errno on failure. */
int pull_job_begin(PullJob *j) {
        int r;

        assert(j);

        if (j->state != PULL_JOB_INIT)
                return -EBUSY;

        r = curl_glue_make(&j->curl, j->url, j);
        if (r < 0)
                return r;

        if (!strv_isempty(j->old_etags)) {
                _cleanup_free_ char *cc = NULL, *hdr = NULL;

                /* Offer every previously seen ETag, so the server can answer
                 * 304 Not Modified if any of them is still current. */
                cc = strv_join(j->old_etags, ", ");
                if (!cc)
                        return -ENOMEM;

                hdr = strappend("If-None-Match: ", cc);
                if (!hdr)
                        return -ENOMEM;

                /* Either start a fresh header list or append to the existing one. */
                if (!j->request_header) {
                        j->request_header = curl_slist_new(hdr, NULL);
                        if (!j->request_header)
                                return -ENOMEM;
                } else {
                        struct curl_slist *l;

                        l = curl_slist_append(j->request_header, hdr);
                        if (!l)
                                return -ENOMEM;

                        j->request_header = l;
                }
        }

        if (j->request_header) {
                if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)
                        return -EIO;
        }

        if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, pull_job_write_callback) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, pull_job_header_callback) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, pull_job_progress_callback) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK)
                return -EIO;

        /* Progress reporting is disabled by default in curl; enable it so
         * the XFERINFO callback actually fires. */
        if (curl_easy_setopt(j->curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK)
                return -EIO;

        r = curl_glue_add(j->glue, j->curl);
        if (r < 0)
                return r;

        j->state = PULL_JOB_ANALYZING;

        return 0;
}