/* SPDX-License-Identifier: LGPL-2.1+ */
/***
  This file is part of systemd.

  Copyright 2015 Lennart Poettering
***/

#include <sys/xattr.h>

#include "alloc-util.h"
#include "fd-util.h"
#include "hexdecoct.h"
#include "import-util.h"
#include "io-util.h"
#include "machine-pool.h"
#include "parse-util.h"
#include "pull-common.h"
#include "pull-job.h"
#include "string-util.h"
#include "strv.h"
#include "xattr-util.h"
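
/* Generic logic for a single HTTP(S) download job, driven by libcurl via
 * CurlGlue. The payload is optionally SHA256-hashed and decompressed on the
 * fly, and either written to a disk file descriptor or accumulated in
 * memory. */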
PullJob* pull_job_unref(PullJob *j) {
        if (!j)
                return NULL;

        curl_glue_remove_and_free(j->glue, j->curl);
        curl_slist_free_all(j->request_header);

        safe_close(j->disk_fd);

        import_compress_free(&j->compress);

        if (j->checksum_context)
                gcry_md_close(j->checksum_context);

        free(j->url);
        free(j->etag);
        strv_free(j->old_etags);
        free(j->payload);
        free(j->checksum);

        return mfree(j);
}
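
/* Marks the job as complete (ret == 0) or failed and notifies the owner
 * through the on_finished callback; safe to call more than once, since
 * terminal states are never left again. */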
static void pull_job_finish(PullJob *j, int ret) {
        assert(j);

        if (IN_SET(j->state, PULL_JOB_DONE, PULL_JOB_FAILED))
                return;

        if (ret == 0) {
                j->state = PULL_JOB_DONE;
                j->progress_percent = 100;
                log_info("Download of %s complete.", j->url);
        } else
                j->state = PULL_JOB_FAILED;

        if (j->on_finished)
                j->on_finished(j);
}
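
/* Re-points the job at the SHA256SUMS file next to the original URL and
 * restarts it from scratch, resetting all transfer counters. */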
static int pull_job_restart(PullJob *j) {
        char *chksum_url = NULL;
        int r;

        r = import_url_change_last_component(j->url, "SHA256SUMS", &chksum_url);
        if (r < 0)
                return r;

        free(j->url);
        j->url = chksum_url;

        j->state = PULL_JOB_INIT;
        j->payload = mfree(j->payload);
        j->payload_size = 0;
        j->payload_allocated = 0;
        j->written_compressed = 0;
        j->written_uncompressed = 0;
        j->written_since_last_grow = 0;

        r = pull_job_begin(j);
        if (r < 0)
                return r;

        return 0;
}
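
/* Invoked by CurlGlue when the transfer is over, successfully or not.
 * Validates the HTTP status and transfer length, finalizes the checksum, and
 * stamps the output file with metadata before finishing the job. */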
void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
        PullJob *j = NULL;
        CURLcode code;
        long status;
        int r;

        if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, (char **) &j) != CURLE_OK)
                return;

        if (!j || IN_SET(j->state, PULL_JOB_DONE, PULL_JOB_FAILED))
                return;

        if (result != CURLE_OK) {
                log_error("Transfer failed: %s", curl_easy_strerror(result));
                r = -EIO;
                goto finish;
        }

        code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
        if (code != CURLE_OK) {
                log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
                r = -EIO;
                goto finish;
        } else if (status == 304) {
                log_info("Image already downloaded. Skipping download.");
                j->etag_exists = true;
                r = 0;
                goto finish;
        } else if (status >= 300) {

                if (status == 404 && j->style == VERIFICATION_PER_FILE) {

                        /* retry pull job with SHA256SUMS file */
                        r = pull_job_restart(j);
                        if (r < 0)
                                goto finish;

                        code = curl_easy_getinfo(j->curl, CURLINFO_RESPONSE_CODE, &status);
                        if (code != CURLE_OK) {
                                log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
                                r = -EIO;
                                goto finish;
                        }

                        if (status == 0) {
                                j->style = VERIFICATION_PER_DIRECTORY;
                                return;
                        }
                }

                log_error("HTTP request to %s failed with code %li.", j->url, status);
                r = -EIO;
                goto finish;

        } else if (status < 200) {
                log_error("HTTP request to %s finished with unexpected code %li.", j->url, status);
                r = -EIO;
                goto finish;
        }

        if (j->state != PULL_JOB_RUNNING) {
                log_error("Premature connection termination.");
                r = -EIO;
                goto finish;
        }

        if (j->content_length != (uint64_t) -1 &&
            j->content_length != j->written_compressed) {
                log_error("Download truncated.");
                r = -EIO;
                goto finish;
        }

        if (j->checksum_context) {
                uint8_t *k;

                k = gcry_md_read(j->checksum_context, GCRY_MD_SHA256);
                if (!k) {
                        log_error("Failed to get checksum.");
                        r = -EIO;
                        goto finish;
                }

                j->checksum = hexmem(k, gcry_md_get_algo_dlen(GCRY_MD_SHA256));
                if (!j->checksum) {
                        r = log_oom();
                        goto finish;
                }

                log_debug("SHA256 of %s is %s.", j->url, j->checksum);
        }

        if (j->disk_fd >= 0) {

                if (j->allow_sparse) {
                        /* Make sure the file size is right, in case the file was
                         * sparse and we just seeked for the last part */

                        if (ftruncate(j->disk_fd, j->written_uncompressed) < 0) {
                                r = log_error_errno(errno, "Failed to truncate file: %m");
                                goto finish;
                        }
                }

                if (j->etag)
                        (void) fsetxattr(j->disk_fd, "user.source_etag", j->etag, strlen(j->etag), 0);
                if (j->url)
                        (void) fsetxattr(j->disk_fd, "user.source_url", j->url, strlen(j->url), 0);

                if (j->mtime != 0) {
                        struct timespec ut[2];

                        timespec_store(&ut[0], j->mtime);
                        ut[1] = ut[0];
                        (void) futimens(j->disk_fd, ut);

                        (void) fd_setcrtime(j->disk_fd, j->mtime);
                }
        }

        r = 0;

finish:
        pull_job_finish(j, r);
}
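
/* Sink for the decompressed payload: enforces the size limits, then writes
 * to the disk fd (sparsely, where possible) or buffers in memory. */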
static int pull_job_write_uncompressed(const void *p, size_t sz, void *userdata) {
        PullJob *j = userdata;
        ssize_t n;

        assert(j);
        assert(p);

        if (sz <= 0)
                return 0;

        if (j->written_uncompressed + sz < j->written_uncompressed) {
                log_error("File too large, overflow.");
                return -EOVERFLOW;
        }

        if (j->written_uncompressed + sz > j->uncompressed_max) {
                log_error("File overly large, refusing.");
                return -EFBIG;
        }

        if (j->disk_fd >= 0) {

                if (j->grow_machine_directory && j->written_since_last_grow >= GROW_INTERVAL_BYTES) {
                        j->written_since_last_grow = 0;
                        grow_machine_directory();
                }

                if (j->allow_sparse)
                        n = sparse_write(j->disk_fd, p, sz, 64);
                else
                        n = write(j->disk_fd, p, sz);
                if (n < 0)
                        return log_error_errno(errno, "Failed to write file: %m");
                if ((size_t) n < sz) {
                        log_error("Short write.");
                        return -EIO;
                }
        } else {

                if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz))
                        return log_oom();

                memcpy(j->payload + j->payload_size, p, sz);
                j->payload_size += sz;
        }

        j->written_uncompressed += sz;
        j->written_since_last_grow += sz;

        return 0;
}
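
/* Sink for the raw (possibly compressed) payload as it arrives from curl:
 * enforces the limits, updates the SHA256 context, then pushes the data
 * through the decompressor into pull_job_write_uncompressed(). */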
static int pull_job_write_compressed(PullJob *j, void *p, size_t sz) {
        int r;

        assert(j);
        assert(p);

        if (sz <= 0)
                return 0;

        if (j->written_compressed + sz < j->written_compressed) {
                log_error("File too large, overflow.");
                return -EOVERFLOW;
        }

        if (j->written_compressed + sz > j->compressed_max) {
                log_error("File overly large, refusing.");
                return -EFBIG;
        }

        if (j->content_length != (uint64_t) -1 &&
            j->written_compressed + sz > j->content_length) {
                log_error("Content length incorrect.");
                return -EFBIG;
        }

        if (j->checksum_context)
                gcry_md_write(j->checksum_context, p, sz);

        r = import_uncompress(&j->compress, p, sz, pull_job_write_uncompressed, j);
        if (r < 0)
                return r;

        j->written_compressed += sz;

        return 0;
}
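
/* Gives the caller a chance to open the destination (via on_open_disk),
 * probes whether the fd is seekable (and hence supports sparse writes), and
 * sets up the SHA256 context if checksumming was requested. */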
static int pull_job_open_disk(PullJob *j) {
        int r;

        assert(j);

        if (j->on_open_disk) {
                r = j->on_open_disk(j);
                if (r < 0)
                        return r;
        }

        if (j->disk_fd >= 0) {
                /* Check if we can do sparse files */

                if (lseek(j->disk_fd, 0, SEEK_SET) == 0)
                        j->allow_sparse = true;
                else {
                        if (errno != ESPIPE)
                                return log_error_errno(errno, "Failed to seek on file descriptor: %m");

                        j->allow_sparse = false;
                }
        }

        if (j->calc_checksum) {
                if (gcry_md_open(&j->checksum_context, GCRY_MD_SHA256, 0) != 0) {
                        log_error("Failed to initialize hash context.");
                        return -EIO;
                }
        }

        return 0;
}
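
/* Called while enough of the payload is being buffered to sniff the
 * compression format; once detected, opens the destination and replays the
 * buffered bytes through the decompression path, switching the job to
 * PULL_JOB_RUNNING. */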
static int pull_job_detect_compression(PullJob *j) {
        _cleanup_free_ uint8_t *stub = NULL;
        size_t stub_size;
        int r;

        assert(j);

        r = import_uncompress_detect(&j->compress, j->payload, j->payload_size);
        if (r < 0)
                return log_error_errno(r, "Failed to initialize compressor: %m");
        if (r == 0)
                return 0;

        log_debug("Stream is compressed: %s", import_compress_type_to_string(j->compress.type));

        r = pull_job_open_disk(j);
        if (r < 0)
                return r;

        /* Now, take the payload we read so far, and decompress it */
        stub = j->payload;
        stub_size = j->payload_size;

        j->payload = NULL;
        j->payload_size = 0;
        j->payload_allocated = 0;

        j->state = PULL_JOB_RUNNING;

        r = pull_job_write_compressed(j, stub, stub_size);
        if (r < 0)
                return r;

        return 0;
}
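
/* libcurl CURLOPT_WRITEFUNCTION: buffers data while the stream type is still
 * being analyzed, streams it once the job is running. Returning a value
 * different from size*nmemb makes curl abort the transfer. */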
static size_t pull_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
        PullJob *j = userdata;
        size_t sz = size * nmemb;
        int r;

        assert(contents);
        assert(j);

        switch (j->state) {

        case PULL_JOB_ANALYZING:
                /* Let's first check what it actually is */

                if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) {
                        r = log_oom();
                        goto fail;
                }

                memcpy(j->payload + j->payload_size, contents, sz);
                j->payload_size += sz;

                r = pull_job_detect_compression(j);
                if (r < 0)
                        goto fail;

                break;

        case PULL_JOB_RUNNING:

                r = pull_job_write_compressed(j, contents, sz);
                if (r < 0)
                        goto fail;

                break;

        case PULL_JOB_DONE:
        case PULL_JOB_FAILED:
                r = -ESTALE;
                goto fail;

        default:
                assert_not_reached("Impossible state.");
        }

        return sz;

fail:
        pull_job_finish(j, r);
        return 0;
}
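
/* libcurl CURLOPT_HEADERFUNCTION: picks ETag, Content-Length and
 * Last-Modified out of the response headers, and short-circuits the download
 * if the ETag matches one we have already seen. */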
static size_t pull_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
        PullJob *j = userdata;
        size_t sz = size * nmemb;
        _cleanup_free_ char *length = NULL, *last_modified = NULL;
        char *etag;
        int r;

        assert(contents);
        assert(j);

        if (IN_SET(j->state, PULL_JOB_DONE, PULL_JOB_FAILED)) {
                r = -ESTALE;
                goto fail;
        }

        assert(j->state == PULL_JOB_ANALYZING);

        r = curl_header_strdup(contents, sz, "ETag:", &etag);
        if (r < 0) {
                log_oom();
                goto fail;
        }
        if (r > 0) {
                free(j->etag);
                j->etag = etag;

                if (strv_contains(j->old_etags, j->etag)) {
                        log_info("Image already downloaded. Skipping download.");
                        j->etag_exists = true;
                        pull_job_finish(j, 0);
                        return sz;
                }

                return sz;
        }

        r = curl_header_strdup(contents, sz, "Content-Length:", &length);
        if (r < 0) {
                log_oom();
                goto fail;
        }
        if (r > 0) {
                (void) safe_atou64(length, &j->content_length);

                if (j->content_length != (uint64_t) -1) {
                        char bytes[FORMAT_BYTES_MAX];

                        if (j->content_length > j->compressed_max) {
                                log_error("Content too large.");
                                r = -EFBIG;
                                goto fail;
                        }

                        log_info("Downloading %s for %s.", format_bytes(bytes, sizeof(bytes), j->content_length), j->url);
                }

                return sz;
        }

        r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified);
        if (r < 0) {
                log_oom();
                goto fail;
        }
        if (r > 0) {
                (void) curl_parse_http_time(last_modified, &j->mtime);
                return sz;
        }

        if (j->on_header) {
                r = j->on_header(j, contents, sz);
                if (r < 0)
                        goto fail;
        }

        return sz;

fail:
        pull_job_finish(j, r);
        return 0;
}
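
/* libcurl CURLOPT_XFERINFOFUNCTION: rate-limited progress logging (at most
 * once per second, and only when the percentage changed), with a simple
 * ETA and throughput estimate once the transfer has run for a while. */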
static int pull_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) {
        PullJob *j = userdata;
        unsigned percent;
        usec_t n;

        assert(j);

        if (dltotal <= 0)
                return 0;

        percent = ((100 * dlnow) / dltotal);
        n = now(CLOCK_MONOTONIC);

        if (n > j->last_status_usec + USEC_PER_SEC &&
            percent != j->progress_percent &&
            dlnow < dltotal) {
                char buf[FORMAT_TIMESPAN_MAX];

                if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) {
                        char y[FORMAT_BYTES_MAX];
                        usec_t left, done;

                        done = n - j->start_usec;
                        left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done;

                        log_info("Got %u%% of %s. %s left at %s/s.",
                                 percent,
                                 j->url,
                                 format_timespan(buf, sizeof(buf), left, USEC_PER_SEC),
                                 format_bytes(y, sizeof(y), (uint64_t) ((double) dlnow / ((double) done / (double) USEC_PER_SEC))));
                } else
                        log_info("Got %u%% of %s.", percent, j->url);

                j->progress_percent = percent;
                j->last_status_usec = n;

                if (j->on_progress)
                        j->on_progress(j);
        }

        return 0;
}
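
/* Allocates a new job for the given URL in PULL_JOB_INIT state, with
 * conservative defaults (64GB size limits, unknown content length). */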
int pull_job_new(PullJob **ret, const char *url, CurlGlue *glue, void *userdata) {
        _cleanup_(pull_job_unrefp) PullJob *j = NULL;

        assert(url);
        assert(glue);
        assert(ret);

        j = new0(PullJob, 1);
        if (!j)
                return -ENOMEM;

        j->state = PULL_JOB_INIT;
        j->disk_fd = -1;
        j->userdata = userdata;
        j->glue = glue;
        j->content_length = (uint64_t) -1;
        j->start_usec = now(CLOCK_MONOTONIC);
        j->compressed_max = j->uncompressed_max = 64LLU * 1024LLU * 1024LLU * 1024LLU; /* 64GB safety limit */
        j->style = VERIFICATION_STYLE_UNSET;

        j->url = strdup(url);
        if (!j->url)
                return -ENOMEM;

        *ret = j;
        j = NULL;

        return 0;
}
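
/* Starts the transfer: creates the curl handle, attaches an If-None-Match
 * header built from all previously seen ETags, wires up the write, header
 * and progress callbacks, and hands the handle to the event loop via
 * curl_glue_add(). */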
int pull_job_begin(PullJob *j) {
        int r;

        assert(j);

        if (j->state != PULL_JOB_INIT)
                return -EBUSY;

        if (j->grow_machine_directory)
                grow_machine_directory();

        r = curl_glue_make(&j->curl, j->url, j);
        if (r < 0)
                return r;

        if (!strv_isempty(j->old_etags)) {
                _cleanup_free_ char *cc = NULL, *hdr = NULL;

                cc = strv_join(j->old_etags, ", ");
                if (!cc)
                        return -ENOMEM;

                hdr = strappend("If-None-Match: ", cc);
                if (!hdr)
                        return -ENOMEM;

                if (!j->request_header) {
                        j->request_header = curl_slist_new(hdr, NULL);
                        if (!j->request_header)
                                return -ENOMEM;
                } else {
                        struct curl_slist *l;

                        l = curl_slist_append(j->request_header, hdr);
                        if (!l)
                                return -ENOMEM;

                        j->request_header = l;
                }
        }

        if (j->request_header) {
                if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)
                        return -EIO;
        }

        if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, pull_job_write_callback) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, pull_job_header_callback) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, pull_job_progress_callback) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK)
                return -EIO;

        if (curl_easy_setopt(j->curl, CURLOPT_NOPROGRESS, 0L) != CURLE_OK)
                return -EIO;

        r = curl_glue_add(j->glue, j->curl);
        if (r < 0)
                return r;

        j->state = PULL_JOB_ANALYZING;

        return 0;
}
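
/* Typical usage, as a minimal sketch (not from this file): a pull
 * implementation such as those in pull-raw.c or pull-tar.c creates a job,
 * points the callbacks at its own handlers, and then runs the event loop.
 * pull_job_new(), pull_job_begin() and the PullJob fields are the real API
 * from this file; "glue", "userdata" and the my_* handlers are hypothetical:
 *
 *         PullJob *j = NULL;
 *         int r;
 *
 *         r = pull_job_new(&j, "https://example.com/image.raw.xz", glue, userdata);
 *         if (r < 0)
 *                 return r;
 *
 *         j->on_finished = my_on_finished;    // hypothetical handler
 *         j->on_open_disk = my_on_open_disk;  // hypothetical handler
 *         j->calc_checksum = true;
 *
 *         r = pull_job_begin(j);
 *         if (r < 0)
 *                 return r;
 */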