1 /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
4 This file is part of systemd.
6 Copyright 2015 Lennart Poettering
8 systemd is free software; you can redistribute it and/or modify it
9 under the terms of the GNU Lesser General Public License as published by
10 the Free Software Foundation; either version 2.1 of the License, or
11 (at your option) any later version.
13 systemd is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public License
19 along with systemd; If not, see <http://www.gnu.org/licenses/>.
22 #include <sys/xattr.h>
24 #include "alloc-util.h"
26 #include "hexdecoct.h"
28 #include "machine-pool.h"
29 #include "parse-util.h"
31 #include "string-util.h"
33 #include "xattr-util.h"
/* Destructor for a PullJob: releases every resource the job owns.
 * NOTE(review): this extraction is lossy (original line numbers jump);
 * presumably the full version also frees remaining string members and
 * the job object itself, and returns NULL — confirm against upstream. */
35 PullJob
* pull_job_unref(PullJob
*j
) {
/* Detach the curl easy handle from the glue event loop and free it. */
39 curl_glue_remove_and_free(j
->glue
, j
->curl
);
/* Free the custom request-header list (e.g. If-None-Match). */
40 curl_slist_free_all(j
->request_header
);
42 safe_close(j
->disk_fd
);
44 import_compress_free(&j
->compress
);
/* The hash context only exists when checksumming was enabled. */
46 if (j
->checksum_context
)
47 gcry_md_close(j
->checksum_context
);
51 strv_free(j
->old_etags
);
/* Transition the job into a terminal state (DONE or FAILED) exactly once.
 * NOTE(review): extraction is lossy — the branch structure selecting the
 * success vs. failure path (presumably on 'ret') is not fully visible;
 * confirm against upstream before relying on this. */
60 static void pull_job_finish(PullJob
*j
, int ret
) {
/* Idempotence guard: do nothing if already in a terminal state. */
63 if (j
->state
== PULL_JOB_DONE
||
64 j
->state
== PULL_JOB_FAILED
)
/* Success path: mark done and force progress to 100%. */
68 j
->state
= PULL_JOB_DONE
;
69 j
->progress_percent
= 100;
70 log_info("Download of %s complete.", j
->url
);
/* Failure path. */
72 j
->state
= PULL_JOB_FAILED
;
/* Callback invoked by the curl glue when a transfer completes (successfully
 * or not). Validates the HTTP result, finalizes the checksum, fixes up the
 * written file (size, xattrs, timestamps) and then finishes the job.
 * NOTE(review): extraction is lossy (line-number gaps) — several error
 * returns/gotos between these statements are missing from this view. */
80 void pull_job_curl_on_finished(CurlGlue
*g
, CURL
*curl
, CURLcode result
) {
/* Recover the PullJob pointer stashed in the easy handle's PRIVATE slot. */
86 if (curl_easy_getinfo(curl
, CURLINFO_PRIVATE
, (char **)&j
) != CURLE_OK
)
/* Ignore callbacks for jobs already in a terminal state. */
89 if (!j
|| j
->state
== PULL_JOB_DONE
|| j
->state
== PULL_JOB_FAILED
)
92 if (result
!= CURLE_OK
) {
93 log_error("Transfer failed: %s", curl_easy_strerror(result
));
/* Check the HTTP response code. */
98 code
= curl_easy_getinfo(curl
, CURLINFO_RESPONSE_CODE
, &status
);
99 if (code
!= CURLE_OK
) {
100 log_error("Failed to retrieve response code: %s", curl_easy_strerror(code
));
/* 304 Not Modified: one of our old ETags still matches — no download needed. */
103 } else if (status
== 304) {
104 log_info("Image already downloaded. Skipping download.");
105 j
->etag_exists
= true;
/* Any 3xx/4xx/5xx (other than 304) is a hard failure. */
108 } else if (status
>= 300) {
109 log_error("HTTP request to %s failed with code %li.", j
->url
, status
);
/* 1xx codes are not expected at completion time. */
112 } else if (status
< 200) {
113 log_error("HTTP request to %s finished with unexpected code %li.", j
->url
, status
);
/* If we never reached RUNNING, the connection died before the payload. */
118 if (j
->state
!= PULL_JOB_RUNNING
) {
119 log_error("Premature connection termination.");
/* If the server announced a Content-Length, verify we got all of it. */
124 if (j
->content_length
!= (uint64_t) -1 &&
125 j
->content_length
!= j
->written_compressed
) {
126 log_error("Download truncated.");
/* Finalize the SHA256 checksum, if one was being computed. */
131 if (j
->checksum_context
) {
134 k
= gcry_md_read(j
->checksum_context
, GCRY_MD_SHA256
);
136 log_error("Failed to get checksum.");
/* Store the digest as a hex string on the job. */
141 j
->checksum
= hexmem(k
, gcry_md_get_algo_dlen(GCRY_MD_SHA256
));
147 log_debug("SHA256 of %s is %s.", j
->url
, j
->checksum
);
150 if (j
->disk_fd
>= 0 && j
->allow_sparse
) {
151 /* Make sure the file size is right, in case the file was
152 * sparse and we just seeked for the last part */
154 if (ftruncate(j
->disk_fd
, j
->written_uncompressed
) < 0) {
155 r
= log_error_errno(errno
, "Failed to truncate file: %m");
/* Record provenance as xattrs; best-effort, failures deliberately ignored. */
160 (void) fsetxattr(j
->disk_fd
, "user.source_etag", j
->etag
, strlen(j
->etag
), 0);
162 (void) fsetxattr(j
->disk_fd
, "user.source_url", j
->url
, strlen(j
->url
), 0);
/* Propagate the server-reported modification time to the file, best-effort. */
165 struct timespec ut
[2];
167 timespec_store(&ut
[0], j
->mtime
);
169 (void) futimens(j
->disk_fd
, ut
);
171 (void) fd_setcrtime(j
->disk_fd
, j
->mtime
);
/* Enter the terminal state with the accumulated result code. */
178 pull_job_finish(j
, r
);
/* Sink for decompressed data (used as the import_uncompress() callback):
 * writes 'sz' bytes either to the disk fd or into the in-memory payload
 * buffer, enforcing size limits. 'userdata' is the owning PullJob.
 * NOTE(review): extraction is lossy — error returns between checks and the
 * branch selecting sparse_write() vs. plain write() are not fully visible. */
181 static int pull_job_write_uncompressed(const void *p
, size_t sz
, void *userdata
) {
182 PullJob
*j
= userdata
;
/* Guard against uint64 wrap-around of the running total. */
191 if (j
->written_uncompressed
+ sz
< j
->written_uncompressed
) {
192 log_error("File too large, overflow");
/* Enforce the configured maximum uncompressed size. */
196 if (j
->written_uncompressed
+ sz
> j
->uncompressed_max
) {
197 log_error("File overly large, refusing");
201 if (j
->disk_fd
>= 0) {
/* Periodically grow the backing machine pool while streaming to disk. */
203 if (j
->grow_machine_directory
&& j
->written_since_last_grow
>= GROW_INTERVAL_BYTES
) {
204 j
->written_since_last_grow
= 0;
205 grow_machine_directory();
/* Sparse-aware write path (runs of >= 64 zero bytes become holes)... */
209 n
= sparse_write(j
->disk_fd
, p
, sz
, 64);
/* ...versus the plain write path. */
211 n
= write(j
->disk_fd
, p
, sz
);
213 return log_error_errno(errno
, "Failed to write file: %m");
214 if ((size_t) n
< sz
) {
215 log_error("Short write");
/* No disk fd: accumulate into the growable in-memory payload buffer. */
220 if (!GREEDY_REALLOC(j
->payload
, j
->payload_allocated
, j
->payload_size
+ sz
))
223 memcpy(j
->payload
+ j
->payload_size
, p
, sz
);
224 j
->payload_size
+= sz
;
/* Update byte counters used by the size checks and pool growth above. */
227 j
->written_uncompressed
+= sz
;
228 j
->written_since_last_grow
+= sz
;
/* Feed 'sz' bytes of raw (possibly compressed) wire data into the job:
 * size-checks it, folds it into the checksum, and pushes it through the
 * decompressor, which in turn calls pull_job_write_uncompressed().
 * NOTE(review): extraction is lossy — early error returns after each
 * log_error() are not visible here. */
233 static int pull_job_write_compressed(PullJob
*j
, void *p
, size_t sz
) {
/* Guard against uint64 wrap-around of the compressed byte counter. */
242 if (j
->written_compressed
+ sz
< j
->written_compressed
) {
243 log_error("File too large, overflow");
/* Enforce the configured maximum compressed size. */
247 if (j
->written_compressed
+ sz
> j
->compressed_max
) {
248 log_error("File overly large, refusing.");
/* Never accept more bytes than the server's Content-Length announced. */
252 if (j
->content_length
!= (uint64_t) -1 &&
253 j
->written_compressed
+ sz
> j
->content_length
) {
254 log_error("Content length incorrect.");
/* The checksum is computed over the compressed wire bytes. */
258 if (j
->checksum_context
)
259 gcry_md_write(j
->checksum_context
, p
, sz
);
/* Decompress; decompressed output is delivered to the callback with 'j'. */
261 r
= import_uncompress(&j
->compress
, p
, sz
, pull_job_write_uncompressed
, j
);
265 j
->written_compressed
+= sz
;
/* Prepare the output target: let the owner open the destination fd via the
 * on_open_disk hook, probe whether the fd is seekable (enables sparse
 * writing), and set up the SHA256 context if checksumming was requested. */
270 static int pull_job_open_disk(PullJob
*j
) {
275 if (j
->on_open_disk
) {
/* Owner-provided hook is expected to set j->disk_fd. */
276 r
= j
->on_open_disk(j
);
281 if (j
->disk_fd
>= 0) {
282 /* Check if we can do sparse files */
/* NOTE(review): lseek's signature is (fd, offset, whence) — the offset and
 * whence arguments here look swapped; this only behaves as intended because
 * SEEK_SET == 0 on POSIX. Worth fixing upstream for clarity. */
284 if (lseek(j
->disk_fd
, SEEK_SET
, 0) == 0)
285 j
->allow_sparse
= true;
288 return log_error_errno(errno
, "Failed to seek on file descriptor: %m");
/* Non-seekable fd (e.g. a pipe): cannot punch holes, write linearly. */
290 j
->allow_sparse
= false;
294 if (j
->calc_checksum
) {
295 if (gcry_md_open(&j
->checksum_context
, GCRY_MD_SHA256
, 0) != 0) {
296 log_error("Failed to initialize hash context.");
/* Once enough payload has been buffered, detect the compression format,
 * open the output, switch the job to RUNNING and replay the buffered bytes
 * through the normal compressed-write path.
 * NOTE(review): extraction is lossy — the step that moves j->payload into
 * 'stub' (and resets j->payload/j->payload_size) is only partially visible. */
304 static int pull_job_detect_compression(PullJob
*j
) {
305 _cleanup_free_
uint8_t *stub
= NULL
;
/* Sniff the format from the bytes buffered so far. */
312 r
= import_uncompress_detect(&j
->compress
, j
->payload
, j
->payload_size
);
314 return log_error_errno(r
, "Failed to initialize compressor: %m");
318 log_debug("Stream is compressed: %s", import_compress_type_to_string(j
->compress
.type
));
/* With the format known, the destination can now be opened. */
320 r
= pull_job_open_disk(j
);
324 /* Now, take the payload we read so far, and decompress it */
326 stub_size
= j
->payload_size
;
330 j
->payload_allocated
= 0;
332 j
->state
= PULL_JOB_RUNNING
;
/* Replay the stashed prefix as if it had just arrived from the wire. */
334 r
= pull_job_write_compressed(j
, stub
, stub_size
);
/* libcurl CURLOPT_WRITEFUNCTION callback: receives body data. While
 * ANALYZING, bytes are buffered until the compression format can be
 * detected; while RUNNING, bytes stream straight to the decompressor.
 * On error, the job is finished with the failure code.
 * NOTE(review): extraction is lossy — the enclosing switch statement and
 * the return/error paths between cases are only partially visible. */
341 static size_t pull_job_write_callback(void *contents
, size_t size
, size_t nmemb
, void *userdata
) {
342 PullJob
*j
= userdata
;
/* Per libcurl convention, the chunk length is size * nmemb bytes. */
343 size_t sz
= size
* nmemb
;
351 case PULL_JOB_ANALYZING
:
352 /* Let's first check what it actually is */
354 if (!GREEDY_REALLOC(j
->payload
, j
->payload_allocated
, j
->payload_size
+ sz
)) {
359 memcpy(j
->payload
+ j
->payload_size
, contents
, sz
);
360 j
->payload_size
+= sz
;
/* Try to identify the compression from the buffered prefix. */
362 r
= pull_job_detect_compression(j
);
368 case PULL_JOB_RUNNING
:
/* Steady state: feed data directly into the decompression pipeline. */
370 r
= pull_job_write_compressed(j
, contents
, sz
);
/* Terminal states should never receive further body data. */
377 case PULL_JOB_FAILED
:
382 assert_not_reached("Impossible state.");
/* Error path: move the job to its terminal state. */
388 pull_job_finish(j
, r
);
/* libcurl CURLOPT_HEADERFUNCTION callback: parses ETag, Content-Length and
 * Last-Modified response headers, short-circuits the download when an old
 * ETag matches (304-style cache hit), and forwards headers to the owner's
 * on_header hook.
 * NOTE(review): extraction is lossy — early returns after each parse and
 * the declarations of 'etag'/'r' are not visible in this view. */
392 static size_t pull_job_header_callback(void *contents
, size_t size
, size_t nmemb
, void *userdata
) {
393 PullJob
*j
= userdata
;
/* Per libcurl convention, the header line length is size * nmemb bytes. */
394 size_t sz
= size
* nmemb
;
395 _cleanup_free_
char *length
= NULL
, *last_modified
= NULL
;
/* Ignore headers arriving after the job already reached a terminal state. */
402 if (j
->state
== PULL_JOB_DONE
|| j
->state
== PULL_JOB_FAILED
) {
/* Headers arrive before any body data, i.e. still in the ANALYZING phase. */
407 assert(j
->state
== PULL_JOB_ANALYZING
);
409 r
= curl_header_strdup(contents
, sz
, "ETag:", &etag
);
/* If the server's ETag matches one we already have, skip the download. */
418 if (strv_contains(j
->old_etags
, j
->etag
)) {
419 log_info("Image already downloaded. Skipping download.");
420 j
->etag_exists
= true;
421 pull_job_finish(j
, 0);
428 r
= curl_header_strdup(contents
, sz
, "Content-Length:", &length
);
/* Best-effort parse; content_length stays (uint64_t) -1 if unparsable. */
434 (void) safe_atou64(length
, &j
->content_length
);
436 if (j
->content_length
!= (uint64_t) -1) {
437 char bytes
[FORMAT_BYTES_MAX
];
/* Reject downloads that the server announces as larger than our limit. */
439 if (j
->content_length
> j
->compressed_max
) {
440 log_error("Content too large.");
445 log_info("Downloading %s for %s.", format_bytes(bytes
, sizeof(bytes
), j
->content_length
), j
->url
);
451 r
= curl_header_strdup(contents
, sz
, "Last-Modified:", &last_modified
);
/* Best-effort: remember the server mtime for later futimens()/crtime. */
457 (void) curl_parse_http_time(last_modified
, &j
->mtime
);
/* Give the owner a chance to inspect every header line. */
462 r
= j
->on_header(j
, contents
, sz
);
/* Error path: finish the job with the failure code. */
470 pull_job_finish(j
, r
);
/* libcurl CURLOPT_XFERINFOFUNCTION callback: logs download progress,
 * rate-limited to at most once per second and only when the integer
 * percentage actually changed. Also estimates remaining time and average
 * throughput once at least a second of data is available.
 * NOTE(review): extraction is lossy — the guards against dltotal <= 0
 * and the declarations of 'percent'/'n'/'done'/'left' are not visible. */
474 static int pull_job_progress_callback(void *userdata
, curl_off_t dltotal
, curl_off_t dlnow
, curl_off_t ultotal
, curl_off_t ulnow
) {
475 PullJob
*j
= userdata
;
/* Integer percentage of the download completed so far. */
484 percent
= ((100 * dlnow
) / dltotal
);
485 n
= now(CLOCK_MONOTONIC
);
/* Rate-limit: only log when >1s elapsed AND the percentage changed. */
487 if (n
> j
->last_status_usec
+ USEC_PER_SEC
&&
488 percent
!= j
->progress_percent
&&
490 char buf
[FORMAT_TIMESPAN_MAX
];
/* With >=1s of history and some data received, an ETA is meaningful. */
492 if (n
- j
->start_usec
> USEC_PER_SEC
&& dlnow
> 0) {
493 char y
[FORMAT_BYTES_MAX
];
496 done
= n
- j
->start_usec
;
/* Linear extrapolation: total time scaled by dltotal/dlnow, minus elapsed. */
497 left
= (usec_t
) (((double) done
* (double) dltotal
) / dlnow
) - done
;
499 log_info("Got %u%% of %s. %s left at %s/s.",
502 format_timespan(buf
, sizeof(buf
), left
, USEC_PER_SEC
),
503 format_bytes(y
, sizeof(y
), (uint64_t) ((double) dlnow
/ ((double) done
/ (double) USEC_PER_SEC
))));
/* Fallback when no ETA can be computed yet. */
505 log_info("Got %u%% of %s.", percent
, j
->url
);
507 j
->progress_percent
= percent
;
508 j
->last_status_usec
= n
;
/* Allocate and initialize a new PullJob for 'url', attached to the given
 * curl glue. On success ownership is transferred to *ret (the cleanup
 * attribute releases 'j' on error paths).
 * NOTE(review): extraction is lossy — the OOM checks after new0()/strdup(),
 * the assignment of glue/ret and the final return are not visible here. */
517 int pull_job_new(PullJob
**ret
, const char *url
, CurlGlue
*glue
, void *userdata
) {
/* Auto-unref on early return until ownership is handed to the caller. */
518 _cleanup_(pull_job_unrefp
) PullJob
*j
= NULL
;
524 j
= new0(PullJob
, 1);
528 j
->state
= PULL_JOB_INIT
;
530 j
->userdata
= userdata
;
/* (uint64_t) -1 marks "no Content-Length received yet". */
532 j
->content_length
= (uint64_t) -1;
533 j
->start_usec
= now(CLOCK_MONOTONIC
);
/* Default size limits for both compressed wire data and unpacked output. */
534 j
->compressed_max
= j
->uncompressed_max
= 8LLU * 1024LLU * 1024LLU * 1024LLU; /* 8GB */
536 j
->url
= strdup(url
);
/* Start a previously created job: build the curl easy handle, attach the
 * If-None-Match header from the known old ETags, wire up the write/header/
 * progress callbacks, register the handle with the glue event loop and move
 * the job into the ANALYZING state.
 * NOTE(review): extraction is lossy — error returns (-EBUSY for non-INIT
 * state, -ENOMEM, -EIO after failed setopts) are not visible in this view. */
546 int pull_job_begin(PullJob
*j
) {
/* A job may only be started once, from the INIT state. */
551 if (j
->state
!= PULL_JOB_INIT
)
/* Pre-grow the machine pool before data starts flowing, if requested. */
554 if (j
->grow_machine_directory
)
555 grow_machine_directory();
/* Create the curl easy handle for j->url, with 'j' as its private data. */
557 r
= curl_glue_make(&j
->curl
, j
->url
, j
);
/* If we know previous ETags, ask the server for 304 on a match. */
561 if (!strv_isempty(j
->old_etags
)) {
562 _cleanup_free_
char *cc
= NULL
, *hdr
= NULL
;
564 cc
= strv_join(j
->old_etags
, ", ");
568 hdr
= strappend("If-None-Match: ", cc
);
/* Either start a fresh header list or append to the existing one. */
572 if (!j
->request_header
) {
573 j
->request_header
= curl_slist_new(hdr
, NULL
);
574 if (!j
->request_header
)
577 struct curl_slist
*l
;
579 l
= curl_slist_append(j
->request_header
, hdr
);
583 j
->request_header
= l
;
587 if (j
->request_header
) {
588 if (curl_easy_setopt(j
->curl
, CURLOPT_HTTPHEADER
, j
->request_header
) != CURLE_OK
)
/* Body data -> pull_job_write_callback(j). */
592 if (curl_easy_setopt(j
->curl
, CURLOPT_WRITEFUNCTION
, pull_job_write_callback
) != CURLE_OK
)
595 if (curl_easy_setopt(j
->curl
, CURLOPT_WRITEDATA
, j
) != CURLE_OK
)
/* Header lines -> pull_job_header_callback(j). */
598 if (curl_easy_setopt(j
->curl
, CURLOPT_HEADERFUNCTION
, pull_job_header_callback
) != CURLE_OK
)
601 if (curl_easy_setopt(j
->curl
, CURLOPT_HEADERDATA
, j
) != CURLE_OK
)
/* Progress reporting -> pull_job_progress_callback(j); NOPROGRESS=0
 * is required for the xferinfo callback to actually fire. */
604 if (curl_easy_setopt(j
->curl
, CURLOPT_XFERINFOFUNCTION
, pull_job_progress_callback
) != CURLE_OK
)
607 if (curl_easy_setopt(j
->curl
, CURLOPT_XFERINFODATA
, j
) != CURLE_OK
)
610 if (curl_easy_setopt(j
->curl
, CURLOPT_NOPROGRESS
, 0) != CURLE_OK
)
/* Hand the fully configured handle to the glue's event loop. */
613 r
= curl_glue_add(j
->glue
, j
->curl
);
617 j
->state
= PULL_JOB_ANALYZING
;