Drop the text argument from assert_not_reached()
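
The change named in the subject drops the message string from the
assert_not_reached() call in pull_job_write_callback(): the source location
(file, line, function) already pinpoints the unreachable spot, so the text
argument carried no extra information. As a rough sketch of what a
zero-argument variant can look like (an illustration only, not systemd's
actual macro, which routes the failure through its own logging helpers):

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch: report the location and abort. __FILE__, __LINE__ and
     * __func__ make a caller-supplied message redundant. */
    #define assert_not_reached()                                                  \
            do {                                                                  \
                    fprintf(stderr, "Code should not be reached at %s:%d (%s)\n", \
                            __FILE__, __LINE__, __func__);                        \
                    abort();                                                      \
            } while (0)

The diff below also carries a few further cleanups in pull-job.c:
GREEDY_REALLOC() losing its "allocated" argument, (uint64_t) -1 becoming
UINT64_MAX, and the FORMAT_BYTES()/FORMAT_TIMESPAN() helpers replacing
caller-side buffers.
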
diff --git a/src/import/pull-job.c b/src/import/pull-job.c
index 908546b96839af76e449a2c58ee38b5f4ec2e7b1..e751d3af05962fbc746181df46a4cbc308b512d6 100644
--- a/src/import/pull-job.c
+++ b/src/import/pull-job.c
@@ -75,7 +75,6 @@ static int pull_job_restart(PullJob *j, const char *new_url) {
         j->error = 0;
         j->payload = mfree(j->payload);
         j->payload_size = 0;
-        j->payload_allocated = 0;
         j->written_compressed = 0;
         j->written_uncompressed = 0;
         j->content_length = UINT64_MAX;
@@ -176,7 +175,7 @@ void pull_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
                 goto finish;
         }
 
-        if (j->content_length != (uint64_t) -1 &&
+        if (j->content_length != UINT64_MAX &&
             j->content_length != j->written_compressed) {
                 log_error("Download truncated.");
                 r = -EIO;
@@ -266,7 +265,7 @@ static int pull_job_write_uncompressed(const void *p, size_t sz, void *userdata)
                         return log_error_errno(SYNTHETIC_ERRNO(EIO), "Short write");
         } else {
 
-                if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz))
+                if (!GREEDY_REALLOC(j->payload, j->payload_size + sz))
                         return log_oom();
 
                 memcpy(j->payload + j->payload_size, p, sz);
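
The GREEDY_REALLOC() call here (and the one further down in
pull_job_write_callback()) no longer takes a separate "allocated" counter,
which is also why the j->payload_allocated resets disappear in the hunks
above and below: the allocator itself can report how much usable space a
block really has, so callers only track the used size. A minimal sketch of
that pattern, assuming glibc's malloc_usable_size() and omitting overflow
checks (an illustration, not systemd's implementation):

    #include <malloc.h>
    #include <stdlib.h>

    /* Grow *p so it can hold at least `need` bytes, relying on the
     * allocation's real usable size instead of a caller-side "allocated"
     * variable. Returns the (possibly moved) buffer, or NULL on OOM. */
    static void *greedy_realloc_sketch(void **p, size_t need) {
            void *q;

            /* Already big enough? malloc_usable_size(NULL) is 0. */
            if (malloc_usable_size(*p) >= need)
                    return *p;

            /* Grow with headroom so repeated appends stay amortized O(1). */
            q = realloc(*p, need * 2);
            if (!q)
                    return NULL;

            *p = q;
            return q;
    }

With that, j->payload only needs j->payload_size for bookkeeping.
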
@@ -293,7 +292,7 @@ static int pull_job_write_compressed(PullJob *j, void *p, size_t sz) {
         if (j->written_compressed + sz > j->compressed_max)
                 return log_error_errno(SYNTHETIC_ERRNO(EFBIG), "File overly large, refusing.");
 
-        if (j->content_length != (uint64_t) -1 &&
+        if (j->content_length != UINT64_MAX &&
             j->written_compressed + sz > j->content_length)
                 return log_error_errno(SYNTHETIC_ERRNO(EFBIG),
                                        "Content length incorrect.");
@@ -371,7 +370,6 @@ static int pull_job_detect_compression(PullJob *j) {
 
         j->payload = NULL;
         j->payload_size = 0;
-        j->payload_allocated = 0;
 
         j->state = PULL_JOB_RUNNING;
 
@@ -395,7 +393,7 @@ static size_t pull_job_write_callback(void *contents, size_t size, size_t nmemb,
         case PULL_JOB_ANALYZING:
                 /* Let's first check what it actually is */
 
-                if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) {
+                if (!GREEDY_REALLOC(j->payload, j->payload_size + sz)) {
                         r = log_oom();
                         goto fail;
                 }
@@ -423,7 +421,7 @@ static size_t pull_job_write_callback(void *contents, size_t size, size_t nmemb,
                 goto fail;
 
         default:
-                assert_not_reached("Impossible state.");
+                assert_not_reached();
         }
 
         return sz;
@@ -502,16 +500,13 @@ static size_t pull_job_header_callback(void *contents, size_t size, size_t nmemb
         if (r > 0) {
                 (void) safe_atou64(length, &j->content_length);
 
-                if (j->content_length != (uint64_t) -1) {
-                        char bytes[FORMAT_BYTES_MAX];
-
+                if (j->content_length != UINT64_MAX) {
                         if (j->content_length > j->compressed_max) {
-                                log_error("Content too large.");
-                                r = -EFBIG;
+                                r = log_error_errno(SYNTHETIC_ERRNO(EFBIG), "Content too large.");
                                 goto fail;
                         }
 
-                        log_info("Downloading %s for %s.", format_bytes(bytes, sizeof(bytes), j->content_length), j->url);
+                        log_info("Downloading %s for %s.", FORMAT_BYTES(j->content_length), j->url);
                 }
 
                 return sz;
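
The error path in this hunk is folded into the usual systemd one-liner:
log_error_errno() logs the message and evaluates to the negative error code,
and SYNTHETIC_ERRNO() marks a code that did not come from a real errno, so
"r = log_error_errno(SYNTHETIC_ERRNO(EFBIG), ...)" logs and sets the return
value in one expression. A simplified, self-contained approximation of that
pattern (the real macros also handle log levels, errno-message suffixes and
the synthetic-error flag; these bodies are stand-ins, not systemd's):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in: the real macro tags the value as "not read from errno". */
    #define SYNTHETIC_ERRNO(x) (x)

    /* Stand-in: log, then yield the negated error code (GNU statement
     * expression, as used throughout the systemd tree). */
    #define log_error_errno(err, fmt, ...)                          \
            ({                                                      \
                    fprintf(stderr, fmt "\n", ##__VA_ARGS__);       \
                    -(err);                                         \
            })

    int main(void) {
            int r = log_error_errno(SYNTHETIC_ERRNO(EFBIG), "Content too large.");
            return r == -EFBIG ? 0 : 1;   /* logs and yields -EFBIG in one go */
    }
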
@@ -556,10 +551,8 @@ static int pull_job_progress_callback(void *userdata, curl_off_t dltotal, curl_o
         if (n > j->last_status_usec + USEC_PER_SEC &&
             percent != j->progress_percent &&
             dlnow < dltotal) {
-                char buf[FORMAT_TIMESPAN_MAX];
 
                 if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) {
-                        char y[FORMAT_BYTES_MAX];
                         usec_t left, done;
 
                         done = n - j->start_usec;
@@ -568,8 +561,8 @@ static int pull_job_progress_callback(void *userdata, curl_off_t dltotal, curl_o
                         log_info("Got %u%% of %s. %s left at %s/s.",
                                  percent,
                                  j->url,
-                                 format_timespan(buf, sizeof(buf), left, USEC_PER_SEC),
-                                 format_bytes(y, sizeof(y), (uint64_t) ((double) dlnow / ((double) done / (double) USEC_PER_SEC))));
+                                 FORMAT_TIMESPAN(left, USEC_PER_SEC),
+                                 FORMAT_BYTES((uint64_t) ((double) dlnow / ((double) done / (double) USEC_PER_SEC))));
                 } else
                         log_info("Got %u%% of %s.", percent, j->url);
 
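
In this hunk and the two above it, the fixed char buf[FORMAT_TIMESPAN_MAX] /
char y[FORMAT_BYTES_MAX] scratch arrays disappear from the callers because
the FORMAT_TIMESPAN()/FORMAT_BYTES() macros now supply their own buffer. The
usual trick for that is a C99 compound literal, an unnamed automatic array
that stays valid for the rest of the enclosing block, so it comfortably
outlives the log call. A minimal sketch of the idea with made-up names (the
real macros and buffer sizes live in systemd's format-util.h / time-util.h
and differ in detail):

    #include <stdio.h>

    #define MY_BYTES_MAX 16

    /* Fill buf with a human-readable size; real code would pick KiB/MiB/GiB
     * units rather than printing raw bytes. */
    static char *my_format_bytes(char *buf, size_t len, unsigned long long n) {
            snprintf(buf, len, "%lluB", n);
            return buf;
    }

    /* The compound literal (char[MY_BYTES_MAX]){ 0 } is the caller-invisible
     * scratch buffer; no local array is needed at the call site. */
    #define MY_FORMAT_BYTES(n) my_format_bytes((char[MY_BYTES_MAX]){ 0 }, MY_BYTES_MAX, (n))

    int main(void) {
            printf("Downloading %s.\n", MY_FORMAT_BYTES(4096ULL));
            return 0;
    }
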
@@ -604,7 +597,7 @@ int pull_job_new(PullJob **ret, const char *url, CurlGlue *glue, void *userdata)
                 .disk_fd = -1,
                 .userdata = userdata,
                 .glue = glue,
-                .content_length = (uint64_t) -1,
+                .content_length = UINT64_MAX,
                 .start_usec = now(CLOCK_MONOTONIC),
                 .compressed_max = 64LLU * 1024LLU * 1024LLU * 1024LLU, /* 64GB safety limit */
                 .uncompressed_max = 64LLU * 1024LLU * 1024LLU * 1024LLU, /* 64GB safety limit */
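
The remaining change here and in the earlier hunks is the mechanical
(uint64_t) -1 → UINT64_MAX substitution. Both spellings denote the same
value, since converting -1 to an unsigned type wraps to that type's maximum;
the named constant from <stdint.h> just states the intent ("content length
not known yet") explicitly:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
            /* Unsigned conversion is modulo 2^64, so -1 wraps to the maximum. */
            assert((uint64_t) -1 == UINT64_MAX);
            return 0;
    }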