git.ipfire.org Git - thirdparty/ccache.git/commitdiff
refactor: Improve FileRecompressor to take a stat
author Joel Rosdahl <joel@rosdahl.net>
Thu, 10 Nov 2022 09:07:33 +0000 (10:07 +0100)
committer Joel Rosdahl <joel@rosdahl.net>
Sun, 27 Nov 2022 20:33:51 +0000 (21:33 +0100)
This avoids extra stat calls in some scenarios.

src/core/FileRecompressor.cpp
src/core/FileRecompressor.hpp
src/core/mainoptions.cpp
src/storage/local/LocalStorage_compress.cpp
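
For orientation, a minimal caller sketch (not part of the commit; cache_file_path and level are illustrative placeholders) of what the new signature enables: the caller passes the Stat it already has, and recompress() only stats the file again if it actually rewrote it, instead of always doing an extra lstat up front:

    core::FileRecompressor recompressor;

    // One lstat, done by the caller (e.g. while traversing the cache directory).
    const auto stat = Stat::lstat(cache_file_path, Stat::OnError::log);
    if (stat) {
      // Returns the Stat after recompression, or the input Stat unchanged if the
      // entry was already at the wanted compression level. May throw core::Error,
      // which the real call sites catch and count as incompressible.
      const Stat new_stat = recompressor.recompress(
        stat, level, core::FileRecompressor::KeepAtime::no);

      // The size change is derived from the two Stats; no further stat calls are
      // needed at the call site.
      const int64_t size_change_kibibyte =
        Util::size_change_kibibyte(stat, new_stat);
    }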

src/core/FileRecompressor.cpp
index 131a85f95b4850f73ce6394150cc750b313a890e..636d6761f39e3b6091b31631e7be5abc7f3a08be 100644 (file)
 
 namespace core {
 
-int64_t
-FileRecompressor::recompress(const std::string& cache_file,
+Stat
+FileRecompressor::recompress(const Stat& stat,
                              std::optional<int8_t> level,
                              KeepAtime keep_atime)
 {
-  core::CacheEntry::Header header(cache_file);
+  core::CacheEntry::Header header(stat.path());
 
   const int8_t wanted_level =
     level ? (*level == 0 ? core::CacheEntry::default_compression_level : *level)
           : 0;
-  const auto old_stat = Stat::lstat(cache_file, Stat::OnError::log);
-  Stat new_stat(old_stat);
+
+  std::optional<Stat> new_stat;
 
   if (header.compression_level != wanted_level) {
     const auto cache_file_data = util::value_or_throw<core::Error>(
-      util::read_file<util::Bytes>(cache_file),
-      FMT("Failed to read {}: ", cache_file));
+      util::read_file<util::Bytes>(stat.path()),
+      FMT("Failed to read {}: ", stat.path()));
     core::CacheEntry cache_entry(cache_file_data);
     cache_entry.verify_checksum();
 
@@ -52,23 +52,23 @@ FileRecompressor::recompress(const std::string& cache_file,
       level ? core::CompressionType::zstd : core::CompressionType::none;
     header.compression_level = wanted_level;
 
-    AtomicFile new_cache_file(cache_file, AtomicFile::Mode::binary);
+    AtomicFile new_cache_file(stat.path(), AtomicFile::Mode::binary);
     new_cache_file.write(
       core::CacheEntry::serialize(header, cache_entry.payload()));
     new_cache_file.commit();
-    new_stat = Stat::lstat(cache_file, Stat::OnError::log);
+    new_stat = Stat::lstat(stat.path(), Stat::OnError::log);
   }
 
   // Restore mtime/atime to keep cache LRU cleanup working as expected:
   if (keep_atime == KeepAtime::yes || new_stat) {
-    util::set_timestamps(cache_file, old_stat.mtime(), old_stat.atime());
+    util::set_timestamps(stat.path(), stat.mtime(), stat.atime());
   }
 
   m_content_size += header.entry_size;
-  m_old_size += old_stat.size_on_disk();
-  m_new_size += new_stat.size_on_disk();
+  m_old_size += stat.size_on_disk();
+  m_new_size += (new_stat ? *new_stat : stat).size_on_disk();
 
-  return Util::size_change_kibibyte(old_stat, new_stat);
+  return new_stat ? *new_stat : stat;
 }
 
 uint64_t
src/core/FileRecompressor.hpp
index b2fdb131ba6e8a014c9a10769846df1bbaf51928..84ce2824617ef82f4226b026649671ad151508b1 100644 (file)
@@ -18,6 +18,8 @@
 
 #pragma once
 
+#include <Stat.hpp>
+
 #include <atomic>
 #include <cstdint>
 #include <mutex>
@@ -33,10 +35,10 @@ public:
 
   FileRecompressor() = default;
 
-  // Returns on-disk size change in KiB.
-  int64_t recompress(const std::string& cache_file,
-                     const std::optional<int8_t> level,
-                     KeepAtime keep_atime);
+  // Returns stat after recompression.
+  Stat recompress(const Stat& stat,
+                  std::optional<int8_t> level,
+                  KeepAtime keep_atime);
 
   uint64_t content_size() const;
   uint64_t old_size() const;
src/core/mainoptions.cpp
index 7b3e639adfd1ed6fa91d783d475611ac14b18a92..9ebe5cee9ad31289c20800d98f9633e057758947 100644 (file)
@@ -273,19 +273,14 @@ trim_dir(const std::string& dir,
          std::optional<std::optional<int8_t>> recompress_level,
          uint32_t recompress_threads)
 {
-  struct File
-  {
-    std::string path;
-    Stat stat;
-  };
-  std::vector<File> files;
+  std::vector<Stat> files;
   uint64_t initial_size = 0;
 
   Util::traverse(dir, [&](const std::string& path, const bool is_dir) {
     if (is_dir || TemporaryFile::is_tmp_file(path)) {
       return;
     }
-    const auto stat = Stat::lstat(path);
+    auto stat = Stat::lstat(path);
     if (!stat) {
       // Probably some race, ignore.
       return;
@@ -296,12 +291,11 @@ trim_dir(const std::string& dir,
       throw Fatal(
         FMT("this looks like a local cache directory (found {})", path));
     }
-    files.push_back({path, stat});
+    files.emplace_back(std::move(stat));
   });
 
   std::sort(files.begin(), files.end(), [&](const auto& f1, const auto& f2) {
-    return trim_lru_mtime ? f1.stat.mtime() < f2.stat.mtime()
-                          : f1.stat.atime() < f2.stat.atime();
+    return trim_lru_mtime ? f1.mtime() < f2.mtime() : f1.atime() < f2.atime();
   });
 
   int64_t recompression_diff = 0;
@@ -313,15 +307,15 @@ trim_dir(const std::string& dir,
     core::FileRecompressor recompressor;
 
     std::atomic<uint64_t> incompressible_size = 0;
-    for (const auto& file : files) {
+    for (auto& file : files) {
       thread_pool.enqueue([&] {
         try {
-          recompressor.recompress(file.path,
-                                  *recompress_level,
-                                  core::FileRecompressor::KeepAtime::yes);
+          auto new_stat = recompressor.recompress(
+            file, *recompress_level, core::FileRecompressor::KeepAtime::yes);
+          file = std::move(new_stat); // Remember new size, if any.
         } catch (core::Error&) {
           // Ignore for now.
-          incompressible_size += file.stat.size_on_disk();
+          incompressible_size += file.size_on_disk();
         }
       });
     }
@@ -345,9 +339,9 @@ trim_dir(const std::string& dir,
     if (final_size <= trim_max_size) {
       break;
     }
-    if (Util::unlink_tmp(file.path)) {
+    if (Util::unlink_tmp(file.path())) {
       ++removed_files;
-      final_size -= file.stat.size_on_disk();
+      final_size -= file.size_on_disk();
     }
   }
 
src/storage/local/LocalStorage_compress.cpp
index 6fbe0eba6d7c8d184641c2ce7862602d22e207db..fd4d258aa94d13f177fdb771e0f1010fbf74a961 100644 (file)
@@ -109,12 +109,16 @@ LocalStorage::recompress(const std::optional<int8_t> level,
           thread_pool.enqueue(
             [&recompressor, &incompressible_size, level, stats_file, file] {
               try {
-                int64_t size_change_kibibyte = recompressor.recompress(
-                  file.path(), level, core::FileRecompressor::KeepAtime::no);
-                StatsFile(stats_file).update([=](auto& cs) {
-                  cs.increment(core::Statistic::cache_size_kibibyte,
-                               size_change_kibibyte);
-                });
+                Stat new_stat = recompressor.recompress(
+                  file, level, core::FileRecompressor::KeepAtime::no);
+                auto size_change_kibibyte =
+                  Util::size_change_kibibyte(file, new_stat);
+                if (size_change_kibibyte != 0) {
+                  StatsFile(stats_file).update([=](auto& cs) {
+                    cs.increment(core::Statistic::cache_size_kibibyte,
+                                 size_change_kibibyte);
+                  });
+                }
               } catch (core::Error&) {
                 // Ignore for now.
                 incompressible_size += file.size_on_disk();